Diffstat (limited to 'deps/v8/src/wasm/baseline')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h          |   28
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h      |   45
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h        |  188
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler-defs.h             |   16
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h                  |    8
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc                  |   73
-rw-r--r--  deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h  | 2817
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h        |   12
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h    |   25
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h          |  377
-rw-r--r--  deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h  |  384
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h        |  341
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h          |  246
13 files changed, 3766 insertions(+), 794 deletions(-)
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 6e2bacc043..211cf82398 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4262,14 +4262,34 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ if (kind == kF32) {
+ FloatRegister src_f = liftoff::GetFloatRegister(src);
+ VFPCompareAndSetFlags(src_f, src_f);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ VFPCompareAndSetFlags(src, src);
+ }
+
+ // Store a non-zero value if src is NaN.
+ str(dst, MemOperand(dst), ne); // x != x iff isnan(x)
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ QwNeonRegister src_q = liftoff::GetSimd128Register(src);
+ QwNeonRegister tmp_q = liftoff::GetSimd128Register(tmp_s128);
+ if (lane_kind == kF32) {
+ vpadd(tmp_q.low(), src_q.low(), src_q.high());
+ LowDwVfpRegister tmp_d =
+ LowDwVfpRegister::from_code(tmp_s128.low_fp().code());
+ vadd(tmp_d.low(), tmp_d.low(), tmp_d.high());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vadd(tmp_q.low(), src_q.low(), src_q.high());
+ }
+ emit_set_if_nan(dst, tmp_q.low(), lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
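
Both new ARM stubs rely on the IEEE-754 property that a NaN is the only value that compares unequal to itself: VFPCompareAndSetFlags(src, src) leaves the flags unordered exactly when src is a NaN, and the conditional str then writes a non-zero value through the flag address. A minimal scalar sketch of that contract (plain C++, not V8 code; the helper name is made up):

#include <cstdint>

// Make *flag non-zero iff value is a NaN; callers only ever test for != 0.
inline void set_if_nan(uint32_t* flag, double value) {
  if (value != value) {  // true only for NaN
    *flag = 1;
  }
}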
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a52370f293..1d29ce72bc 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -1173,12 +1173,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.S(), src.W());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.W(), scratch.S());
+ PopcntHelper(dst.W(), src.W());
return true;
}
@@ -1193,12 +1188,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.D(), src.gp().X());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.gp().X(), scratch.D());
+ PopcntHelper(dst.gp().X(), src.gp().X());
return true;
}
@@ -1717,13 +1707,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
UseScratchRegisterScope temps(this);
MemOperand src_op{
liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (dst != src) {
Mov(dst.fp().Q(), src.fp().Q());
}
+ *protected_load_pc = pc_offset();
if (mem_type == MachineType::Int8()) {
ld1(dst.fp().B(), laneidx, src_op);
} else if (mem_type == MachineType::Int16()) {
@@ -3259,14 +3249,35 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (kind == kF32) {
+ Fcmp(src.S(), src.S());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // If it's a NaN, it must be non-zero, so store that as the set value.
+ Str(src.S(), MemOperand(dst));
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Fcmp(src.D(), src.D());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // Double-precision NaNs must be non-zero in the most-significant 32
+ // bits, so store that.
+ St1(src.V4S(), 1, MemOperand(dst));
+ }
+ Bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ DoubleRegister tmp_fp = tmp_s128.fp();
+ if (lane_kind == kF32) {
+ Fmaxv(tmp_fp.S(), src.fp().V4S());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ Fmaxp(tmp_fp.D(), src.fp().V2D());
+ }
+ emit_set_if_nan(dst, tmp_fp, lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
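
The arm64 s128 variant first folds the vector into a single lane with Fmaxv (f32x4) or Fmaxp (f64x2); since the floating-point maximum propagates NaN, a NaN in any lane survives the reduction and the scalar emit_set_if_nan above catches it. A lane-wise sketch of that reduction (plain C++, not V8 code; names are illustrative):

#include <cmath>
#include <cstdint>

inline void s128_set_if_nan_f32x4(uint32_t* flag, const float lane[4]) {
  float acc = lane[0];
  for (int i = 1; i < 4; ++i) {
    // NaN-propagating maximum, mirroring Fmaxv: keep NaN if either input is NaN.
    acc = (acc != acc || lane[i] != lane[i]) ? std::nanf("")
                                             : (acc > lane[i] ? acc : lane[i]);
  }
  if (acc != acc) *flag = 1;  // non-zero iff some lane was NaN
}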
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index bb2fed83c6..5f92d50f6f 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2718,40 +2718,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-enum class ShiftSignedness { kSigned, kUnsigned };
-
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm is used for both signed and unsigned shifts, the only
- // difference is the actual shift and pack in the end. This is the same
- // algorithm as used in code-generator-ia32.cc
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- XMMRegister tmp_simd =
- assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
-
- // Unpack the bytes into words, do logical shifts, and repack.
- assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- assm->mov(tmp, rhs.gp());
- // Take shift value modulo 8.
- assm->and_(tmp, 7);
- assm->add(tmp, Immediate(8));
- assm->Movd(tmp_simd, tmp);
- if (is_signed) {
- assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
Register tmp =
@@ -2809,23 +2775,19 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
- movss(dst.fp(), src_op);
+ Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
- movsd(dst.fp(), src_op);
+ Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- Vbroadcastss(dst.fp(), src_op);
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2875,12 +2837,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
S128Store32Lane(dst_op, src.fp(), lane);
} else {
DCHECK_EQ(MachineRepresentation::kWord64, rep);
- if (lane == 0) {
- Movlps(dst_op, src.fp());
- } else {
- DCHECK_EQ(1, lane);
- Movhps(dst_op, src.fp());
- }
+ S128Store64Lane(dst_op, src.fp(), lane);
}
}
@@ -2951,16 +2908,12 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Pshufd(dst.fp(), dst.fp(), uint8_t{0});
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -3366,89 +3319,48 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- mov(tmp.gp(), rhs.gp());
- and_(tmp.gp(), Immediate(7));
- add(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
- Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- sub(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
+ tmp_simd.fp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp.gp(), mask);
- Movd(liftoff::kScratchDoubleReg, tmp.gp());
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, tmp.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(liftoff::kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
Register tmp = GetUnusedRegister(kGpReg, {}).gp();
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7;
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 3>(
- this, dst, lhs, rhs);
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp, mask);
- Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, tmp, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4300,26 +4212,8 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
- } else {
- movaps(liftoff::kScratchDoubleReg, src.fp());
- cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(liftoff::kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(liftoff::kScratchDoubleReg, dst.fp());
- Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4SConvertF32x4(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4787,22 +4681,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4836,19 +4722,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
or_(Operand(dst, 0), tmp_gp);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index d445655dca..5b43a2a41d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -46,6 +46,18 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+#elif V8_TARGET_ARCH_LOONG64
+
+// t6-t8 and s3-s4: scratch registers, s6: root
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
+ s1, s2, s5, s7, s8);
+
+// f29: zero, f30-f31: macro-assembler scratch float Registers.
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
+ f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -95,8 +107,8 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedFpRegs in frame-constants-riscv64.h
constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
- fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+ DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
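
These cache-register lists are what Liftoff's register allocator may hand out on each architecture; registers reserved as scratch, root, or zero registers are simply left out. A RegList is conceptually a bitmask with one bit per register code, which is why adding the LOONG64 lists or dropping ft0 from the RISC-V list is a one-line change. A rough sketch of that representation (not V8's actual RegList implementation):

#include <cstdint>
#include <initializer_list>

using RegListBits = uint64_t;  // one bit per register code (code < 64)

constexpr RegListBits ListOfCodes(std::initializer_list<int> codes) {
  RegListBits bits = 0;
  for (int code : codes) bits |= RegListBits{1} << code;
  return bits;
}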
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 19611fb0ee..c94c7ece9e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -1456,12 +1456,12 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
- // Set the i32 at address dst to 1 if src is a NaN.
+ // Set the i32 at address dst to a non-zero value if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
- inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
- Register tmp_gp, DoubleRegister tmp_fp,
+ inline void emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp, LiftoffRegister tmp_s128,
ValueKind lane_kind);
////////////////////////////////////
@@ -1711,6 +1711,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf8..65226ab408 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,7 +306,7 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif
@@ -2808,30 +2808,6 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uintptr_t* offset,
- LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations ||
- env_->bounds_checks == kTrapHandler) {
- return index;
- }
- CODE_COMMENT("mask memory index");
- // Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register old_index = index;
- pinned->clear(LiftoffRegister{old_index});
- index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) {
- __ Move(index, old_index, kPointerKind);
- }
- }
- Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
- if (*offset) __ emit_ptrsize_addi(index, index, *offset);
- __ emit_ptrsize_and(index, index, tmp);
- *offset = 0;
- return index;
- }
-
bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
int access_size, uintptr_t* offset) {
if (!index_slot.is_const()) return false;
@@ -2892,7 +2868,6 @@ class LiftoffCompiler {
CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -2937,7 +2912,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2951,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load lane");
Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +2996,6 @@ class LiftoffCompiler {
if (index == no_reg) return;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
@@ -3058,7 +3030,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
@@ -4340,7 +4311,6 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
@@ -4363,7 +4333,6 @@ class LiftoffCompiler {
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
@@ -4411,7 +4380,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4402,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
@@ -4467,7 +4434,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4480,6 @@ class LiftoffCompiler {
pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4531,8 +4496,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index. It may have changed
- // above in {AddMemoryMasking}.
+ // We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4526,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -5055,7 +5018,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
- static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5147,8 @@ class LiftoffCompiler {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
+ // TODO(7748): Unify implementation with TF: Implement this with
+ // GenerateCCall. Remove runtime function and builtin in wasm.tq.
CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
// Builtin parameter order:
@@ -5778,28 +5743,6 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
- // Mask the index to prevent SSCA.
- if (FLAG_untrusted_code_mitigations) {
- CODE_COMMENT("Mask indirect call index");
- // mask = ((index - size) & ~index) >> 31
- // Reuse allocated registers; note: size is still stored in {tmp_const}.
- Register diff = table;
- Register neg_index = tmp_const;
- Register mask = scratch;
- // 1) diff = index - size
- __ emit_i32_sub(diff, index, tmp_const);
- // 2) neg_index = ~index
- __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index, neg_index, index);
- // 3) mask = diff & neg_index
- __ emit_i32_and(mask, diff, neg_index);
- // 4) mask = mask >> 31
- __ emit_i32_sari(mask, mask, 31);
-
- // Apply mask.
- __ emit_i32_and(index, index, mask);
- }
-
CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
if (imm.table_imm.index == 0) {
@@ -6151,14 +6094,14 @@ class LiftoffCompiler {
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
- __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
- tmp_fp.fp(), lane_kind);
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+ tmp_s128, lane_kind);
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
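
This file also drops every FLAG_untrusted_code_mitigations path: AddMemoryMasking and the indirect-call index mask are gone, so memory and table indices are no longer clamped before use. For reference, the removed mask ((index - size) & ~index) >> 31 is all ones for an in-bounds index and zero otherwise, so ANDing it into the index forces any out-of-bounds index to zero and a mispredicted bounds check cannot be exploited to read out of bounds speculatively. A sketch of that branchless clamp (plain C++, not V8 code; arithmetic right shift and wrap-around casts assumed, as in the generated code):

#include <cstdint>

inline uint32_t mask_index(uint32_t index, uint32_t size) {
  int32_t diff = static_cast<int32_t>(index - size);  // negative iff index < size
  int32_t neg_index = ~static_cast<int32_t>(index);
  int32_t mask = (diff & neg_index) >> 31;            // all ones or all zeros
  return index & static_cast<uint32_t>(mask);
}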
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
new file mode 100644
index 0000000000..f22e013601
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -0,0 +1,2817 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
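
// Worked example for the two helpers above (a sketch, not part of the V8
// header): GetStackSlot(offset) addresses fp - offset, so with
// kInstanceOffset == 16 the instance spill (row "instance" of the frame
// diagram) is read from fp - 16, one slot below the 0xa WASM marker at fp - 8.
#include <cstdint>

constexpr intptr_t StackSlotAddressSketch(intptr_t fp, int offset) {
  return fp - offset;  // the address MemOperand(fp, -offset) refers to
}
static_assert(StackSlotAddressSketch(0, 16) == -16,
              "instance slot sits 16 bytes below the frame pointer");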
+
+template <typename T>
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->add_d(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, Operand(offset_imm));
+ assm->add_d(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->add_d(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->Ld_w(dst.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->Ld_d(dst.gp(), src);
+ break;
+ case kF32:
+ assm->Fld_s(dst.fp(), src);
+ break;
+ case kF64:
+ assm->Fld_d(dst.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueKind kind) {
+ MemOperand dst(base, offset);
+ switch (kind) {
+ case kI32:
+ assm->St_w(src.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->St_d(src.gp(), dst);
+ break;
+ case kF32:
+ assm->Fst_s(src.fp(), dst);
+ break;
+ case kF64:
+ assm->Fst_d(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->St_w(reg.gp(), MemOperand(sp, 0));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ assm->Push(reg.gp());
+ break;
+ case kF32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_s(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kF64:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_d(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+ // When constant that represents size of stack frame can't be represented
+ // as 16bit we need three instructions to add it to sp, so we reserve space
+ // for this case.
+ addi_d(sp, sp, 0);
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld_d(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld_d(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld_d(scratch, MemOperand(sp, i * 8));
+ St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ addi_d(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+ // We can't run out of space, just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add_d(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset;
+ CHECK(is_int26(imm32));
+ patching_assembler.b(imm32 >> 2);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld_d(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld_d(stack_limit, MemOperand(stack_limit, 0));
+ Add_d(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP;
+ Add_d(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Add_d(sp, sp, -framesize)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 3 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ CHECK(is_int26(imm32));
+ b(imm32 >> 2);
+}
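
// The out-of-line path above is the "extended stack check" from the comment:
// for frames of 4 KB and more, sp is compared against the instance's real
// stack limit plus the frame size before the frame is allocated, and frames
// larger than the whole --stack-size budget trap unconditionally. A minimal
// sketch of the fits-check (not V8 code; names are illustrative):
#include <cstdint>

inline bool LargeFrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                           uintptr_t frame_size) {
  // Mirrors Branch(&continuation, uge, sp, Operand(stack_limit)) after Add_d.
  return sp >= real_stack_limit + frame_size;
}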
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference(kind);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Ld_b(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Ld_w(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld_d(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int32_t offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld_d(dst, MemOperand(instance, offset));
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ St_d(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::ResetOSRTarget() {}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ UseScratchRegisterScope temps(this);
+ Operand offset_op =
+ offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
+ // register, because that's needed in the MemOperand below.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.Acquire();
+ Add_d(effective_offset, offset_reg, Operand(offset_imm));
+ offset_op = Operand(effective_offset);
+ }
+ if (offset_op.is_reg()) {
+ St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ } else {
+ St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ }
+
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem,
+ bool i64_offset) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ld_bu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Ld_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ld_hu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ld_h(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ld_wu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ld_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Ld_d(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Fld_s(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Fld_d(dst.fp(), src_op);
+ break;
+ case LoadType::kS128Load:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ St_b(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::St_h(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::St_w(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::St_d(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Fst_s(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Fst_d(src.fp(), dst_op);
+ break;
+ case StoreType::kS128Store:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { dbar(0); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, kind);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, kind);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ St_w(reg.gp(), dst);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ St_d(reg.gp(), dst);
+ break;
+ case kF32:
+ Fst_s(reg.fp(), dst);
+ break;
+ case kF64:
+ TurboAssembler::Fst_d(reg.fp(), dst);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ St_w(tmp.gp(), dst);
+ break;
+ }
+ case kI64:
+ case kRef:
+ case kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ St_d(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ Ld_w(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+    // TODO(LOONG_dev): LOONG64 Check, MIPS64 doesn't need, ARM64/LOONG64 need?
+ case kRtt:
+ case kRttWithDepth:
+ Ld_d(reg.gp(), src);
+ break;
+ case kF32:
+ Fld_s(reg.fp(), src);
+ break;
+ case kF64:
+ TurboAssembler::Fld_d(reg.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add_d(a0, fp, Operand(-start - size));
+ Add_d(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ St_d(zero_reg, MemOperand(a0, 0));
+ addi_d(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
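
// Both paths above amount to zeroing the spill area [fp - start - size,
// fp - start): up to 12 slots with straight-line stores, anything larger with
// a store loop. Equivalent sketch (plain C++, not V8 code):
#include <cstdint>
#include <cstring>

inline void FillStackSlotsWithZeroSketch(uint8_t* fp, int start, int size) {
  std::memset(fp - start - size, 0, static_cast<size_t>(size));
}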
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz_d(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz_d(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_w(dst, lhs, rhs);
+}
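
// Besides the divide-by-zero branch, the kScratchReg/kScratchReg2 sequence
// above detects the one other case i32.div_s must trap on: kMinInt / -1,
// whose true quotient 2^31 is not representable in 32 bits. The trap
// condition as a scalar sketch (not V8 code; the name is illustrative):
#include <cstdint>
#include <limits>

inline bool I32DivSTrapsSketch(int32_t lhs, int32_t rhs) {
  return rhs == 0 ||
         (lhs == std::numeric_limits<int32_t>::min() && rhs == -1);
}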
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Div_wu(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_wu(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, add_w)
+I32_BINOP(sub, sub_w)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add_w)
+I32_BINOP_I(sub, Sub_w)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz_w(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt_w(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction, instruction1) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction1(dst, src, amount & 0x1f); \
+ }
+
+I32_SHIFTOP_I(shl, sll_w, slli_w)
+I32_SHIFTOP_I(sar, sra_w, srai_w)
+I32_SHIFTOP_I(shr, srl_w, srli_w)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(
+ kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, Add_d)
+I64_BINOP(sub, Sub_d)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction, instructioni) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ instructioni(dst.gp(), src.gp(), amount & 63); \
+ }
+
+I64_SHIFTOP_I(shl, sll_d, slli_d)
+I64_SHIFTOP_I(sar, sra_d, srai_d)
+I64_SHIFTOP_I(shr, srl_d, srli_d)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+ bstrpick_d(dst, src, 31, 0);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI32SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_w_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_w(kScratchDoubleReg, dst.gp());
+ ffint_s_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ fcvt_s_d(converted_back.fp(), converted_back.fp());
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_w_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+
+      // Check whether to trap.
+ ffint_d_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+
+      // Check whether to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ slli_w(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI64SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_l_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_s_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_l_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_d_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64ReinterpretF64:
+ movfr2gr_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ movgr2fr_d(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i32");
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueKind kind,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(kind == kI32 || kind == kI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltui(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
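+  // Use a temporary if dst aliases an input, so that the inputs are still
+  // intact when the condition is evaluated below.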
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+  // If the negated condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+  // If a temporary was used, move the result into dst.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltui(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+  // If the negated condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+  // If a temporary was used, move the result into dst.
+ TurboAssembler::Move(dst, tmp);
+}
+
+namespace liftoff {
+
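+// Maps a Liftoff condition to an FPU compare condition. {predicate} is set to
+// false when the Liftoff condition corresponds to the negation of the returned
+// FPU condition (e.g. kUnequal is implemented as "not CEQ").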
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+ bool* predicate) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kUnequal:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF32(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f32.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF32(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF64(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f64.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF64(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueKind kind) {
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
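+  // Test the Smi tag bit of {obj} and branch depending on {mode}.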
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
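+  // {limit_address} holds the address of the stack limit; branch to the
+  // out-of-line code when sp is at or below that limit.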
+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (FLAG_debug_code) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
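+  // Push all GP registers as one contiguous block, then all FP registers as a
+  // second block; PopRegisters below restores them in reverse order.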
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kSystemPointerSize;
+ addi_d(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ St_d(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ unsigned slot_size = 8;
+ addi_d(sp, sp, -(num_fp_regs * slot_size));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += slot_size;
+ }
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += 8;
+ }
+ if (fp_offset) addi_d(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ addi_d(sp, sp, gp_offset);
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ Drop(static_cast<int>(num_stack_slots));
+ Ret();
+}
+
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueKind out_argument_kind, int stack_bytes,
+ ExternalReference ext_ref) {
+ addi_d(sp, sp, -stack_bytes);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On LoongArch, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mov(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
+ }
+
+ addi_d(sp, sp, stack_bytes);
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ addi_d(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addi_d(sp, sp, size);
+}
+
+void LiftoffAssembler::MaybeOSR() {}
+
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label not_nan;
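+  // Write 1 to the memory at {dst} if {src} is NaN; leave the memory untouched
+  // otherwise.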
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ BranchFalseShortF(&not_nan);
+ li(scratch, 1);
+ St_w(scratch, MemOperand(dst, 0));
+ bind(&not_nan);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp,
+ LiftoffRegister tmp_s128,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ } else {
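+          // An s128 value occupies two stack slots; push both 8-byte halves.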
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->Push(kScratchReg);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ }
+ break;
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->Push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4ab036da8e..35eabecbf0 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -3067,20 +3067,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sw(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 0a23c190e9..e47da84148 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -3235,22 +3235,35 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sd(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+  Sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (lane_kind == kF32) {
+ fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ }
+ BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
+ USE_DELAY_SLOT);
+ li(tmp_gp, 1);
+ Sw(tmp_gp, MemOperand(dst));
+ bind(&not_nan);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 8e3808d259..617e193bd1 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -41,7 +41,7 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset = 3 * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -106,7 +106,26 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = ip;
+ // Push the return address and frame pointer to complete the stack frame.
+ AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp), r0);
+ StoreU64(scratch, MemOperand(sp), r0);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize), r0);
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
+ }
+
+ // Set the new stack and frame pointer.
+ AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
+ Pop(r0, fp);
+ mtlr(r0);
}
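A rough scalar sketch of the copy performed by PrepareTailCall above (illustration only; slots are 8 bytes, and the two extra slots are the return address and frame pointer pushed just before):

#include <cstdint>

void RefShiftFrame(uint64_t* sp, uint64_t* fp, int num_callee_stack_params,
                   int stack_param_delta) {
  // slot_count covers the callee stack params plus the saved lr/fp pair.
  int slot_count = num_callee_stack_params + 2;
  for (int i = slot_count - 1; i >= 0; --i) {
    fp[i - stack_param_delta] = sp[i];  // MemOperand(fp, (i - delta) * 8)
  }
  // The generated code then sets sp = fp - stack_param_delta * 8.
}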
void LiftoffAssembler::AlignFrameSize() {}
@@ -169,14 +188,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
- MovIntToFloat(reg.fp(), scratch);
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch, ip);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
@@ -750,12 +769,19 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
andi(r0, rhs, Operand(31)); \
return r0; \
})
+#define REGISTER_AND_WITH_3F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(63)); \
+ return r0; \
+ })
+
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
@@ -772,16 +798,12 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
true, bool) \
V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
true, bool) \
- V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
- ROUND_F64_TO_F32, true, bool) \
V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
- V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
- bool) \
V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
@@ -873,17 +895,17 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
REGISTER_AND_WITH_1F, USE, , void) \
V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -921,53 +943,139 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
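For reference, the guards above encode the wasm i32.div_s trapping conditions; a scalar sketch (helper names invented):

#include <cstdint>
#include <limits>

enum class DivStatus { kOk, kTrapDivByZero, kTrapUnrepresentable };

DivStatus RefI32DivS(int32_t lhs, int32_t rhs, int32_t* out) {
  if (rhs == 0) return DivStatus::kTrapDivByZero;
  if (rhs == -1 && lhs == std::numeric_limits<int32_t>::min()) {
    return DivStatus::kTrapUnrepresentable;  // kMinInt / -1 overflows
  }
  *out = lhs / rhs;
  return DivStatus::kOk;
}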
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont, done, trap_div_unrepresentable;
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ beq(&trap_div_unrepresentable);
+
+  // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+  // The kMinInt / -1 case: the remainder is 0.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
@@ -1083,10 +1191,10 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs);
+ fcmpu(lhs, rhs, cr7);
Label done;
mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ b(liftoff::ToCondition(liftoff_cond), &done, cr7);
mov(dst, Operand::Zero());
bind(&done);
}
@@ -1114,7 +1222,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj, r0);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target, cr0); // branch if SMI
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
@@ -2266,18 +2376,31 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
@@ -2289,15 +2412,95 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, kSystemPointerSize);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+  // Reserve space on the stack.
+ while (size > kStackPageSize) {
+ SubS64(sp, sp, Operand(kStackPageSize), r0);
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ SubS64(sp, sp, Operand(size), r0);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mr(r3, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, r0);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r3;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ AddS64(sp, sp, Operand(total_size), r0);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
@@ -2315,11 +2518,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ SubS64(sp, sp, Operand(size), r0);
+ mr(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
+ AddS64(sp, sp, Operand(size));
}
void LiftoffAssembler::MaybeOSR() {}
@@ -2329,15 +2533,114 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kF64:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index fef59471c1..1860a1920f 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -79,16 +79,16 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
if (is_uint31(offset_imm)) {
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
if (offset == no_reg) return MemOperand(addr, offset_imm32);
- assm->Add64(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm32);
+ assm->Add64(kScratchReg2, addr, offset);
+ return MemOperand(kScratchReg2, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
- assm->li(kScratchReg, offset_imm);
- assm->Add64(kScratchReg, kScratchReg, addr);
+ assm->li(kScratchReg2, offset_imm);
+ assm->Add64(kScratchReg2, kScratchReg2, addr);
if (offset != no_reg) {
- assm->Add64(kScratchReg, kScratchReg, offset);
+ assm->Add64(kScratchReg2, kScratchReg2, offset);
}
- return MemOperand(kScratchReg, 0);
+ return MemOperand(kScratchReg2, 0);
}
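GetMemOp above only materializes base + index + immediate, falling back to kScratchReg2 when the immediate cannot be folded into the addressing mode; the address it computes is simply (sketch, helper name invented):

#include <cstdint>

uintptr_t RefEffectiveAddress(uintptr_t addr, uintptr_t index_or_zero,
                              uint64_t offset_imm) {
  return addr + index_or_zero + static_cast<uintptr_t>(offset_imm);
}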
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
@@ -128,10 +128,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst);
+ assm->UStoreFloat(src.fp(), dst, kScratchReg);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst);
+ assm->UStoreDouble(src.fp(), dst, kScratchReg);
break;
default:
UNREACHABLE();
@@ -335,7 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// space if we first allocate the frame and then do the stack check (we will
// need some remaining stack space for throwing the exception). That's why we
// check the available stack space before we allocate the frame. To do this we
- // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // replace the {__ Add64(sp, sp, -frame_size)} with a jump to OOL code that
// does this "extended stack check".
//
// The OOL code can simply be generated here with the normal assembler,
@@ -376,7 +376,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
Add64(sp, sp, Operand(-frame_size));
// Jump back to the start of the function, from {pc_offset()} to
- // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // right after the reserved space for the {__ Add64(sp, sp, -framesize)}
// (which is a Branch now).
int func_start_offset = offset + 2 * kInstrSize;
imm32 = func_start_offset - pc_offset();
@@ -552,11 +552,20 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
TurboAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
break;
+ case LoadType::kS128Load: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
+ if (src_op.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src_op.rm(), src_op.offset());
+ }
+ vl(dst.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -607,11 +616,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
break;
+ case StoreType::kS128Store: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
+ if (dst_op.offset() != 0) {
+ Add64(kScratchReg, dst_op.rm(), dst_op.offset());
+ }
+ vs(src.fp().toV(), dst_reg, 0, VSew::E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -747,24 +765,26 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U:
+ fence(PSR | PSW, PSR | PSW);
lbu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
+ fence(PSR | PSW, PSR | PSW);
lhu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load:
- lr_w(true, true, dst.gp(), src_reg);
- return;
case LoadType::kI64Load32U:
- lr_w(true, true, dst.gp(), src_reg);
- slli(dst.gp(), dst.gp(), 32);
- srli(dst.gp(), dst.gp(), 32);
+ fence(PSR | PSW, PSR | PSW);
+ lw(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI64Load:
- lr_d(true, true, dst.gp(), src_reg);
+ fence(PSR | PSW, PSR | PSW);
+ ld(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
default:
UNREACHABLE();
@@ -780,22 +800,22 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- sync();
+ fence(PSR | PSW, PSW);
sb(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- sync();
+ fence(PSR | PSW, PSW);
sh(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- sc_w(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sw(src.gp(), dst_reg, 0);
return;
case StoreType::kI64Store:
- sc_d(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sd(src.gp(), dst_reg, 0);
return;
default:
UNREACHABLE();
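The lr/sc pairs are replaced above with fences around plain loads and stores, which matches the usual RISC-V mapping for sequentially consistent accesses (stated here as an assumption): fence rw,rw; load; fence r,rw for loads and fence rw,w; store for stores. The same ordering, modelled with C++ atomics:

#include <atomic>
#include <cstdint>

uint32_t RefAtomicLoad32(const std::atomic<uint32_t>* p) {
  return p->load(std::memory_order_seq_cst);  // fence rw,rw; lw; fence r,rw
}

void RefAtomicStore32(std::atomic<uint32_t>* p, uint32_t v) {
  p->store(v, std::memory_order_seq_cst);  // fence rw,w; sw
}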
@@ -948,7 +968,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+    TurboAssembler::vmv_vv(dst.toV(), src.toV());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
@@ -971,9 +995,15 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case kS128:
- bailout(kSimd, "Spill S128");
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
+ if (dst.offset() != 0) {
+ Add64(kScratchReg, dst.rm(), dst.offset());
+ }
+ vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
break;
+ }
default:
UNREACHABLE();
}
@@ -1021,6 +1051,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ }
+ vl(reg.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1072,7 +1111,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
@@ -1154,7 +1193,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src);
+ TurboAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1663,7 +1702,33 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "emit_i8x16_shuffle");
+ VRegister dst_v = dst.fp().toV();
+ VRegister lhs_v = lhs.fp().toV();
+ VRegister rhs_v = rhs.fp().toV();
+
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ VU.set(kScratchReg, E8, m1);
+ if (dst_v == lhs_v) {
+ vmv_vv(kSimd128ScratchReg2, lhs_v);
+ lhs_v = kSimd128ScratchReg2;
+ } else if (dst_v == rhs_v) {
+ vmv_vv(kSimd128ScratchReg2, rhs_v);
+ rhs_v = kSimd128ScratchReg2;
+ }
+ vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
+ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ vrgather_vv(kSimd128ScratchReg, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg);
}
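The two vrgather passes above (indices as-is against lhs, indices minus 16 against rhs, results ORed) implement the standard i8x16.shuffle selection; a scalar reference sketch (helper name invented):

#include <cstdint>

void RefI8x16Shuffle(const uint8_t lhs[16], const uint8_t rhs[16],
                     const uint8_t shuffle[16], uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    uint8_t s = shuffle[i];  // 0..31 per the wasm spec
    dst[i] = s < 16 ? lhs[s] : rhs[s - 16];
  }
}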
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -1679,42 +1744,46 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_splat");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_splat");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -1756,7 +1825,11 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
@@ -1781,92 +1854,92 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_u");
+  WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1941,32 +2014,38 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kSimd, "emit_s128_const");
+ WasmRvvS128const(dst.fp().toV(), imms);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kSimd, "emit_s128_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_or");
+ VU.set(kScratchReg, E8, m1);
+ vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_xor");
+ VU.set(kScratchReg, E8, m1);
+ vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), rhs.fp().toV());
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), dst.fp().toV());
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
@@ -1978,32 +2057,55 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_neg");
+ VU.set(kScratchReg, E8, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v128_anytrue");
+ VU.set(kScratchReg, E8, m1);
+ Label t;
+ vmv_sx(kSimd128ScratchReg, zero_reg);
+ vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beq(dst.gp(), zero_reg, &t);
+ li(dst.gp(), 1);
+ bind(&t);
}
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_alltrue");
+ VU.set(kScratchReg, E8, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
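Both reductions above boil the vector down to one scalar: any_true is an unsigned max over the lanes, all_true an unsigned min; scalar sketches of the intended results (helper names invented):

#include <cstdint>

int RefV128AnyTrue(const uint8_t lanes[16]) {
  uint8_t max = 0;
  for (int i = 0; i < 16; ++i) max = lanes[i] > max ? lanes[i] : max;
  return max != 0;  // some lane non-zero
}

int RefI8x16AllTrue(const uint8_t lanes[16]) {
  uint8_t min = 0xFF;
  for (int i = 0; i < 16; ++i) min = lanes[i] < min ? lanes[i] : min;
  return min != 0;  // every lane non-zero
}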
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_bitmask");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shl");
+ VU.set(kScratchReg, E8, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E8, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
@@ -2030,36 +2132,42 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add");
+ VU.set(kScratchReg, E8, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
@@ -2093,22 +2201,37 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_alltrue");
+ VU.set(kScratchReg, E16, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_bitmask");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shl");
+ VU.set(kScratchReg, E16, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
@@ -2135,7 +2258,8 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add");
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
@@ -2152,7 +2276,8 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub");
+ VU.set(kScratchReg, E16, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
@@ -2203,22 +2328,36 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_alltrue");
+ VU.set(kScratchReg, E32, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_bitmask");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shl");
+ VU.set(kScratchReg, E32, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E32, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2245,12 +2384,14 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2295,17 +2436,32 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_alltrue");
+ VU.set(kScratchReg, E64, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shl");
+ VU.set(kScratchReg, E64, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shli");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
@@ -2332,12 +2488,14 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_sub");
+  VU.set(kScratchReg, E64, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2637,7 +2795,11 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
@@ -2667,7 +2829,9 @@ void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), v31);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
@@ -2692,28 +2856,40 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_replace_lane");
+ VU.set(kScratchReg, E8, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_replace_lane");
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
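Each replace_lane above builds a one-bit mask for the target lane and lets vmerge write the scalar into just that lane; a scalar sketch for the i32x4 case (helper name invented):

#include <cstdint>

void RefI32x4ReplaceLane(uint32_t lanes[4], uint32_t scalar,
                         uint8_t imm_lane_idx) {
  uint8_t mask = static_cast<uint8_t>(1u << imm_lane_idx);  // v0 mask register
  for (int i = 0; i < 4; ++i) {
    if (mask & (1u << i)) lanes[i] = scalar;  // vmerge_vx picks the scalar here
  }
}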
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
@@ -2730,9 +2906,9 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
bailout(kSimd, "emit_s128_set_if_nan");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 722b0b074b..3db9ea0975 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2143,81 +2143,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_min, F64x2Min) \
- V(f64x2_max, F64x2Max) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_ne, F64x2Ne) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_ne, F32x4Ne) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_mul, I64x2Mul) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_ge_u, I8x16GeU) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name, op) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add, fp) \
+ V(f64x2_sub, F64x2Sub, fp) \
+ V(f64x2_mul, F64x2Mul, fp) \
+ V(f64x2_div, F64x2Div, fp) \
+ V(f64x2_min, F64x2Min, fp) \
+ V(f64x2_max, F64x2Max, fp) \
+ V(f64x2_eq, F64x2Eq, fp) \
+ V(f64x2_ne, F64x2Ne, fp) \
+ V(f64x2_lt, F64x2Lt, fp) \
+ V(f64x2_le, F64x2Le, fp) \
+ V(f32x4_add, F32x4Add, fp) \
+ V(f32x4_sub, F32x4Sub, fp) \
+ V(f32x4_mul, F32x4Mul, fp) \
+ V(f32x4_div, F32x4Div, fp) \
+ V(f32x4_min, F32x4Min, fp) \
+ V(f32x4_max, F32x4Max, fp) \
+ V(f32x4_eq, F32x4Eq, fp) \
+ V(f32x4_ne, F32x4Ne, fp) \
+ V(f32x4_lt, F32x4Lt, fp) \
+ V(f32x4_le, F32x4Le, fp) \
+ V(i64x2_add, I64x2Add, fp) \
+ V(i64x2_sub, I64x2Sub, fp) \
+ V(i64x2_mul, I64x2Mul, fp) \
+ V(i64x2_eq, I64x2Eq, fp) \
+ V(i64x2_ne, I64x2Ne, fp) \
+ V(i64x2_gt_s, I64x2GtS, fp) \
+ V(i64x2_ge_s, I64x2GeS, fp) \
+ V(i64x2_shl, I64x2Shl, gp) \
+ V(i64x2_shr_s, I64x2ShrS, gp) \
+ V(i64x2_shr_u, I64x2ShrU, gp) \
+ V(i32x4_add, I32x4Add, fp) \
+ V(i32x4_sub, I32x4Sub, fp) \
+ V(i32x4_mul, I32x4Mul, fp) \
+ V(i32x4_eq, I32x4Eq, fp) \
+ V(i32x4_ne, I32x4Ne, fp) \
+ V(i32x4_gt_s, I32x4GtS, fp) \
+ V(i32x4_ge_s, I32x4GeS, fp) \
+ V(i32x4_gt_u, I32x4GtU, fp) \
+ V(i32x4_ge_u, I32x4GeU, fp) \
+ V(i32x4_min_s, I32x4MinS, fp) \
+ V(i32x4_min_u, I32x4MinU, fp) \
+ V(i32x4_max_s, I32x4MaxS, fp) \
+ V(i32x4_max_u, I32x4MaxU, fp) \
+ V(i32x4_shl, I32x4Shl, gp) \
+ V(i32x4_shr_s, I32x4ShrS, gp) \
+ V(i32x4_shr_u, I32x4ShrU, gp) \
+ V(i16x8_add, I16x8Add, fp) \
+ V(i16x8_sub, I16x8Sub, fp) \
+ V(i16x8_mul, I16x8Mul, fp) \
+ V(i16x8_eq, I16x8Eq, fp) \
+ V(i16x8_ne, I16x8Ne, fp) \
+ V(i16x8_gt_s, I16x8GtS, fp) \
+ V(i16x8_ge_s, I16x8GeS, fp) \
+ V(i16x8_gt_u, I16x8GtU, fp) \
+ V(i16x8_ge_u, I16x8GeU, fp) \
+ V(i16x8_min_s, I16x8MinS, fp) \
+ V(i16x8_min_u, I16x8MinU, fp) \
+ V(i16x8_max_s, I16x8MaxS, fp) \
+ V(i16x8_max_u, I16x8MaxU, fp) \
+ V(i16x8_shl, I16x8Shl, gp) \
+ V(i16x8_shr_s, I16x8ShrS, gp) \
+ V(i16x8_shr_u, I16x8ShrU, gp) \
+ V(i8x16_add, I8x16Add, fp) \
+ V(i8x16_sub, I8x16Sub, fp) \
+ V(i8x16_eq, I8x16Eq, fp) \
+ V(i8x16_ne, I8x16Ne, fp) \
+ V(i8x16_gt_s, I8x16GtS, fp) \
+ V(i8x16_ge_s, I8x16GeS, fp) \
+ V(i8x16_gt_u, I8x16GtU, fp) \
+ V(i8x16_ge_u, I8x16GeU, fp) \
+ V(i8x16_min_s, I8x16MinS, fp) \
+ V(i8x16_min_u, I8x16MinU, fp) \
+ V(i8x16_max_s, I8x16MaxS, fp) \
+ V(i8x16_max_u, I8x16MaxU, fp) \
+ V(i8x16_shl, I8x16Shl, gp) \
+ V(i8x16_shr_s, I8x16ShrS, gp) \
+ V(i8x16_shr_u, I8x16ShrU, gp)
+
+#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.fp()); \
+ op(dst.fp(), lhs.fp(), rhs.stype()); \
}
-SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
-#undef EMIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
+#undef EMIT_SIMD_BINOP_RR
+#undef SIMD_BINOP_RR_LIST
+
+#define SIMD_BINOP_RI_LIST(V) \
+ V(i64x2_shli, I64x2Shl) \
+ V(i64x2_shri_s, I64x2ShrS) \
+ V(i64x2_shri_u, I64x2ShrU) \
+ V(i32x4_shli, I32x4Shl) \
+ V(i32x4_shri_s, I32x4ShrS) \
+ V(i32x4_shri_u, I32x4ShrU) \
+ V(i16x8_shli, I16x8Shl) \
+ V(i16x8_shri_s, I16x8ShrS) \
+ V(i16x8_shri_u, I16x8ShrU) \
+ V(i8x16_shli, I8x16Shl) \
+ V(i8x16_shri_s, I8x16ShrS) \
+ V(i8x16_shri_u, I8x16ShrU)
+
+#define EMIT_SIMD_BINOP_RI(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t rhs) { \
+ op(dst.fp(), lhs.fp(), Operand(rhs)); \
+ }
+SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
+#undef EMIT_SIMD_BINOP_RI
+#undef SIMD_BINOP_RI_LIST
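The RR and RI lists above are plain X-macros: each entry expands into one emit_* wrapper, with the extra column selecting whether the rhs comes from an FP or GP register (shift amounts live in GP registers). A standalone miniature of the same pattern, with invented names:

#include <cstdio>

#define BINOP_LIST(V) \
  V(add, Add, int)    \
  V(shl, Shl, unsigned)

#define EMIT_BINOP(name, op, rhs_type) \
  int emit_##name(int lhs, rhs_type rhs) { return op(lhs, rhs); }

int Add(int lhs, int rhs) { return lhs + rhs; }
int Shl(int lhs, unsigned rhs) { return lhs << rhs; }

BINOP_LIST(EMIT_BINOP)
#undef EMIT_BINOP
#undef BINOP_LIST

int main() { std::printf("%d %d\n", emit_add(2, 3), emit_shl(1, 4)); }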
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2459,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
-void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
-}
-
-void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_u");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_u");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2523,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
-void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
-}
-
-void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_u");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2578,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
-void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
-}
-
-void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_u");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_u");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2675,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
-void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
-}
-
-void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_u");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_u");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3134,14 +3041,40 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (kind == kF32) {
+ cebr(src, src);
+ bunordered(&return_nan);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ cdbr(src, src);
+ bunordered(&return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src, MemOperand(dst), r0);
+ bind(&done);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (lane_kind == kF32) {
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(2));
+ b(Condition(0x5), &return_nan); // If any or all are NaN.
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(3));
+ b(Condition(0x5), &return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src.fp(), MemOperand(dst), r0);
+ bind(&done);
}
void LiftoffStackSlots::Construct(int param_slots) {
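The two s390 emitters above implement Liftoff's NaN-detection flag with a self-comparison: comparing a value against itself is unordered exactly when it is NaN, and in that case a non-zero value is stored through dst. A minimal scalar sketch of that contract, assuming dst addresses a 32-bit flag slot (set_if_nan and NanFlag are illustrative names, not V8 API):

#include <cstdint>

using NanFlag = uint32_t;  // the in-memory slot that dst points at

// Hypothetical scalar analogue of emit_set_if_nan: write a non-zero value
// through dst when (and only when) src is NaN.
inline void set_if_nan(NanFlag* dst, double src) {
  if (src != src) {  // unordered self-compare, true exactly for NaN
    *dst = 1;        // any non-zero value marks "NaN seen"
  }
}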
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5cda7b3c4..50032eac23 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -1317,7 +1317,9 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- if (dst.gp() == rhs.gp()) {
+ if (lhs.gp() == rhs.gp()) {
+ xorq(dst.gp(), dst.gp());
+ } else if (dst.gp() == rhs.gp()) {
negq(dst.gp());
addq(dst.gp(), lhs.gp());
} else {
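The emit_i64_sub change above adds a fast path for lhs == rhs. Besides producing the constant zero directly, it sidesteps the fully aliased case (dst == lhs == rhs), where the old negq-then-addq path reads the already negated value through lhs and returns -2x instead of 0. A minimal scalar sketch of the two paths under full aliasing, with plain int64_t values standing in for the registers:

#include <cassert>
#include <cstdint>

// Old path when dst, lhs and rhs all name the same register: negate, then add
// "lhs" -- which is the same register and now already holds -x.
int64_t old_sub_all_aliased(int64_t x) {
  int64_t reg = x;
  reg = -reg;     // negq(dst.gp())
  reg += reg;     // addq(dst.gp(), lhs.gp())  -> -2x, not lhs - rhs == 0
  return reg;
}

// New path: lhs == rhs means the difference is zero, so just clear dst.
int64_t new_sub_all_aliased(int64_t /*x*/) {
  return 0;       // xorq(dst.gp(), dst.gp())
}

int main() {
  assert(old_sub_all_aliased(3) == -6);  // demonstrates the stale-value bug
  assert(new_sub_all_aliased(3) == 0);
  return 0;
}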
@@ -2335,29 +2337,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm as the one in code-generator-x64.cc.
- assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- // Prepare shift value
- assm->movq(kScratchRegister, rhs.gp());
- // Take shift value modulo 8.
- assm->andq(kScratchRegister, Immediate(7));
- assm->addq(kScratchRegister, Immediate(8));
- assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
- if (is_signed) {
- assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packsswb(dst.fp(), kScratchDoubleReg);
- } else {
- assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packuswb(dst.fp(), kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -2414,21 +2393,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), dst.fp(), src_op, 0);
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst.fp(), src_op);
- } else {
- movss(dst.fp(), src_op);
- shufps(dst.fp(), dst.fp(), byte{0});
- }
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2440,18 +2409,17 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (mem_type == MachineType::Int8()) {
- Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int16()) {
- Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int32()) {
- Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else {
DCHECK_EQ(MachineType::Int64(), mem_type);
- Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
}
}
@@ -2515,26 +2483,24 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
+ I8x16Popcnt(dst.fp(), src.fp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2927,89 +2893,37 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- movq(kScratchRegister, rhs.gp());
- andq(kScratchRegister, Immediate(7));
- addq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psrlw(kScratchDoubleReg, tmp_simd.fp());
- Packuswb(kScratchDoubleReg, kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- subq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psllw(dst.fp(), tmp_simd.fp());
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7; // i.InputInt3(1);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst.fp(), lhs.fp(), byte{shift});
- } else if (dst != lhs) {
- Movaps(dst.fp(), lhs.fp());
- psrlw(dst.fp(), byte{shift});
- }
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
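The removed open-coded i8x16 shift sequences (and, presumably, the I8x16Shl/I8x16ShrS/I8x16ShrU macros that replace them) all work around the lack of 8-bit shifts in x64 SIMD: right shifts widen each byte into the high half of a 16-bit lane, shift by (amount & 7) + 8, and narrow back; left shifts shift whole 16-bit lanes and mask away the bits that leak in from the neighbouring byte. A per-lane scalar sketch of the effect:

#include <cstdint>

// Signed right shift: widen the byte into bits 8..15, arithmetic-shift by
// amount + 8, and the low byte of the result is the shifted lane.
inline int8_t i8_shr_s(int8_t lane, int32_t amount) {
  int widened = lane * 256;  // avoids shifting a negative value
  return static_cast<int8_t>(widened >> ((amount & 7) + 8));
}

// Unsigned right shift: same widening trick with a logical shift.
inline uint8_t i8_shr_u(uint8_t lane, int32_t amount) {
  unsigned widened = static_cast<unsigned>(lane) << 8;
  return static_cast<uint8_t>(widened >> ((amount & 7) + 8));
}

// Left shift: shifting 16-bit lanes and masking with 0xff << amount is, per
// byte, just a truncating byte shift.
inline uint8_t i8_shl(uint8_t lane, int32_t amount) {
  return static_cast<uint8_t>(lane << (amount & 7));
}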
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3220,14 +3134,13 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- Pmaddubsw(dst.fp(), src.fp(), op);
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(), kScratchRegister);
}
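The removed unsigned extadd-pairwise sequence multiplies-and-adds against a memory operand of 16 bytes of 0x01 (pmaddubsw), which is exactly a pairwise extending add; the signed i32x4 variant further down did the same with pmaddwd against 0x0001 words. A scalar sketch of what one destination lane receives:

#include <cstdint>

// One 16-bit destination lane of i16x8.extadd_pairwise_i8x16_u: the sum of
// two adjacent zero-extended bytes, i.e. a multiply-add against constant 1.
inline uint16_t extadd_pair_u(uint8_t lo, uint8_t hi) {
  return static_cast<uint16_t>(lo) * 1 + static_cast<uint16_t>(hi) * 1;
}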
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3259,7 +3172,7 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3376,14 +3289,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- Pmaddwd(dst.fp(), src.fp(), op);
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), kScratchDoubleReg);
}
namespace liftoff {
@@ -3574,28 +3485,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, static_cast<byte>(1));
- Andps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), static_cast<byte>(1));
- Andps(dst.fp(), src.fp());
- }
+ Absps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Negps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3730,28 +3625,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Abspd(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllq(kScratchDoubleReg, static_cast<byte>(63));
- Xorpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), static_cast<byte>(63));
- Xorpd(dst.fp(), src.fp());
- }
+ Negpd(dst.fp(), src.fp());
}
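The removed f32x4/f64x2 abs and neg sequences (now the Absps/Negps/Abspd/Negpd macros) build their constants on the fly: pcmpeqd produces all ones, a logical right shift by 1 leaves everything but the sign bit for abs, and a left shift to the top bit leaves only the sign bit for neg; the lane is then AND-ed respectively XOR-ed with that mask. A per-lane scalar sketch for f32:

#include <cstdint>
#include <cstring>

inline float f32x4_abs_lane(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= UINT32_MAX >> 1;   // pcmpeqd; psrld 1; andps -- clear the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

inline float f32x4_neg_lane(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= UINT32_MAX << 31;  // pcmpeqd; pslld 31; xorps -- flip the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}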
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -3842,7 +3721,7 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -3852,26 +3731,7 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), kScratchDoubleReg);
- } else {
- movaps(kScratchDoubleReg, src.fp());
- cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(kScratchDoubleReg, dst.fp());
- Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), kScratchDoubleReg);
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, kScratchRegister);
}
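The removed i32x4.trunc_sat_f32x4_s sequence (now the I32x4SConvertF32x4 macro) first zeroes NaN lanes via a self-compare mask, truncates with cvttps2dq, and then rewrites the 0x80000000 that positive overflow produces into 0x7FFFFFFF; negative overflow keeps INT32_MIN. Per lane this is the wasm saturating conversion, sketched in scalar form:

#include <cmath>
#include <cstdint>
#include <limits>

inline int32_t i32_sconvert_f32(float x) {
  if (std::isnan(x)) return 0;                                         // NaN lanes -> 0
  if (x >= 2147483648.0f) return std::numeric_limits<int32_t>::max();  // positive overflow
  if (x < -2147483648.0f) return std::numeric_limits<int32_t>::min();  // negative overflow
  return static_cast<int32_t>(x);                                      // truncate toward zero
}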
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4012,12 +3872,14 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4322,11 +4184,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
@@ -4334,11 +4192,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) {
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4376,19 +4230,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
orl(Operand(dst, 0), tmp_gp);
}
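The reworked x64 emit_s128_set_if_nan collapses the per-lane NaN information into the same flag word: an unordered self-compare leaves all ones in every NaN lane, pmovmskb turns those lanes into a byte mask, and the mask is OR-ed into the slot at dst, so the flag becomes non-zero as soon as any lane of any checked value is NaN. A scalar sketch of the f32x4 case (names are illustrative, not V8 API):

#include <cmath>
#include <cstdint>

inline void s128_set_if_nan_f32x4(uint32_t* dst, const float lane[4]) {
  uint32_t byte_mask = 0;
  for (int i = 0; i < 4; ++i) {
    if (std::isnan(lane[i])) {
      byte_mask |= 0xFu << (4 * i);  // cmpunordps + pmovmskb: 4 set bits per NaN lane
    }
  }
  *dst |= byte_mask;  // orl(Operand(dst, 0), tmp_gp)
}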