author      Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-12 14:27:29 +0200
committer   Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-13 09:35:20 +0000
commit      c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree        e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/wasm/baseline/mips64
parent      7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download    qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140 (85-based)
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm/baseline/mips64')
-rw-r--r--   chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h   348
1 file changed, 344 insertions(+), 4 deletions(-)
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 292f8032b8f..70946d3f6b5 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
 void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
                                       ValueType type) {
   DCHECK_NE(dst_offset, src_offset);
-  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
   Fill(reg, src_offset, type);
   Spill(dst_offset, reg, type);
 }
@@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (value.type().kind()) {
     case ValueType::kI32: {
-      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+      LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
       TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
       sw(tmp.gp(), dst);
       break;
     }
     case ValueType::kI64: {
-      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+      LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
       TurboAssembler::li(tmp.gp(), value.to_i64());
       sd(tmp.gp(), dst);
       break;
@@ -1177,6 +1177,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
     case kExprF64ReinterpretI64:
       dmtc1(src.gp(), dst.fp());
       return true;
+    case kExprI32SConvertSatF32:
+      bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+      return true;
+    case kExprI32UConvertSatF32:
+      bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+      return true;
+    case kExprI32SConvertSatF64:
+      bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+      return true;
+    case kExprI32UConvertSatF64:
+      bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+      return true;
+    case kExprI64SConvertSatF32:
+      bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+      return true;
+    case kExprI64UConvertSatF32:
+      bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+      return true;
+    case kExprI64SConvertSatF64:
+      bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+      return true;
+    case kExprI64UConvertSatF64:
+      bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+      return true;
     default:
       return false;
   }
@@ -1297,6 +1321,26 @@ inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
   UNREACHABLE();
 }
 
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+                        LiftoffRegister src) {
+  Label all_false;
+  assm->BranchMSA(&all_false, MSA_BRANCH_V, all_zero, src.fp().toW(),
+                  USE_DELAY_SLOT);
+  assm->li(dst.gp(), 0l);
+  assm->li(dst.gp(), 1);
+  assm->bind(&all_false);
+}
+
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+                        LiftoffRegister src, MSABranchDF msa_branch_df) {
+  Label all_true;
+  assm->BranchMSA(&all_true, msa_branch_df, all_not_zero, src.fp().toW(),
+                  USE_DELAY_SLOT);
+  assm->li(dst.gp(), 1);
+  assm->li(dst.gp(), 0l);
+  assm->bind(&all_true);
+}
+
 }  // namespace liftoff
 
 void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
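The EmitAnyTrue/EmitAllTrue helpers above lean on the MIPS branch delay slot: with USE_DELAY_SLOT, the li that follows BranchMSA executes whether or not the branch is taken, and the second li runs only on fall-through, so dst ends up 0 or 1 without a second branch. For reference, a minimal scalar model of the reductions they implement (function names are ours, not part of the patch):

    #include <cstdint>

    // any_true: 1 if any lane is nonzero; lane width does not matter,
    // which is why EmitAnyTrue always uses MSA_BRANCH_V.
    int AnyTrue(const uint8_t lanes[16]) {
      for (int i = 0; i < 16; ++i) {
        if (lanes[i] != 0) return 1;
      }
      return 0;
    }

    // all_true for i8x16 (MSA_BRANCH_B): 0 as soon as any lane is zero.
    int AllTrueI8x16(const uint8_t lanes[16]) {
      for (int i = 0; i < 16; ++i) {
        if (lanes[i] == 0) return 0;
      }
      return 1;
    }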
@@ -1357,6 +1401,112 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
   bind(&cont);
 }
 
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+                                     Register offset_reg, uint32_t offset_imm,
+                                     LoadType type,
+                                     LoadTransformationKind transform,
+                                     uint32_t* protected_load_pc) {
+  UseScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+  Daddu(scratch, src_addr, offset_reg);
+  MemOperand src_op = MemOperand(scratch, offset_imm);
+  MSARegister dst_msa = dst.fp().toW();
+  *protected_load_pc = pc_offset();
+  MachineType memtype = type.mem_type();
+
+  if (transform == LoadTransformationKind::kExtend) {
+    Ld(scratch, src_op);
+    if (memtype == MachineType::Int8()) {
+      fill_d(dst_msa, scratch);
+      clti_s_b(kSimd128ScratchReg, dst_msa, 0);
+      ilvr_b(dst_msa, kSimd128ScratchReg, dst_msa);
+    } else if (memtype == MachineType::Uint8()) {
+      xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+      fill_d(dst_msa, scratch);
+      ilvr_b(dst_msa, kSimd128RegZero, dst_msa);
+    } else if (memtype == MachineType::Int16()) {
+      fill_d(dst_msa, scratch);
+      clti_s_h(kSimd128ScratchReg, dst_msa, 0);
+      ilvr_h(dst_msa, kSimd128ScratchReg, dst_msa);
+    } else if (memtype == MachineType::Uint16()) {
+      xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+      fill_d(dst_msa, scratch);
+      ilvr_h(dst_msa, kSimd128RegZero, dst_msa);
+    } else if (memtype == MachineType::Int32()) {
+      fill_d(dst_msa, scratch);
+      clti_s_w(kSimd128ScratchReg, dst_msa, 0);
+      ilvr_w(dst_msa, kSimd128ScratchReg, dst_msa);
+    } else if (memtype == MachineType::Uint32()) {
+      xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+      fill_d(dst_msa, scratch);
+      ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
+    }
+  } else {
+    DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+    if (memtype == MachineType::Int8()) {
+      Lb(scratch, src_op);
+      fill_b(dst_msa, scratch);
+    } else if (memtype == MachineType::Int16()) {
+      Lh(scratch, src_op);
+      fill_h(dst_msa, scratch);
+    } else if (memtype == MachineType::Int32()) {
+      Lw(scratch, src_op);
+      fill_w(dst_msa, scratch);
+    } else if (memtype == MachineType::Int64()) {
+      Ld(scratch, src_op);
+      fill_d(dst_msa, scratch);
+    }
+  }
+}
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+                                          LiftoffRegister lhs,
+                                          LiftoffRegister rhs,
+                                          const uint8_t shuffle[16]) {
+  MSARegister dst_msa = dst.fp().toW();
+  MSARegister lhs_msa = lhs.fp().toW();
+  MSARegister rhs_msa = rhs.fp().toW();
+
+  uint64_t control_hi = 0;
+  uint64_t control_low = 0;
+  for (int i = 7; i >= 0; i--) {
+    control_hi <<= 8;
+    control_hi |= shuffle[i + 8];
+    control_low <<= 8;
+    control_low |= shuffle[i];
+  }
+
+  if (dst_msa == lhs_msa) {
+    move_v(kSimd128ScratchReg, lhs_msa);
+    lhs_msa = kSimd128ScratchReg;
+  } else if (dst_msa == rhs_msa) {
+    move_v(kSimd128ScratchReg, rhs_msa);
+    rhs_msa = kSimd128ScratchReg;
+  }
+
+  li(kScratchReg, control_low);
+  insert_d(dst_msa, 0, kScratchReg);
+  li(kScratchReg, control_hi);
+  insert_d(dst_msa, 1, kScratchReg);
+  vshf_b(dst_msa, rhs_msa, lhs_msa);
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+                                          LiftoffRegister lhs,
+                                          LiftoffRegister rhs) {
+  MSARegister dst_msa = dst.fp().toW();
+  MSARegister lhs_msa = lhs.fp().toW();
+  MSARegister rhs_msa = rhs.fp().toW();
+
+  if (dst == lhs) {
+    move_v(kSimd128ScratchReg, lhs_msa);
+    lhs_msa = kSimd128ScratchReg;
+  }
+  xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+  move_v(dst_msa, rhs_msa);
+  vshf_b(dst_msa, kSimd128RegZero, lhs_msa);
+}
+
 void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
                                         LiftoffRegister src) {
   fill_b(dst.fp().toW(), src.gp());
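The control-word packing in emit_s8x16_shuffle above reads more easily as a standalone routine; this is a sketch with our own names (not patch code), assuming the little-endian byte layout that vshf.b expects for its two 64-bit halves:

    #include <cstdint>

    // Byte i of low holds shuffle[i]; byte i of hi holds shuffle[i + 8].
    void PackShuffleControl(const uint8_t shuffle[16], uint64_t* low,
                            uint64_t* hi) {
      uint64_t h = 0, l = 0;
      for (int i = 7; i >= 0; i--) {  // highest byte first: shift, then insert
        h = (h << 8) | shuffle[i + 8];
        l = (l << 8) | shuffle[i];
      }
      *low = l;
      *hi = h;
    }

The two words are then materialized into dst with li/insert_d, and vshf_b selects bytes from the rhs:lhs pair according to each control byte.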
@@ -1567,6 +1717,32 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
   subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
 }
 
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  MSARegister scratch0 = kSimd128RegZero;
+  MSARegister scratch1 = kSimd128ScratchReg;
+  srli_b(scratch0, src.fp().toW(), 7);
+  srli_h(scratch1, scratch0, 7);
+  or_v(scratch0, scratch0, scratch1);
+  srli_w(scratch1, scratch0, 14);
+  or_v(scratch0, scratch0, scratch1);
+  srli_d(scratch1, scratch0, 28);
+  or_v(scratch0, scratch0, scratch1);
+  shf_w(scratch1, scratch0, 0x0E);
+  ilvev_b(scratch0, scratch1, scratch0);
+  copy_u_h(dst.gp(), scratch0, 0);
+}
+
 void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   fill_b(kSimd128ScratchReg, rhs.gp());
@@ -1578,6 +1754,30 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
   slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
 }
 
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_b(kSimd128ScratchReg, rhs.gp());
+  sra_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srai_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_b(kSimd128ScratchReg, rhs.gp());
+  srl_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
 void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1647,6 +1847,31 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
   subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
 }
 
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  MSARegister scratch0 = kSimd128RegZero;
+  MSARegister scratch1 = kSimd128ScratchReg;
+  srli_h(scratch0, src.fp().toW(), 15);
+  srli_w(scratch1, scratch0, 15);
+  or_v(scratch0, scratch0, scratch1);
+  srli_d(scratch1, scratch0, 30);
+  or_v(scratch0, scratch0, scratch1);
+  shf_w(scratch1, scratch0, 0x0E);
+  slli_d(scratch1, scratch1, 4);
+  or_v(scratch0, scratch0, scratch1);
+  copy_u_b(dst.gp(), scratch0, 0);
+}
+
 void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   fill_h(kSimd128ScratchReg, rhs.gp());
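The bitmask emitters above fold the per-lane sign bits together with successive shift/or steps. A scalar model of the i8x16 case (our sketch, not patch code) shows the contract: bit i of the result is the top bit of lane i:

    #include <cstdint>

    uint32_t I8x16BitMask(const int8_t lanes[16]) {
      uint32_t mask = 0;
      for (int i = 0; i < 16; ++i) {
        uint32_t sign = static_cast<uint8_t>(lanes[i]) >> 7;  // 0 or 1
        mask |= sign << i;
      }
      return mask;  // 16 significant bits, read out via copy_u_h above
    }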
@@ -1658,6 +1883,30 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
   slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
 }
 
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_h(kSimd128ScratchReg, rhs.gp());
+  sra_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srai_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_h(kSimd128ScratchReg, rhs.gp());
+  srl_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
 void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1727,6 +1976,29 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
   subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
 }
 
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+                                          LiftoffRegister src) {
+  MSARegister scratch0 = kSimd128RegZero;
+  MSARegister scratch1 = kSimd128ScratchReg;
+  srli_w(scratch0, src.fp().toW(), 31);
+  srli_d(scratch1, scratch0, 31);
+  or_v(scratch0, scratch0, scratch1);
+  shf_w(scratch1, scratch0, 0x0E);
+  slli_d(scratch1, scratch1, 2);
+  or_v(scratch0, scratch0, scratch1);
+  copy_u_b(dst.gp(), scratch0, 0);
+}
+
 void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   fill_w(kSimd128ScratchReg, rhs.gp());
@@ -1738,6 +2010,30 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
   slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
 }
 
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_w(kSimd128ScratchReg, rhs.gp());
+  sra_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srai_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_w(kSimd128ScratchReg, rhs.gp());
+  srl_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
 void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1794,6 +2090,30 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
   slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
 }
 
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_d(kSimd128ScratchReg, rhs.gp());
+  sra_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srai_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+                                        LiftoffRegister lhs,
+                                        LiftoffRegister rhs) {
+  fill_d(kSimd128ScratchReg, rhs.gp());
+  srl_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+                                         LiftoffRegister lhs, int32_t rhs) {
+  srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
 void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
                                       LiftoffRegister rhs) {
   addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
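All of the shift emitters mask the count with the lane width minus one (rhs & 7, & 15, & 31, & 63) because Wasm SIMD defines shift counts modulo the lane width. A one-line scalar model (ours) of the i64x2 arithmetic right shift:

    #include <cstdint>

    int64_t I64x2ShrSLane(int64_t lane, int32_t count) {
      return lane >> (count & 63);  // count mod 64, arithmetic shift
    }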
@@ -1959,6 +2279,26 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
   bsel_v(dst_msa, scratch0, scratch1);
 }
 
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+                                                 LiftoffRegister src) {
+  ftrunc_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+                                                 LiftoffRegister src) {
+  ftrunc_u_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+                                                 LiftoffRegister src) {
+  ffint_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+                                                 LiftoffRegister src) {
+  ffint_u_w(dst.fp().toW(), src.fp().toW());
+}
+
 void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
                                                  LiftoffRegister lhs,
                                                  LiftoffRegister rhs) {
@@ -2197,7 +2537,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
 }
 
 void LiftoffAssembler::CallTrapCallbackForTesting() {
-  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+  PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
   CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
 }
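Note the split in this patch: the lane-wise f32x4/i32x4 conversions map to single MSA instructions (ftrunc_s_w/ftrunc_u_w, which saturate as Wasm requires), while the scalar kExpr*ConvertSat* opcodes earlier in the diff bail out of Liftoff entirely. For reference, a scalar model (ours, not patch code) of the saturating semantics those bailouts leave to the slower tier:

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t I32SConvertSatF32(float x) {
      if (std::isnan(x)) return 0;    // NaN saturates to 0, no trap
      if (x >= 2147483648.0f)         // 2^31 is exactly representable
        return std::numeric_limits<int32_t>::max();
      if (x < -2147483648.0f)
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(x);  // in range: truncate toward zero
    }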