path: root/deps/v8/src/wasm/baseline
author     Michaël Zasso <targos@protonmail.com>   2021-02-24 14:47:06 +0100
committer  Michaël Zasso <targos@protonmail.com>   2021-02-25 00:14:47 +0100
commit     c5ff019a4e93891106859cb272ded1197a92c7e5 (patch)
tree       caf6b7e50b0ceac09878ac4402d9f725b8685dd7 /deps/v8/src/wasm/baseline
parent     67dc2bf2084b125dec43689874d237d125562cdf (diff)
download   node-new-c5ff019a4e93891106859cb272ded1197a92c7e5.tar.gz
deps: update V8 to 8.9.255.19
PR-URL: https://github.com/nodejs/node/pull/37330
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm/baseline')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h        244
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h    259
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h      309
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler-defs.h            63
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc                 83
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h                 224
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc                1313
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h                    3
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h       242
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h   219
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h         163
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h       364
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h         369
13 files changed, 3059 insertions, 796 deletions
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index af969f387e..bee45ad9af 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
+#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
@@ -15,6 +16,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedLessEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnsignedGreaterEqual:
+ return hs;
+ }
+}
+
// half
// slot Frame
// -----+--------------------+---------------------------
@@ -71,7 +97,7 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
inline Register CalculateActualAddress(LiftoffAssembler* assm,
UseScratchRegisterScope* temps,
Register addr_reg, Register offset_reg,
- int32_t offset_imm,
+ uintptr_t offset_imm,
Register result_reg = no_reg) {
if (offset_reg == no_reg && offset_imm == 0) {
if (result_reg == no_reg) {
@@ -94,7 +120,7 @@ inline Register CalculateActualAddress(LiftoffAssembler* assm,
return actual_addr_reg;
}
-inline Condition MakeUnsigned(Condition cond) {
+inline LiftoffCondition MakeUnsigned(LiftoffCondition cond) {
switch (cond) {
case kSignedLessThan:
return kUnsignedLessThan;
@@ -138,14 +164,14 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
void (Assembler::*op_with_carry)(Register, Register, const Operand&,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, int32_t imm) {
+ LiftoffRegister lhs, int64_t imm) {
// The compiler allocated registers such that either {dst == lhs} or there is
// no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
- (assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
- // Top half of the immediate sign extended, either 0 or -1.
- int32_t sign_extend = imm < 0 ? -1 : 0;
- (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(sign_extend),
+ int32_t imm_low_word = static_cast<int32_t>(imm);
+ int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm_low_word), SetCC, al);
+ (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(imm_high_word),
LeaveCC, al);
}
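
Note on the hunk above: the immediate is widened from int32_t to int64_t, so instead of sign-extending a 32-bit value into the high word (the deleted sign_extend line), the immediate is split into two real 32-bit halves that feed the add / add-with-carry pair. A minimal standalone sketch of that split (plain C++, not V8 code):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Standalone sketch (not V8 code): splitting a 64-bit immediate into the two
// 32-bit words consumed by the 32-bit op + op-with-carry pair on ARM.
int main() {
  int64_t imm = INT64_C(-5);                                  // 0xFFFFFFFFFFFFFFFB
  uint32_t imm_low_word = static_cast<uint32_t>(imm);         // 0xFFFFFFFB
  uint32_t imm_high_word = static_cast<uint32_t>(imm >> 32);  // 0xFFFFFFFF
  // The old int32_t version could only produce a high word of 0 or -1; with a
  // full int64_t both words can carry arbitrary bits.
  std::printf("low=0x%08" PRIX32 " high=0x%08" PRIX32 "\n", imm_low_word,
              imm_high_word);
  return 0;
}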
@@ -277,6 +303,7 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
assm->str(src.gp(), dst);
break;
case ValueType::kI64:
@@ -310,6 +337,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
assm->ldr(dst.gp(), src);
break;
case ValueType::kI64:
@@ -445,17 +473,26 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
Pop(lr, fp);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
-#ifdef USE_SIMULATOR
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+
// When using the simulator, deal with Liftoff which allocates the stack
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
+ // Note: This check is only needed for simulator runs, but we run it
+ // unconditionally to make sure that the simulator executes the same code
+ // that's also executed on native hardware (see https://crbug.com/v8/11041).
if (frame_size > KB / 2) {
bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
-#endif
+
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset,
liftoff::kPatchInstructionsRequired);
@@ -661,13 +698,18 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
- // Store the value.
- MemOperand dst_op(dst_addr, offset_imm);
- str(src.gp(), dst_op);
+ {
+ // Store the value.
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
+ str(src.gp(), dst_op);
+ }
// The write barrier.
Label write_barrier;
Label exit;
@@ -1462,7 +1504,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
}
I32_BINOP_I(i32_add, add)
-I32_BINOP(i32_sub, sub)
+I32_BINOP_I(i32_sub, sub)
I32_BINOP(i32_mul, mul)
I32_BINOP_I(i32_and, and_)
I32_BINOP_I(i32_or, orr)
@@ -1624,7 +1666,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+ int64_t imm) {
liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -2118,25 +2160,40 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
void LiftoffAssembler::emit_jump(Register target) { bx(target); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
- DCHECK_EQ(type, kWasmI32);
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+
if (rhs == no_reg) {
+ DCHECK_EQ(type, kWasmI32);
cmp(lhs, Operand(0));
} else {
+ DCHECK(type == kWasmI32 ||
+ (type.is_reference_type() &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
cmp(lhs, rhs);
}
b(label, cond);
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ cmp(lhs, Operand(imm));
+ b(label, cond);
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
clz(dst, src);
mov(dst, Operand(dst, LSR, kRegSizeInBitsLog2));
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
cmp(lhs, rhs);
mov(dst, Operand(0), LeaveCC);
mov(dst, Operand(1), LeaveCC, cond);
@@ -2148,13 +2205,15 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
mov(dst, Operand(dst, LSR, 5));
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// For signed i64 comparisons, we still need to use unsigned comparison for
// the low word (the only bit carrying signedness information is the MSB in
// the high word).
- Condition unsigned_cond = liftoff::MakeUnsigned(cond);
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Condition unsigned_cond =
+ liftoff::ToCondition(liftoff::MakeUnsigned(liftoff_cond));
Label set_cond;
Label cont;
LiftoffRegister dest = LiftoffRegister(dst);
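
The comment in the hunk above notes that a signed 64-bit comparison still uses the unsigned condition for the low word, because only the high word's MSB carries sign information. A standalone illustration of why a signed low-word compare would be wrong (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // High words are equal (both 1), so the low words decide the result.
  int64_t lhs = INT64_C(0x0000000180000000);  // 6442450944
  int64_t rhs = INT64_C(0x0000000100000001);  // 4294967297
  uint32_t lhs_lo = static_cast<uint32_t>(lhs);  // 0x80000000
  uint32_t rhs_lo = static_cast<uint32_t>(rhs);  // 0x00000001
  assert(lhs > rhs);        // the true 64-bit answer
  assert(lhs_lo > rhs_lo);  // unsigned low-word compare agrees
  assert(static_cast<int32_t>(lhs_lo) < static_cast<int32_t>(rhs_lo));
  // ^ a signed low-word compare would flip the result, since 0x80000000 is
  //   INT32_MIN when reinterpreted as int32_t.
  return 0;
}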
@@ -2166,7 +2225,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
// equal, compare the low word and use that for set_cond.
cmp(lhs.high_gp(), rhs.high_gp());
if (unsigned_cond == cond) {
- cmp(lhs.low_gp(), rhs.low_gp(), kEqual);
+ cmp(lhs.low_gp(), rhs.low_gp(), eq);
if (!speculative_move) {
mov(dst, Operand(0));
}
@@ -2190,9 +2249,10 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
}
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
VFPCompareAndSetFlags(liftoff::GetFloatRegister(lhs),
liftoff::GetFloatRegister(rhs));
mov(dst, Operand(0), LeaveCC);
@@ -2203,9 +2263,10 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
}
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
VFPCompareAndSetFlags(lhs, rhs);
mov(dst, Operand(0), LeaveCC);
mov(dst, Operand(1), LeaveCC, cond);
@@ -2222,8 +2283,15 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ tst(obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target);
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
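
The emit_smi_check added above tests the low bit of the tagged value. Assuming V8's usual tagging constants (kSmiTag == 0, kHeapObjectTag == 1, kSmiTagMask == 1 — stated here as an assumption), tst obj, #1 followed by an eq/ne branch separates small integers from heap objects. A scalar sketch (not V8 code):

#include <cstdint>

// Standalone sketch, assuming kSmiTagMask == 1 and kSmiTag == 0: a tagged
// value with its low bit clear is a Smi, low bit set is a heap object pointer.
constexpr uintptr_t kSmiTagMask = 1;

inline bool IsSmi(uintptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
inline bool IsHeapObject(uintptr_t tagged) { return !IsSmi(tagged); }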
@@ -2290,6 +2358,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2785,6 +2860,39 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
vmlal(NeonU32, dst_neon, tmp1.low(), tmp2.low());
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS32, liftoff::GetSimd128Register(dst), src1.low_fp(),
+ src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU32, liftoff::GetSimd128Register(dst), src1.low_fp(),
+ src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS32, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU32, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon32, liftoff::GetSimd128Register(dst), src.gp());
@@ -2950,6 +3058,34 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
}
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS16, liftoff::GetSimd128Register(dst), src1.low_fp(),
+ src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU16, liftoff::GetSimd128Register(dst), src1.low_fp(),
+ src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS16, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU16, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon16, liftoff::GetSimd128Register(dst), src.gp());
@@ -3135,6 +3271,32 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
imm_lane_idx);
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS8, liftoff::GetSimd128Register(dst), src1.low_fp(), src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU8, liftoff::GetSimd128Register(dst), src1.low_fp(), src2.low_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonS8, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vmull(NeonU8, liftoff::GetSimd128Register(dst), src1.high_fp(),
+ src2.high_fp());
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -3530,7 +3692,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- memcpy(vals, imms, sizeof(vals));
+ base::Memcpy(vals, imms, sizeof(vals));
vmov(dst.low_fp(), Double(vals[0]));
vmov(dst.high_fp(), Double(vals[1]));
}
@@ -3757,6 +3919,24 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Drop(num_stack_slots);
Ret();
@@ -3929,6 +4109,8 @@ void LiftoffStackSlots::Construct() {
asm_->push(reg.gp());
} break;
case ValueType::kI32:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
asm_->push(src.reg().gp());
break;
case ValueType::kF32:
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 402f0d2e84..815586ecd1 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
+#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -14,6 +15,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedLessEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnsignedGreaterEqual:
+ return hs;
+ }
+}
+
// Liftoff Frames.
//
// slot Frame
@@ -53,6 +79,7 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
return reg.gp().X();
case ValueType::kF32:
return reg.fp().S();
@@ -98,6 +125,8 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
if (offset.is_valid()) {
if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
Register tmp = temps->AcquireW();
+ // TODO(clemensb): Do a 64-bit addition if memory64 is used.
+ DCHECK_GE(kMaxUInt32, offset_imm);
assm->Add(tmp, offset.W(), offset_imm);
return MemOperand(addr.X(), tmp, UXTW);
}
@@ -179,6 +208,10 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
InstructionAccurateScope scope(this, 1);
+ // Next we reserve the memory for the whole stack frame. We do not know yet
+ // how big the stack frame will be so we just emit a placeholder instruction.
+ // PatchPrepareStackFrame will patch this in order to increase the stack
+ // appropriately.
sub(sp, sp, 0);
return offset;
}
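
The new comment above describes a two-phase frame setup: PrepareStackFrame emits a placeholder sub sp, sp, #0 and returns its offset, and PatchPrepareStackFrame later rewrites that instruction once the real frame size is known. A toy sketch of the pattern (all names here are illustrative and made up; this is not V8 API):

#include <vector>

// Toy illustration of the placeholder-and-patch pattern described above.
class ToyAssembler {
 public:
  int PrepareStackFrame() {
    int offset = static_cast<int>(code_.size());
    code_.push_back(0);  // placeholder: real frame size not known yet
    return offset;
  }
  void PatchPrepareStackFrame(int offset, int frame_size) {
    code_[offset] = frame_size;  // overwrite the placeholder afterwards
  }

 private:
  std::vector<int> code_;  // stand-in for the emitted instruction stream
};

int main() {
  ToyAssembler masm;
  int patch_offset = masm.PrepareStackFrame();
  // ... compile the body, allocating spill slots as needed ...
  masm.PatchPrepareStackFrame(patch_offset, /*frame_size=*/48);
  return 0;
}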
@@ -218,7 +251,13 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
Sub(sp, x16, stack_param_delta * 8);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int initial_frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+ int frame_size = initial_frame_size;
+
static_assert(kStackSlotSize == kXRegSize,
"kStackSlotSize must equal kXRegSize");
// The stack pointer is required to be quadword aligned.
@@ -234,6 +273,23 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
return;
}
}
+ if (frame_size > initial_frame_size) {
+ // Record the padding, as it is needed for GC offsets later.
+ max_used_spill_offset_ += (frame_size - initial_frame_size);
+ }
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+ // The stack pointer is required to be quadword aligned.
+ // Misalignment will cause a stack alignment fault.
+ DCHECK_EQ(frame_size, RoundUp(frame_size, kQuadWordSizeInBytes));
+ DCHECK(IsImmAddSub(frame_size));
+
#ifdef USE_SIMULATOR
// When using the simulator, deal with Liftoff which allocates the stack
// before checking it.
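
The AlignFrameSize / PatchPrepareStackFrame changes above round the frame up to the arm64 quadword (16-byte) stack alignment and record any padding in max_used_spill_offset_ so later GC/spill offsets match the real frame layout. A standalone arithmetic check of the rounding (not V8 code):

#include <cassert>

constexpr int kQuadWordSizeInBytes = 16;

constexpr int RoundUpToQuadWord(int n) {
  return (n + kQuadWordSizeInBytes - 1) & ~(kQuadWordSizeInBytes - 1);
}

int main() {
  int initial_frame_size = 40;  // e.g. five 8-byte spill slots
  int frame_size = RoundUpToQuadWord(initial_frame_size);  // 48
  assert(frame_size - initial_frame_size == 8);  // 8 bytes of padding recorded
  return 0;
}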
@@ -349,11 +405,14 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
// Store the value.
- MemOperand dst_op(dst_addr, offset_imm);
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op =
+ liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
// The write barrier.
Label write_barrier;
@@ -374,7 +433,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
UseScratchRegisterScope temps(this);
@@ -425,7 +484,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
UseScratchRegisterScope temps(this);
@@ -465,7 +524,7 @@ namespace liftoff {
inline Register CalculateActualAddress(LiftoffAssembler* lasm,
Register addr_reg, Register offset_reg,
- int32_t offset_imm,
+ uintptr_t offset_imm,
Register result_reg) {
DCHECK_NE(offset_reg, no_reg);
DCHECK_NE(addr_reg, no_reg);
@@ -479,7 +538,7 @@ inline Register CalculateActualAddress(LiftoffAssembler* lasm,
enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LiftoffRegister value, LiftoffRegister result,
StoreType type, Binop op) {
LiftoffRegList pinned =
@@ -576,7 +635,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
} // namespace liftoff
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
UseScratchRegisterScope temps(this);
Register src_reg = liftoff::CalculateActualAddress(
@@ -603,7 +662,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
UseScratchRegisterScope temps(this);
Register dst_reg = liftoff::CalculateActualAddress(
@@ -630,42 +689,42 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
@@ -673,7 +732,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicCompareExchange(
- Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
LiftoffRegList pinned =
@@ -956,7 +1015,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
}
I32_BINOP_I(i32_add, Add)
-I32_BINOP(i32_sub, Sub)
+I32_BINOP_I(i32_sub, Sub)
I32_BINOP(i32_mul, Mul)
I32_BINOP_I(i32_and, And)
I32_BINOP_I(i32_or, Orr)
@@ -964,7 +1023,7 @@ I32_BINOP_I(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP(i32_shr, Lsr)
-I64_BINOP_I(i64_add, Add)
+I64_BINOP(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
I64_BINOP_I(i64_and, And)
@@ -1010,6 +1069,11 @@ FP64_UNOP(f64_sqrt, Fsqrt)
#undef I32_SHIFTOP
#undef I64_SHIFTOP
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ Add(dst.gp().X(), lhs.gp().X(), imm);
+}
+
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
Clz(dst.W(), src.W());
}
@@ -1398,9 +1462,10 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
switch (type.kind()) {
case ValueType::kI32:
if (rhs.is_valid()) {
@@ -1409,6 +1474,12 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
Cmp(lhs.W(), wzr);
}
break;
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ DCHECK(rhs.is_valid());
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
case ValueType::kI64:
if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X());
@@ -1422,13 +1493,23 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
B(label, cond);
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Cmp(lhs.W(), Operand(imm));
+ B(label, cond);
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
Cmp(src.W(), wzr);
Cset(dst.W(), eq);
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Cmp(lhs.W(), rhs.W());
Cset(dst.W(), cond);
}
@@ -1438,16 +1519,18 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
Cset(dst.W(), eq);
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Cmp(lhs.gp().X(), rhs.gp().X());
Cset(dst.W(), cond);
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Fcmp(lhs.S(), rhs.S());
Cset(dst.W(), cond);
if (cond != ne) {
@@ -1456,9 +1539,10 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
}
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Fcmp(lhs.D(), rhs.D());
Cset(dst.W(), cond);
if (cond != ne) {
@@ -1474,8 +1558,15 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ Label* smi_label = mode == kJumpOnSmi ? target : nullptr;
+ Label* not_smi_label = mode == kJumpOnNotSmi ? target : nullptr;
+ JumpIfSmi(obj, smi_label, not_smi_label);
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
@@ -1539,6 +1630,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1890,6 +1988,35 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V2D(), dst.fp().V2D(), tmp1.V2D());
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull(dst.fp().V2D(), src1.fp().V2S(), src2.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull(dst.fp().V2D(), src1.fp().V2S(), src2.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull2(dst.fp().V2D(), src1.fp().V4S(), src2.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull2(dst.fp().V2D(), src1.fp().V4S(), src2.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V4S(), src.gp().W());
@@ -2031,6 +2158,30 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
Addp(dst.fp().V4S(), tmp1, tmp2);
}
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull2(dst.fp().V4S(), src1.fp().V8H(), src2.fp().V8H());
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull2(dst.fp().V4S(), src1.fp().V8H(), src2.fp().V8H());
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V8H(), src.gp().W());
@@ -2532,7 +2683,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- memcpy(vals, imms, sizeof(vals));
+ base::Memcpy(vals, imms, sizeof(vals));
Movi(dst.fp().V16B(), vals[1], vals[0]);
}
@@ -2709,6 +2860,30 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
Abs(dst.fp().V8H(), src.fp().V8H());
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull(dst.fp().V8H(), src1.fp().V8B(), src2.fp().V8B());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull(dst.fp().V8H(), src1.fp().V8B(), src2.fp().V8B());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Smull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Umull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
+}
+
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
Abs(dst.fp().V4S(), src.fp().V4S());
@@ -2738,6 +2913,30 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
PopCPURegList(liftoff::PadRegList(regs.GetGpList()));
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ bool needs_padding =
+ (base::bits::CountPopulation(all_spills.GetGpList()) & 1) != 0;
+ if (needs_padding) {
+ spill_space_size += kSystemPointerSize;
+ ++spill_offset;
+ }
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DropSlots(num_stack_slots);
Ret();
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 5e640093c4..890337fe12 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
#define V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
+#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -21,6 +22,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return equal;
+ case kUnequal:
+ return not_equal;
+ case kSignedLessThan:
+ return less;
+ case kSignedLessEqual:
+ return less_equal;
+ case kSignedGreaterThan:
+ return greater;
+ case kSignedGreaterEqual:
+ return greater_equal;
+ case kUnsignedLessThan:
+ return below;
+ case kUnsignedLessEqual:
+ return below_equal;
+ case kUnsignedGreaterThan:
+ return above;
+ case kUnsignedGreaterEqual:
+ return above_equal;
+ }
+}
+
// ebp-4 holds the stack marker, ebp-8 is the instance parameter.
constexpr int kInstanceOffset = 8;
@@ -45,6 +71,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
assm->mov(dst.gp(), src);
break;
case ValueType::kI64:
@@ -93,6 +120,8 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type.kind()) {
case ValueType::kI32:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
assm->push(reg.gp());
break;
case ValueType::kI64:
@@ -158,6 +187,10 @@ constexpr int kSubSpSize = 6; // 6 bytes for "sub esp, <imm32>"
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
+ // Next we reserve the memory for the whole stack frame. We do not know yet
+ // how big the stack frame will be so we just emit a placeholder instruction.
+ // PatchPrepareStackFrame will patch this in order to increase the stack
+ // appropriately.
sub_sp_32(0);
DCHECK_EQ(liftoff::kSubSpSize, pc_offset() - offset);
return offset;
@@ -184,7 +217,13 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
pop(ebp);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
DCHECK_EQ(frame_size % kSystemPointerSize, 0);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
@@ -229,7 +268,8 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.element_size_bytes();
+ return type.is_reference_type() ? kSystemPointerSize
+ : type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
@@ -291,6 +331,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
@@ -298,7 +339,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
STATIC_ASSERT(kTaggedSize == kInt32Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- Operand dst_op = Operand(dst_addr, offset_imm);
+ Operand dst_op = offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm);
mov(dst_op, src.gp());
Label write_barrier;
@@ -1075,7 +1118,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
movsd(dst, src);
} else {
DCHECK_EQ(kWasmS128, type);
- movapd(dst, src);
+ Movaps(dst, src);
}
}
@@ -1086,6 +1129,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI32:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
mov(dst, reg.gp());
break;
case ValueType::kI64:
@@ -1195,6 +1239,16 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_subi(Register dst, Register lhs, int32_t imm) {
+ if (dst != lhs) {
+ // We'll have to implement an UB-safe version if we need this corner case.
+ DCHECK_NE(imm, kMinInt);
+ lea(dst, Operand(lhs, -imm));
+ } else {
+ sub(dst, Immediate(imm));
+ }
+}
+
namespace liftoff {
template <void (Assembler::*op)(Register, Register)>
void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
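
The emit_i32_subi added in the hunk above folds dst = lhs - imm into lea dst, [lhs + (-imm)] when dst and lhs differ, which is why it must refuse imm == kMinInt: negating INT32_MIN overflows, so the -imm displacement cannot be formed. A standalone illustration (not V8 code):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  // dst != lhs case: compute lhs - imm without clobbering lhs, as lea does.
  int32_t lhs = 100, imm = 42;
  int32_t dst = lhs + (-imm);  // 58; lhs is left untouched
  assert(dst == 58 && lhs == 100);
  // The guarded corner case: -INT32_MIN is not representable as int32_t, so
  // the displacement cannot be formed for imm == kMinInt.
  static_assert(std::numeric_limits<int32_t>::min() == -2147483647 - 1,
                "two's complement int32");
  return 0;
}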
@@ -1453,20 +1507,21 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, int32_t imm) {
+ LiftoffRegister lhs, int64_t imm) {
// The compiler allocated registers such that either {dst == lhs} or there is
// no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
+ int32_t imm_low_word = static_cast<int32_t>(imm);
+ int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
+
// First, compute the low half of the result.
if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
- (assm->*op)(dst.low_gp(), Immediate(imm));
+ (assm->*op)(dst.low_gp(), Immediate(imm_low_word));
// Now compute the upper half.
if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
- // Top half of the immediate sign extended, either 0 or -1.
- int32_t sign_extend = imm < 0 ? -1 : 0;
- (assm->*op_with_carry)(dst.high_gp(), sign_extend);
+ (assm->*op_with_carry)(dst.high_gp(), imm_high_word);
}
} // namespace liftoff
@@ -1476,7 +1531,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+ int64_t imm) {
liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -2330,11 +2385,17 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (type.kind()) {
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
case ValueType::kI32:
cmp(lhs, rhs);
break;
@@ -2349,6 +2410,14 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
j(cond, label);
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ cmp(lhs, Immediate(imm));
+ j(cond, label);
+}
+
namespace liftoff {
// Setcc into dst register, given a scratch byte register (might be the same as
@@ -2372,8 +2441,10 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
liftoff::setcc_32(this, equal, dst);
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
cmp(lhs, rhs);
liftoff::setcc_32(this, cond, dst);
}
@@ -2391,7 +2462,7 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
}
namespace liftoff {
-inline Condition cond_make_unsigned(Condition cond) {
+inline LiftoffCondition cond_make_unsigned(LiftoffCondition cond) {
switch (cond) {
case kSignedLessThan:
return kUnsignedLessThan;
@@ -2407,9 +2478,13 @@ inline Condition cond_make_unsigned(Condition cond) {
}
} // namespace liftoff
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Condition unsigned_cond =
+ liftoff::ToCondition(liftoff::cond_make_unsigned(liftoff_cond));
+
// Get the tmp byte register out here, such that we don't conditionally spill
// (this cannot be reflected in the cache state).
Register tmp_byte_reg = liftoff::GetTmpByteRegister(this, dst);
@@ -2417,7 +2492,6 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
// For signed i64 comparisons, we still need to use unsigned comparison for
// the low word (the only bit carrying signedness information is the MSB in
// the high word).
- Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
Label setcc;
Label cont;
// Compare high word first. If it differs, use if for the setcc. If it's
@@ -2464,15 +2538,17 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&Assembler::ucomiss>(this, cond, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
@@ -2483,6 +2559,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ test_b(obj, Immediate(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? zero : not_zero;
+ j(condition, target);
+}
+
namespace liftoff {
template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void (Assembler::*sse_op)(XMMRegister, XMMRegister)>
@@ -2627,7 +2710,7 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
@@ -2676,6 +2759,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -3013,7 +3103,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- memcpy(vals, imms, sizeof(vals));
+ base::Memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
uint64_t high = vals[1];
@@ -3057,17 +3147,16 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(liftoff::kScratchDoubleReg, src1.fp(), src2.fp());
- vandps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, mask.fp());
- vxorps(dst.fp(), liftoff::kScratchDoubleReg, src2.fp());
+ // Ensure that we don't overwrite any inputs with the movdqu below.
+ DCHECK_NE(dst, src1);
+ DCHECK_NE(dst, src2);
+ if (!CpuFeatures::IsSupported(AVX) && dst != mask) {
+ movdqu(dst.fp(), mask.fp());
+ S128Select(dst.fp(), dst.fp(), src1.fp(), src2.fp(),
+ liftoff::kScratchDoubleReg);
} else {
- movaps(liftoff::kScratchDoubleReg, src1.fp());
- xorps(liftoff::kScratchDoubleReg, src2.fp());
- andps(liftoff::kScratchDoubleReg, mask.fp());
- if (dst.fp() != src2.fp()) movaps(dst.fp(), src2.fp());
- xorps(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Select(dst.fp(), mask.fp(), src1.fp(), src2.fp(),
+ liftoff::kScratchDoubleReg);
}
}
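
The removed hand-written path above computed the select as src2 ^ ((src1 ^ src2) & mask) — bits from src1 where the mask bit is 1, from src2 where it is 0 — and the new code delegates the same operation to the shared S128Select helper. A scalar sketch of the identity (not V8 code):

#include <cassert>
#include <cstdint>

// Scalar sketch of the bit-select identity behind s128.select: xor/and/xor
// picks bits from src1 where the mask bit is 1 and from src2 where it is 0.
uint64_t BitSelect(uint64_t src1, uint64_t src2, uint64_t mask) {
  return src2 ^ ((src1 ^ src2) & mask);
}

int main() {
  uint64_t src1 = 0xAAAAAAAAAAAAAAAAull;
  uint64_t src2 = 0x5555555555555555ull;
  uint64_t mask = 0x00000000FFFFFFFFull;
  uint64_t expected = (src1 & mask) | (src2 & ~mask);
  assert(BitSelect(src1, src2, mask) == expected);
  return 0;
}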
@@ -3457,6 +3546,34 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/true, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/true, /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/false, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/false, /*is_signed=*/false);
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3574,6 +3691,61 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+namespace liftoff {
+// Helper function to check for register aliasing, AVX support, and moves
+// registers around before calling the actual macro-assembler function.
+inline void I32x4ExtMulHelper(LiftoffAssembler* assm, XMMRegister dst,
+ XMMRegister src1, XMMRegister src2, bool low,
+ bool is_signed) {
+ // I32x4ExtMul requires dst == src1 if AVX is not supported.
+ if (CpuFeatures::IsSupported(AVX) || dst == src1) {
+ assm->I32x4ExtMul(dst, src1, src2, liftoff::kScratchDoubleReg, low,
+ is_signed);
+ } else if (dst != src2) {
+ // dst != src1 && dst != src2
+ assm->movaps(dst, src1);
+ assm->I32x4ExtMul(dst, dst, src2, liftoff::kScratchDoubleReg, low,
+ is_signed);
+ } else {
+ // dst == src2
+ // Extended multiplication is commutative,
+ assm->movaps(dst, src2);
+ assm->I32x4ExtMul(dst, dst, src1, liftoff::kScratchDoubleReg, low,
+ is_signed);
+ }
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(),
+ /*low=*/false,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(),
+ /*low=*/false,
+ /*is_signed=*/false);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg =
@@ -3584,7 +3756,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
vpsubq(dst.fp(), reg, src.fp());
} else {
psubq(reg, src.fp());
- if (dst.fp() != reg) movapd(dst.fp(), reg);
+ if (dst.fp() != reg) movaps(dst.fp(), reg);
}
}
@@ -3698,6 +3870,39 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Paddq(dst.fp(), dst.fp(), tmp2.fp());
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/true, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/true, /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/false, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg,
+ /*low=*/false, /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskpd(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3947,13 +4152,13 @@ void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
vminpd(dst.fp(), rhs.fp(), lhs.fp());
} else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movapd(liftoff::kScratchDoubleReg, src);
+ movaps(liftoff::kScratchDoubleReg, src);
minpd(liftoff::kScratchDoubleReg, dst.fp());
minpd(dst.fp(), src);
} else {
- movapd(liftoff::kScratchDoubleReg, lhs.fp());
+ movaps(liftoff::kScratchDoubleReg, lhs.fp());
minpd(liftoff::kScratchDoubleReg, rhs.fp());
- movapd(dst.fp(), rhs.fp());
+ movaps(dst.fp(), rhs.fp());
minpd(dst.fp(), lhs.fp());
}
// propagate -0's and NaNs, which may be non-canonical.
@@ -3975,13 +4180,13 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
} else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movapd(liftoff::kScratchDoubleReg, src);
+ movaps(liftoff::kScratchDoubleReg, src);
maxpd(liftoff::kScratchDoubleReg, dst.fp());
maxpd(dst.fp(), src);
} else {
- movapd(liftoff::kScratchDoubleReg, lhs.fp());
+ movaps(liftoff::kScratchDoubleReg, lhs.fp());
maxpd(liftoff::kScratchDoubleReg, rhs.fp());
- movapd(dst.fp(), rhs.fp());
+ movaps(dst.fp(), rhs.fp());
maxpd(dst.fp(), lhs.fp());
}
// Find discrepancies.
@@ -4068,7 +4273,7 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
Cvttps2dq(tmp, tmp);
Pxor(tmp, liftoff::kScratchDoubleReg);
Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pmaxsd(tmp, liftoff::kScratchDoubleReg);
+ Pmaxsd(tmp, tmp, liftoff::kScratchDoubleReg);
// Convert to int. Overflow lanes above max_signed will be 0x80000000.
Cvttps2dq(dst.fp(), dst.fp());
// Add (src-max_signed) for overflow lanes.
@@ -4429,6 +4634,24 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 781fb87dbc..a4d7fd1221 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -69,66 +69,19 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = CPURegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d16, d17,
d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29);
-#else
-
-constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+#elif V8_TARGET_ARCH_S390X
-constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
-
-#endif
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(r2, r3, r4, r5, r6, r7, r8);
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-
-constexpr Condition kEqual = equal;
-constexpr Condition kUnequal = not_equal;
-constexpr Condition kSignedLessThan = less;
-constexpr Condition kSignedLessEqual = less_equal;
-constexpr Condition kSignedGreaterThan = greater;
-constexpr Condition kSignedGreaterEqual = greater_equal;
-constexpr Condition kUnsignedLessThan = below;
-constexpr Condition kUnsignedLessEqual = below_equal;
-constexpr Condition kUnsignedGreaterThan = above;
-constexpr Condition kUnsignedGreaterEqual = above_equal;
-
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-
-constexpr Condition kEqual = eq;
-constexpr Condition kUnequal = ne;
-constexpr Condition kSignedLessThan = lt;
-constexpr Condition kSignedLessEqual = le;
-constexpr Condition kSignedGreaterThan = gt;
-constexpr Condition kSignedGreaterEqual = ge;
-constexpr Condition kUnsignedLessThan = ult;
-constexpr Condition kUnsignedLessEqual = ule;
-constexpr Condition kUnsignedGreaterThan = ugt;
-constexpr Condition kUnsignedGreaterEqual = uge;
-
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
-
-constexpr Condition kEqual = eq;
-constexpr Condition kUnequal = ne;
-constexpr Condition kSignedLessThan = lt;
-constexpr Condition kSignedLessEqual = le;
-constexpr Condition kSignedGreaterThan = gt;
-constexpr Condition kSignedGreaterEqual = ge;
-constexpr Condition kUnsignedLessThan = lo;
-constexpr Condition kUnsignedLessEqual = ls;
-constexpr Condition kUnsignedGreaterThan = hi;
-constexpr Condition kUnsignedGreaterEqual = hs;
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
#else
-// On unimplemented platforms, just make this compile.
-constexpr Condition kEqual = static_cast<Condition>(0);
-constexpr Condition kUnequal = static_cast<Condition>(0);
-constexpr Condition kSignedLessThan = static_cast<Condition>(0);
-constexpr Condition kSignedLessEqual = static_cast<Condition>(0);
-constexpr Condition kSignedGreaterThan = static_cast<Condition>(0);
-constexpr Condition kSignedGreaterEqual = static_cast<Condition>(0);
-constexpr Condition kUnsignedLessThan = static_cast<Condition>(0);
-constexpr Condition kUnsignedLessEqual = static_cast<Condition>(0);
-constexpr Condition kUnsignedGreaterThan = static_cast<Condition>(0);
-constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index dea5221ac6..587430a107 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -7,6 +7,7 @@
#include <sstream>
#include "src/base/optional.h"
+#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
@@ -495,21 +496,45 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
+namespace {
+int GetSafepointIndexForStackSlot(const VarState& slot) {
+ // index = 0 is for the stack slot at 'fp + kFixedFrameSizeAboveFp -
+ // kSystemPointerSize', the location of the current stack slot is 'fp -
+ // slot.offset()'. The index we need is therefore '(fp +
+ // kFixedFrameSizeAboveFp - kSystemPointerSize) - (fp - slot.offset())' =
+ // 'slot.offset() + kFixedFrameSizeAboveFp - kSystemPointerSize'.
+ // Concretely, the index of the first stack slot is '4'.
+ return (slot.offset() + StandardFrameConstants::kFixedFrameSizeAboveFp -
+ kSystemPointerSize) /
+ kSystemPointerSize;
+}
+} // namespace
+
+void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
+ ZoneVector<int>* slots, LiftoffRegList* spills,
+ SpillLocation spill_location) {
+ for (const auto& slot : stack_state) {
+ if (!slot.type().is_reference_type()) continue;
+
+ if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
+ // Registers get spilled just before the call to the runtime. In {spills}
+ // we store which of the spilled registers contain references, so that we
+ // can add the spill slots to the safepoint.
+ spills->set(slot.reg());
+ continue;
+ }
+ DCHECK_IMPLIES(slot.is_reg(), spill_location == SpillLocation::kStackSlots);
+
+ slots->push_back(GetSafepointIndexForStackSlot(slot));
+ }
+}
+
void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
- for (auto slot : stack_state) {
+ for (const auto& slot : stack_state) {
DCHECK(!slot.is_reg());
if (slot.type().is_reference_type()) {
- // index = 0 is for the stack slot at 'fp + kFixedFrameSizeAboveFp -
- // kSystemPointerSize', the location of the current stack slot is 'fp -
- // slot.offset()'. The index we need is therefore '(fp +
- // kFixedFrameSizeAboveFp - kSystemPointerSize) - (fp - slot.offset())' =
- // 'slot.offset() + kFixedFrameSizeAboveFp - kSystemPointerSize'.
- auto index =
- (slot.offset() + StandardFrameConstants::kFixedFrameSizeAboveFp -
- kSystemPointerSize) /
- kSystemPointerSize;
- safepoint.DefinePointerSlot(index);
+ safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
}
}
}
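// Illustrative sketch (editor's addition, not from the patch): the slot-index
// arithmetic used by GetSafepointIndexForStackSlot above, as standalone C++.
// The constants and the example offset are assumptions: 8-byte pointers and a
// fixed frame of two pointers (return address + saved fp) above the fp.
constexpr int kPtrSize = 8;                           // assumed kSystemPointerSize
constexpr int kFixedFrameSizeAboveFp = 2 * kPtrSize;  // assumed frame layout

// A value-stack slot lives at 'fp - slot_offset'; index 0 is the slot at
// 'fp + kFixedFrameSizeAboveFp - kPtrSize'.
constexpr int SafepointIndexForStackSlot(int slot_offset) {
  return (slot_offset + kFixedFrameSizeAboveFp - kPtrSize) / kPtrSize;
}

// Under these assumptions a first value-stack slot at 'fp - 24' gets index
// (24 + 16 - 8) / 8 == 4, matching the "index of the first stack slot is '4'"
// remark in the comment above.
static_assert(SafepointIndexForStackSlot(24) == 4, "worked example");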
@@ -520,7 +545,8 @@ int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
// that the offset of the first spill slot is kSystemPointerSize and not
// '0'. Therefore we don't have to add '+1' here.
return (max_used_spill_offset_ +
- StandardFrameConstants::kFixedFrameSizeAboveFp) /
+ StandardFrameConstants::kFixedFrameSizeAboveFp +
+ ool_spill_space_size_) /
kSystemPointerSize;
}
@@ -538,7 +564,7 @@ LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
LiftoffAssembler::~LiftoffAssembler() {
if (num_locals_ > kInlineLocalTypes) {
- free(more_local_types_);
+ base::Free(more_local_types_);
}
}
@@ -578,14 +604,25 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
DCHECK_LT(index, cache_state_.stack_state.size());
VarState& slot = cache_state_.stack_state.end()[-1 - index];
if (slot.is_reg()) {
- cache_state_.dec_used(slot.reg());
return slot.reg();
}
LiftoffRegister reg = LoadToRegister(slot, pinned);
+ cache_state_.inc_used(reg);
slot.MakeRegister(reg);
return reg;
}
+void LiftoffAssembler::DropValues(int count) {
+ for (int i = 0; i < count; ++i) {
+ DCHECK(!cache_state_.stack_state.empty());
+ VarState slot = cache_state_.stack_state.back();
+ cache_state_.stack_state.pop_back();
+ if (slot.is_reg()) {
+ cache_state_.dec_used(slot.reg());
+ }
+ }
+}
+
void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
@@ -865,8 +902,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
void LiftoffAssembler::FinishCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
- // Offset of the current return value relative to the stack pointer.
- int return_offset = 0;
int call_desc_return_idx = 0;
for (ValueType return_type : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
@@ -888,10 +923,11 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
} else {
DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
- LoadReturnStackSlot(reg_pair[pair_idx], return_offset, lowered_type);
- const int type_size = lowered_type.element_size_bytes();
- const int slot_size = RoundUp<kSystemPointerSize>(type_size);
- return_offset += slot_size;
+ // Get slot offset relative to the stack pointer.
+ int offset = call_descriptor->GetOffsetToReturns();
+ int return_slot = -loc.GetLocation() - offset - 1;
+ LoadReturnStackSlot(reg_pair[pair_idx],
+ return_slot * kSystemPointerSize, lowered_type);
}
if (pair_idx == 0) {
pinned.set(reg_pair[0]);
@@ -904,7 +940,8 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
reg_pair[1].gp()));
}
}
- RecordUsedSpillOffset(TopSpillOffset() + return_offset);
+ int return_slots = static_cast<int>(call_descriptor->StackReturnCount());
+ RecordUsedSpillOffset(TopSpillOffset() + return_slots * kSystemPointerSize);
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
@@ -1123,8 +1160,8 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) {
- more_local_types_ =
- reinterpret_cast<ValueType*>(malloc(num_locals * sizeof(ValueType)));
+ more_local_types_ = reinterpret_cast<ValueType*>(
+ base::Malloc(num_locals * sizeof(ValueType)));
DCHECK_NOT_NULL(more_local_types_);
}
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 895abbbbb4..94f91ab0fd 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -30,6 +30,44 @@ class CallDescriptor;
namespace wasm {
+enum LiftoffCondition {
+ kEqual,
+ kUnequal,
+ kSignedLessThan,
+ kSignedLessEqual,
+ kSignedGreaterThan,
+ kSignedGreaterEqual,
+ kUnsignedLessThan,
+ kUnsignedLessEqual,
+ kUnsignedGreaterThan,
+ kUnsignedGreaterEqual
+};
+
+inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return kUnequal;
+ case kUnequal:
+ return kEqual;
+ case kSignedLessThan:
+ return kSignedGreaterEqual;
+ case kSignedLessEqual:
+ return kSignedGreaterThan;
+ case kSignedGreaterEqual:
+ return kSignedLessThan;
+ case kSignedGreaterThan:
+ return kSignedLessEqual;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterEqual;
+ case kUnsignedLessEqual:
+ return kUnsignedGreaterThan;
+ case kUnsignedGreaterEqual:
+ return kUnsignedLessThan;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessEqual;
+ }
+}
+
class LiftoffAssembler : public TurboAssembler {
public:
// Each slot in our stack frame currently has exactly 8 bytes.
@@ -135,6 +173,19 @@ class LiftoffAssembler : public TurboAssembler {
// Disallow copy construction.
CacheState(const CacheState&) = delete;
+ enum class SpillLocation { kTopOfStack, kStackSlots };
+ // Generates two lists of locations that contain references. {slots}
+ // contains the indices of slots on the value stack that contain references.
+ // {spills} contains all registers that contain references. The
+ // {spill_location} defines where register values will be spilled for a
+ // function call within the out-of-line code. {kStackSlots} means that the
+ // values in the registers will be written back to their stack slots.
+ // {kTopOfStack} means that the registers will be spilled on the stack with
+ // a {push} instruction.
+ void GetTaggedSlotsForOOLCode(/*out*/ ZoneVector<int>* slots,
+ /*out*/ LiftoffRegList* spills,
+ SpillLocation spill_location);
+
void DefineSafepoint(Safepoint& safepoint);
base::SmallVector<VarState, 8> stack_state;
@@ -291,16 +342,30 @@ class LiftoffAssembler : public TurboAssembler {
return LoadToRegister(slot, pinned);
}
+ // Use this to pop a value into a register that has no other uses, so it
+ // can be modified.
+ LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) {
+ ValueType type = cache_state_.stack_state.back().type();
+ LiftoffRegister reg = PopToRegister(pinned);
+ if (cache_state()->is_free(reg)) return reg;
+
+ pinned.set(reg);
+ LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned);
+ Move(new_reg, reg, type);
+ return new_reg;
+ }
+
// Returns the register which holds the value of stack slot {index}. If the
// value is not stored in a register yet, a register is allocated for it. The
// register is then assigned to the stack slot. The value stack height is not
// modified. The top of the stack is index 0, i.e. {PopToRegister()} and
// {PeekToRegister(0)} should result in the same register.
- // {PeekToRegister} already decrements the used count of the register of the
- // stack slot. Therefore the register must not be popped by {PopToRegister}
- // but discarded with {stack_state.pop_back(count)}.
+ // When the value is finally popped, the use counter of its register has to be
+ // decremented. This can be done by popping the value with {DropValues}.
LiftoffRegister PeekToRegister(int index, LiftoffRegList pinned);
+ void DropValues(int count);
+
// Ensure that the loop inputs are either in a register or spilled to the
// stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num);
@@ -414,6 +479,10 @@ class LiftoffAssembler : public TurboAssembler {
if (offset >= max_used_spill_offset_) max_used_spill_offset_ = offset;
}
+ void RecordOolSpillSpaceSize(int size) {
+ if (size > ool_spill_space_size_) ool_spill_space_size_ = size;
+ }
+
// Load parameters into the right registers / stack slots for the call.
void PrepareBuiltinCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
@@ -469,7 +538,8 @@ class LiftoffAssembler : public TurboAssembler {
inline int PrepareStackFrame();
inline void PrepareTailCall(int num_callee_stack_params,
int stack_param_delta);
- inline void PatchPrepareStackFrame(int offset, int frame_size);
+ inline void AlignFrameSize();
+ inline void PatchPrepareStackFrame(int offset);
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
@@ -485,49 +555,66 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
- inline void StoreTaggedPointer(Register dst_addr, int32_t offset_imm,
- LiftoffRegister src, LiftoffRegList pinned);
+ inline void StoreTaggedPointer(Register dst_addr, Register offset_reg,
+ int32_t offset_imm, LiftoffRegister src,
+ LiftoffRegList pinned);
+ inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
+ LiftoffRegList pinned) {
+ int offset = FixedArray::kLengthOffset - kHeapObjectTag;
+ if (SmiValuesAre32Bits()) {
+#if V8_TARGET_LITTLE_ENDIAN
+ DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
+ offset += 4;
+#endif
+ Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
+ }
+ }
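// Illustrative sketch (editor's addition, not from the patch): the Smi
// decoding that LoadFixedArrayLengthAsInt32 performs above. With 32-bit Smis
// the payload sits in the upper 32 bits of the tagged word, so the helper can
// simply load at 'offset + 4' on little-endian; with 31-bit Smis the value is
// shifted left by one tag bit and recovered with an arithmetic shift. The tag
// size below is an assumption mirroring the DCHECKs, not taken from V8 headers.
#include <cstdint>

constexpr int kAssumedSmiTagSize = 1;

// 32-bit Smis: value << 32, so the high word is the int32 value.
constexpr int32_t DecodeSmi32(uint64_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}

// 31-bit Smis: value << tag size, undone by an arithmetic shift right.
constexpr int32_t DecodeSmi31(int32_t tagged) {
  return tagged >> kAssumedSmiTagSize;
}

static_assert(DecodeSmi32(uint64_t{42} << 32) == 42, "high-word payload");
static_assert(DecodeSmi31(42 << kAssumedSmiTagSize) == 42, "tagged payload");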
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
- uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
+ uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
bool is_load_mem = false);
- inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
- LiftoffRegister src, StoreType type, LiftoffRegList pinned,
+ inline void Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src, StoreType type,
+ LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr,
bool is_store_mem = false);
inline void AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned);
inline void AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned);
inline void AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister expected,
LiftoffRegister new_value,
LiftoffRegister value, StoreType type);
@@ -557,6 +644,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_addi(Register dst, Register lhs, int32_t imm);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_subi(Register dst, Register lhs, int32_t imm);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
@@ -589,7 +677,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm);
+ int64_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -667,11 +755,20 @@ class LiftoffAssembler : public TurboAssembler {
}
}
- inline void emit_ptrsize_addi(Register dst, Register lhs, int32_t imm) {
+ inline void emit_ptrsize_addi(Register dst, Register lhs, intptr_t imm) {
if (kSystemPointerSize == 8) {
emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
} else {
- emit_i32_addi(dst, lhs, imm);
+ emit_i32_addi(dst, lhs, static_cast<int32_t>(imm));
+ }
+ }
+
+ inline void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ if (kSystemPointerSize == 8) {
+ emit_i64_set_cond(condition, dst, lhs, rhs);
+ } else {
+ emit_i32_set_cond(condition, dst, lhs.gp(), rhs.gp());
}
}
@@ -746,18 +843,20 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*);
inline void emit_jump(Register);
- inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
- Register rhs = no_reg);
+ inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value,
+ Register lhs, Register rhs = no_reg);
+ inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
+ Register lhs, int imm);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
- inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
+ inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
Register rhs);
inline void emit_i64_eqz(Register dst, LiftoffRegister src);
- inline void emit_i64_set_cond(Condition condition, Register dst,
+ inline void emit_i64_set_cond(LiftoffCondition condition, Register dst,
LiftoffRegister lhs, LiftoffRegister rhs);
- inline void emit_f32_set_cond(Condition condition, Register dst,
+ inline void emit_f32_set_cond(LiftoffCondition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
- inline void emit_f64_set_cond(Condition condition, Register dst,
+ inline void emit_f64_set_cond(LiftoffCondition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
// Optional select support: Returns false if generic code (via branches)
@@ -766,10 +865,16 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister true_value,
LiftoffRegister false_value, ValueType type);
+ enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
+ inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
+
inline void LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LoadTransformationKind transform,
uint32_t* protected_load_pc);
+ inline void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr,
+ Register offset_reg, uintptr_t offset_imm, LoadType type,
+ uint8_t lane, uint32_t* protected_load_pc);
inline void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, const uint8_t shuffle[16],
bool is_swizzle);
@@ -919,6 +1024,18 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v32x4_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
@@ -951,6 +1068,18 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -970,6 +1099,19 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
+ inline void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src);
@@ -1111,6 +1253,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
+ inline void RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset);
+
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this
@@ -1186,7 +1333,10 @@ class LiftoffAssembler : public TurboAssembler {
static_assert(sizeof(ValueType) == 4,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
+ // The maximum spill offset for slots in the value stack.
int max_used_spill_offset_ = StaticStackFrameSize();
+ // The amount of memory needed for register spills in OOL code.
+ int ool_spill_space_size_ = 0;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
@@ -1234,28 +1384,28 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
LiftoffRegister dst,
- LiftoffRegister lhs, int32_t imm) {
- // Top half of the immediate sign extended, either 0 or -1.
- int32_t sign_extend = imm < 0 ? -1 : 0;
+ LiftoffRegister lhs, int64_t imm) {
+ int32_t low_word = static_cast<int32_t>(imm);
+ int32_t high_word = static_cast<int32_t>(imm >> 32);
// If {dst.low_gp()} does not overlap with {lhs.high_gp()},
// just first compute the lower half, then the upper half.
if (dst.low() != lhs.high()) {
- (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
- (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
return;
}
// If {dst.high_gp()} does not overlap with {lhs.low_gp()},
// we can compute this the other way around.
if (dst.high() != lhs.low()) {
- (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
- (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
return;
}
// Otherwise, we need a temporary register.
Register tmp =
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
- (assm->*op)(tmp, lhs.low_gp(), imm);
- (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
+ (assm->*op)(tmp, lhs.low_gp(), low_word);
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
assm->Move(dst.low_gp(), tmp, kWasmI32);
}
} // namespace liftoff
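// Illustrative sketch (editor's addition, not from the patch): the low/high
// word split now done by EmitI64IndependentHalfOperationImm, as plain
// arithmetic. The previous code could only sign-extend a 32-bit immediate into
// the high word; the new code takes an arbitrary 64-bit immediate.
#include <cstdint>

struct WordPair {
  int32_t low;
  int32_t high;
};

constexpr WordPair SplitI64(int64_t imm) {
  return {static_cast<int32_t>(imm), static_cast<int32_t>(imm >> 32)};
}

// e.g. 0x1'0000'0005 splits into low = 5, high = 1, while a negative immediate
// such as -1 splits into low = -1, high = -1 (the old sign-extension case).
static_assert(SplitI64(int64_t{0x100000005}).low == 5, "");
static_assert(SplitI64(int64_t{0x100000005}).high == 1, "");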
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 1ead202ea0..9a42bbf50c 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -5,6 +5,7 @@
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/base/optional.h"
+#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/external-reference.h"
@@ -121,18 +122,7 @@ compiler::CallDescriptor* GetLoweredCallDescriptor(
: call_desc;
}
-constexpr ValueType kSupportedTypesArr[] = {
- kWasmI32, kWasmI64, kWasmF32, kWasmF64,
- kWasmS128, kWasmExternRef, kWasmFuncRef};
-constexpr Vector<const ValueType> kSupportedTypes =
- ArrayVector(kSupportedTypesArr);
-
-constexpr ValueType kSupportedTypesWithoutRefsArr[] = {
- kWasmI32, kWasmI64, kWasmF32, kWasmF64, kWasmS128};
-constexpr Vector<const ValueType> kSupportedTypesWithoutRefs =
- ArrayVector(kSupportedTypesWithoutRefsArr);
-
-constexpr Condition GetCompareCondition(WasmOpcode opcode) {
+constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
switch (opcode) {
case kExprI32Eq:
return kEqual;
@@ -300,38 +290,60 @@ class LiftoffCompiler {
explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {}
};
+ struct OutOfLineSafepointInfo {
+ ZoneVector<int> slots;
+ LiftoffRegList spills;
+
+ explicit OutOfLineSafepointInfo(Zone* zone) : slots(zone) {}
+ };
+
struct OutOfLineCode {
MovableLabel label;
MovableLabel continuation;
WasmCode::RuntimeStubId stub;
WasmCodePosition position;
LiftoffRegList regs_to_save;
+ OutOfLineSafepointInfo* safepoint_info;
uint32_t pc; // for trap handler.
// These two pointers will only be used for debug code:
- DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
SpilledRegistersForInspection* spilled_registers;
+ DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
// Named constructors:
static OutOfLineCode Trap(
- WasmCode::RuntimeStubId s, WasmCodePosition pos, uint32_t pc,
- DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder,
- SpilledRegistersForInspection* spilled_registers) {
+ WasmCode::RuntimeStubId s, WasmCodePosition pos,
+ SpilledRegistersForInspection* spilled_registers,
+ OutOfLineSafepointInfo* safepoint_info, uint32_t pc,
+ DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
DCHECK_LT(0, pos);
- return {{},
- {},
- s,
- pos,
- {},
- pc,
- debug_sidetable_entry_builder,
- spilled_registers};
+ return {
+ {}, // label
+ {}, // continuation
+ s, // stub
+ pos, // position
+ {}, // regs_to_save
+ safepoint_info, // safepoint_info
+ pc, // pc
+ spilled_registers, // spilled_registers
+ debug_sidetable_entry_builder // debug_side_table_entry_builder
+ };
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs_to_save,
SpilledRegistersForInspection* spilled_regs,
+ OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
- return {{}, {}, WasmCode::kWasmStackGuard, pos,
- regs_to_save, 0, debug_sidetable_entry_builder, spilled_regs};
+ return {
+ {}, // label
+ {}, // continuation
+ WasmCode::kWasmStackGuard, // stub
+ pos, // position
+ regs_to_save, // regs_to_save
+ safepoint_info, // safepoint_info
+ 0, // pc
+ spilled_regs, // spilled_registers
+ debug_sidetable_entry_builder // debug_side_table_entry_builder
+ };
}
};
@@ -391,6 +403,13 @@ class LiftoffCompiler {
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
detail);
UnuseLabels(decoder);
+ // --liftoff-only ensures that tests actually exercise the Liftoff path
+ // without bailing out. Bailing out due to (simulated) lack of CPU support
+ // is okay though.
+ if (FLAG_liftoff_only && reason != kMissingCPUFeature) {
+ FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s",
+ detail);
+ }
}
bool DidAssemblerBailout(FullDecoder* decoder) {
@@ -399,47 +418,41 @@ class LiftoffCompiler {
return true;
}
- LiftoffBailoutReason BailoutReasonForType(ValueType type) {
+ bool CheckSupportedType(FullDecoder* decoder, ValueType type,
+ const char* context) {
+ LiftoffBailoutReason bailout_reason = kOtherReason;
switch (type.kind()) {
+ case ValueType::kI32:
+ case ValueType::kI64:
+ case ValueType::kF32:
+ case ValueType::kF64:
+ return true;
case ValueType::kS128:
- return kSimd;
- case ValueType::kOptRef:
+ if (CpuFeatures::SupportsWasmSimd128()) return true;
+ bailout_reason = kMissingCPUFeature;
+ break;
case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ case ValueType::kI8:
+ case ValueType::kI16:
+ if (FLAG_experimental_liftoff_extern_ref) return true;
if (type.is_reference_to(HeapType::kExn)) {
- return kExceptionHandling;
+ bailout_reason = kExceptionHandling;
} else {
- return kRefTypes;
+ bailout_reason = kRefTypes;
}
+ break;
case ValueType::kBottom:
- return kMultiValue;
- default:
- return kOtherReason;
- }
- }
-
- bool CheckSupportedType(FullDecoder* decoder,
- Vector<const ValueType> supported_types,
- ValueType type, const char* context) {
- // Special case for kWasm128 which requires specific hardware support.
- if (type == kWasmS128 && (!CpuFeatures::SupportsWasmSimd128())) {
- unsupported(decoder, kSimd, "simd");
- return false;
- }
- // Check supported types.
- for (ValueType supported : supported_types) {
- if (type == supported) return true;
+ case ValueType::kStmt:
+ UNREACHABLE();
}
- LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", type.name().c_str(), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
- int GetSafepointTableOffset() const {
- return safepoint_table_builder_.GetCodeOffset();
- }
-
void UnuseLabels(FullDecoder* decoder) {
#ifdef DEBUG
auto Unuse = [](Label* label) {
@@ -509,12 +522,20 @@ class LiftoffCompiler {
LiftoffRegList regs_to_save = __ cache_state()->used_registers;
SpilledRegistersForInspection* spilled_regs = nullptr;
Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
+
+ OutOfLineSafepointInfo* safepoint_info =
+ compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
+ __ cache_state()->GetTaggedSlotsForOOLCode(
+ &safepoint_info->slots, &safepoint_info->spills,
+ for_debugging_
+ ? LiftoffAssembler::CacheState::SpillLocation::kStackSlots
+ : LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
if (V8_UNLIKELY(for_debugging_)) {
regs_to_save = {};
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
- position, regs_to_save, spilled_regs,
+ position, regs_to_save, spilled_regs, safepoint_info,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
@@ -556,12 +577,7 @@ class LiftoffCompiler {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- if (!CheckSupportedType(decoder,
- FLAG_experimental_liftoff_extern_ref
- ? kSupportedTypes
- : kSupportedTypesWithoutRefs,
- __ local_type(i), "param"))
- return;
+ if (!CheckSupportedType(decoder, __ local_type(i), "param")) return;
}
// Input 0 is the call target, the instance is at 1.
@@ -627,16 +643,9 @@ class LiftoffCompiler {
++param_idx) {
ValueType type = decoder->local_type(param_idx);
if (type.is_reference_type()) {
- Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp();
- // We can re-use the isolate_root register as result register.
- Register result = isolate_root;
-
- LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
- __ LoadTaggedPointer(
- result, isolate_root, no_reg,
- IsolateData::root_slot_offset(RootIndex::kNullValue), {});
- __ Spill(__ cache_state()->stack_state.back().offset(),
- LiftoffRegister(result), type);
+ LiftoffRegister result = __ GetUnusedRegister(kGpReg, {});
+ LoadNullValue(result.gp(), {});
+ __ Spill(__ cache_state()->stack_state.back().offset(), result, type);
}
}
}
@@ -733,8 +742,28 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
- // TODO(ahaas): Define a proper safepoint here.
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(
+ &asm_, Safepoint::kNoLazyDeopt);
+
+ if (ool->safepoint_info) {
+ for (auto index : ool->safepoint_info->slots) {
+ safepoint.DefinePointerSlot(index);
+ }
+
+ int total_frame_size = __ GetTotalFrameSize();
+ LiftoffRegList gp_regs = ool->regs_to_save & kGpCacheRegList;
+ // {total_frame_size} is the highest offset from the FP that is used to
+ // store a value. The offset of the first spill slot should therefore be
+ // {(total_frame_size / kSystemPointerSize) + 1}. However, spill slots
+ // don't start at offset '0' but at offset '-1' (or
+ // {-kSystemPointerSize}). Therefore we have to add another '+ 1' to the
+ // index of the first spill slot.
+ int index = (total_frame_size / kSystemPointerSize) + 2;
+
+ __ RecordSpillsInSafepoint(safepoint, gp_regs,
+ ool->safepoint_info->spills, index);
+ }
+
DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
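// Illustrative sketch (editor's addition, not from the patch): the '+ 2' above
// in concrete numbers, assuming 8-byte pointers. With a total frame size of,
// say, 40 bytes, the first register pushed in the out-of-line code is recorded
// at safepoint index 40 / 8 + 2 == 7, and RecordSpillsInSafepoint bumps the
// index by one per pushed register.
constexpr int FirstOolSpillIndex(int total_frame_size) {
  return total_frame_size / 8 + 2;  // 8 == assumed kSystemPointerSize
}

static_assert(FirstOolSpillIndex(40) == 7, "worked example");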
@@ -756,11 +785,15 @@ class LiftoffCompiler {
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
+ __ AlignFrameSize();
+#if DEBUG
+ int frame_size = __ GetTotalFrameSize();
+#endif
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(&ool);
}
- __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
- __ GetTotalFrameSize());
+ DCHECK_EQ(frame_size, __ GetTotalFrameSize());
+ __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_);
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
__ MaybeEmitOutOfLineConstantPool();
@@ -879,10 +912,24 @@ class LiftoffCompiler {
unsupported(decoder, kExceptionHandling, "try");
}
- void Catch(FullDecoder* decoder, Control* block, Value* exception) {
+ void CatchException(FullDecoder* decoder,
+ const ExceptionIndexImmediate<validate>& imm,
+ Control* block, Vector<Value> values) {
unsupported(decoder, kExceptionHandling, "catch");
}
+ void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
+ unsupported(decoder, kExceptionHandling, "delegate");
+ }
+
+ void Rethrow(FullDecoder* decoder, Control* block) {
+ unsupported(decoder, kExceptionHandling, "rethrow");
+ }
+
+ void CatchAll(FullDecoder* decoder, Control* block) {
+ unsupported(decoder, kExceptionHandling, "catch-all");
+ }
+
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -1218,9 +1265,22 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
- case kExprRefIsNull:
- unsupported(decoder, kRefTypes, "ref_is_null");
+ case kExprRefIsNull: {
+ if (!FLAG_experimental_liftoff_extern_ref) {
+ unsupported(decoder, kRefTypes, "ref_is_null");
+ return;
+ }
+ LiftoffRegList pinned;
+ LiftoffRegister ref = pinned.set(__ PopToRegister());
+ LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+ LoadNullValue(null.gp(), pinned);
+ // Prefer to overwrite one of the input registers with the result
+ // of the comparison.
+ LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
+ __ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
+ __ PushRegister(kWasmI32, dst);
return;
+ }
default:
UNREACHABLE();
}
@@ -1589,6 +1649,11 @@ class LiftoffCompiler {
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
+ case kExprRefEq: {
+ return EmitBinOp<ValueType::kOptRef, kI32>(
+ BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
+ }
+
default:
UNREACHABLE();
}
@@ -1632,15 +1697,9 @@ class LiftoffCompiler {
unsupported(decoder, kRefTypes, "ref_null");
return;
}
- Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp();
- // We can re-use the isolate_root register as result register.
- Register result = isolate_root;
-
- LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
- __ LoadTaggedPointer(result, isolate_root, no_reg,
- IsolateData::root_slot_offset(RootIndex::kNullValue),
- {});
- __ PushRegister(type, LiftoffRegister(result));
+ LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
+ LoadNullValue(null.gp(), {});
+ __ PushRegister(type, null);
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
@@ -1648,15 +1707,13 @@ class LiftoffCompiler {
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- unsupported(decoder, kRefTypes, "ref.as_non_null");
+ LiftoffRegList pinned;
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
+ __ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj);
}
- void Drop(FullDecoder* decoder, const Value& value) {
- auto& slot = __ cache_state()->stack_state.back();
- // If the dropped slot contains a register, decrement it's use count.
- if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
- __ cache_state()->stack_state.pop_back();
- }
+ void Drop(FullDecoder* decoder) { __ DropValues(1); }
void TraceFunctionExit(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("trace function exit");
@@ -1810,11 +1867,7 @@ class LiftoffCompiler {
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder,
- FLAG_experimental_liftoff_extern_ref
- ? kSupportedTypes
- : kSupportedTypesWithoutRefs,
- global->type, "global")) {
+ if (!CheckSupportedType(decoder, global->type, "global")) {
return;
}
@@ -1849,11 +1902,7 @@ class LiftoffCompiler {
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder,
- FLAG_experimental_liftoff_extern_ref
- ? kSupportedTypes
- : kSupportedTypesWithoutRefs,
- global->type, "global")) {
+ if (!CheckSupportedType(decoder, global->type, "global")) {
return;
}
@@ -1868,7 +1917,7 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
- __ StoreTaggedPointer(globals_buffer,
+ __ StoreTaggedPointer(globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
value, pinned);
@@ -1882,14 +1931,67 @@ class LiftoffCompiler {
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
- void TableGet(FullDecoder* decoder, const Value& index, Value* result,
+ void TableGet(FullDecoder* decoder, const Value&, Value*,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kRefTypes, "table_get");
+ LiftoffRegList pinned;
+
+ LiftoffRegister table_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(table_index_reg, WasmValue(imm.index));
+ LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
+ 0);
+
+ LiftoffAssembler::VarState index = __ cache_state()->stack_state.back();
+
+ WasmCode::RuntimeStubId target = WasmCode::kWasmTableGet;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
+
+ ValueType result_type = env_->module->tables[imm.index].type;
+ ValueType sig_reps[] = {result_type, kWasmI32, kWasmI32};
+ FunctionSig sig(1, 2, sig_reps);
+
+ __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(1);
+
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+
+ __ PushRegister(result_type, LiftoffRegister(kReturnRegister0));
}
- void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
+ void TableSet(FullDecoder* decoder, const Value&, const Value&,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kRefTypes, "table_set");
+ LiftoffRegList pinned;
+
+ LiftoffRegister table_index_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(table_index_reg, WasmValue(imm.index));
+ LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
+ 0);
+
+ LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
+
+ WasmCode::RuntimeStubId target = WasmCode::kWasmTableSet;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
+
+ ValueType sig_reps[] = {kWasmI32, kWasmI32,
+ env_->module->tables[imm.index].type};
+ FunctionSig sig(0, 3, sig_reps);
+
+ __ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+
+ // Pop parameters from the value stack.
+ __ cache_state()->stack_state.pop_back(2);
+
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
}
void Unreachable(FullDecoder* decoder) {
@@ -1934,8 +2036,6 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
- void Br(FullDecoder* decoder, Control* target) { BrImpl(target); }
-
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
ReturnImpl(decoder);
@@ -1965,7 +2065,7 @@ class LiftoffCompiler {
outstanding_op_ = kNoOutstandingOp;
} else {
// Otherwise, it's an i32 compare opcode.
- Condition cond = NegateCondition(GetCompareCondition(outstanding_op_));
+ LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
Register rhs = value;
Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
__ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs);
@@ -2070,6 +2170,7 @@ class LiftoffCompiler {
if (!slot.is_reg()) continue;
spilled->entries.push_back(SpilledRegistersForInspection::Entry{
slot.offset(), slot.reg(), slot.type()});
+ __ RecordUsedSpillOffset(slot.offset());
}
return spilled;
}
@@ -2077,22 +2178,35 @@ class LiftoffCompiler {
Label* AddOutOfLineTrap(WasmCodePosition position,
WasmCode::RuntimeStubId stub, uint32_t pc = 0) {
DCHECK(FLAG_wasm_bounds_checks);
-
+ OutOfLineSafepointInfo* safepoint_info = nullptr;
+ if (V8_UNLIKELY(for_debugging_)) {
+ // Execution does not return after a trap. Therefore we don't have to
+ // define a safepoint for traps that would preserve references on the
+ // stack. However, if this is debug code, then we have to preserve the
+ // references so that they can be inspected.
+ safepoint_info =
+ compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
+ __ cache_state()->GetTaggedSlotsForOOLCode(
+ &safepoint_info->slots, &safepoint_info->spills,
+ LiftoffAssembler::CacheState::SpillLocation::kStackSlots);
+ }
out_of_line_code_.push_back(OutOfLineCode::Trap(
- stub, position, pc,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling),
+ stub, position,
V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
- : nullptr));
+ : nullptr,
+ safepoint_info, pc,
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
return out_of_line_code_.back().label.get();
}
enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false };
- // Returns true if the memory access is statically known to be out of bounds
- // (a jump to the trap was generated then); return false otherwise.
- bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
- uint64_t offset, Register index, LiftoffRegList pinned,
- ForceCheck force_check) {
+ // Returns {no_reg} if the memory access is statically known to be out of
+ // bounds (a jump to the trap was generated then); returns the GP {index}
+ // register otherwise (holding the ptrsized index).
+ Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
+ uint64_t offset, LiftoffRegister index,
+ LiftoffRegList pinned, ForceCheck force_check) {
// If the offset does not fit in a uintptr_t, this can never succeed on this
// machine.
const bool statically_oob =
@@ -2100,11 +2214,22 @@ class LiftoffCompiler {
!base::IsInBounds<uintptr_t>(offset, access_size,
env_->max_memory_size);
+ // After bounds checking, we know that the index must be ptrsize, hence only
+ // look at the lower word on 32-bit systems (the high word is bounds-checked
+ // further down).
+ Register index_ptrsize =
+ kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp();
+
if (!force_check && !statically_oob &&
(!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
- return false;
+ // With trap handlers we should not have a register pair as input (we
+ // would only return the lower half).
+ DCHECK_IMPLIES(env_->use_trap_handler, index.is_gp());
+ return index_ptrsize;
}
+ DEBUG_CODE_COMMENT("bounds check memory");
+
// TODO(wasm): This adds protected instruction information for the jump
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
@@ -2115,41 +2240,50 @@ class LiftoffCompiler {
if (statically_oob) {
__ emit_jump(trap_label);
decoder->SetSucceedingCodeDynamicallyUnreachable();
- return true;
+ return no_reg;
+ }
+
+ // Convert the index to ptrsize, bounds-checking the high word on 32-bit
+ // systems for memory64.
+ if (!env_->module->is_memory64) {
+ __ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
+ } else if (kSystemPointerSize == kInt32Size) {
+ DCHECK_GE(kMaxUInt32, env_->max_memory_size);
+ // Unary "unequal" means "not equal to zero".
+ __ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
- // If the end offset is larger than the smallest memory, dynamically check
- // the end offset against the actual memory size, which is not known at
- // compile time. Otherwise, only one check is required (see below).
+ pinned.set(index_ptrsize);
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
+ LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
+ LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize);
__ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));
- if (end_offset >= env_->min_memory_size) {
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
- LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
- mem_size);
+ // If the end offset is larger than the smallest memory, dynamically check
+ // the end offset against the actual memory size, which is not known at
+ // compile time. Otherwise, only one check is required (see below).
+ if (end_offset > env_->min_memory_size) {
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
+ end_offset_reg.gp(), mem_size.gp());
}
- // Just reuse the end_offset register for computing the effective size.
+ // Just reuse the end_offset register for computing the effective size
+ // (which is >= 0 because of the check above).
LiftoffRegister effective_size_reg = end_offset_reg;
- __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size, end_offset_reg.gp());
-
- __ emit_u32_to_intptr(index, index);
+ __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
+ end_offset_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
- LiftoffAssembler::kWasmIntPtr, index,
- effective_size_reg.gp());
- return false;
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerValueType,
+ index_ptrsize, effective_size_reg.gp());
+ return index_ptrsize;
}
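// Illustrative sketch (editor's addition, not from the patch): the dynamic
// check that BoundsCheckMem emits above, written as a standalone predicate.
// {offset} and {access_size} are compile-time properties of the wasm access;
// {index} and {mem_size} are runtime values. Both emitted comparisons are
// unsigned, which is what makes the second test safe once end_offset is known
// to be smaller than mem_size.
#include <cstdint>

bool AccessInBounds(uintptr_t index, uintptr_t offset, uintptr_t access_size,
                    uintptr_t mem_size) {
  uintptr_t end_offset = offset + access_size - 1;
  // First comparison: the constant part alone must fit into the memory. The
  // generated code can skip this when end_offset <= min_memory_size, because
  // then it holds for every possible memory size.
  if (end_offset >= mem_size) return false;
  // Second comparison: the dynamic index must fit into what is left.
  uintptr_t effective_size = mem_size - end_offset;
  return index < effective_size;
}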
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
- uint32_t offset, Register index,
+ uintptr_t offset, Register index,
LiftoffRegList pinned) {
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnalignedAccess, 0);
@@ -2165,16 +2299,16 @@ class LiftoffCompiler {
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
- return;
+ } else {
+ // For alignment checks we only look at the lower 32 bits of {offset}.
+ __ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
+ __ emit_i32_andi(address, address, align_mask);
+ __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
}
- __ emit_i32_addi(address, index, offset);
- __ emit_i32_andi(address, address, align_mask);
-
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
}
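// Illustrative sketch (editor's addition, not from the patch): the alignment
// test emitted by AlignmentCheckMem above. An access of power-of-two size at
// address index + offset is aligned iff the low bits selected by align_mask
// are zero; since align_mask only covers a few low bits, only the low 32 bits
// of {offset} matter, which is why the code truncates the offset.
#include <cstdint>

bool IsAlignedAccess(uint32_t index, uint64_t offset, uint32_t access_size) {
  uint32_t align_mask = access_size - 1;  // access_size is a power of two
  uint32_t address = index + static_cast<uint32_t>(offset);
  return (address & align_mask) == 0;
}

// e.g. a 4-byte access at index 6 with offset 0: (6 & 3) == 2, so the access
// is unaligned and the generated code would jump to the trap label.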
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
- Register index, uint32_t offset,
+ Register index, uintptr_t offset,
WasmCodePosition position) {
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
@@ -2183,7 +2317,9 @@ class LiftoffCompiler {
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(effective_offset, WasmValue(offset));
+ // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
+ DCHECK_GE(kMaxUInt32, offset);
+ __ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
__ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
// Get a register to hold the stack slot for MemoryTracingInfo.
@@ -2228,7 +2364,7 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uint32_t* offset,
+ Register AddMemoryMasking(Register index, uintptr_t* offset,
LiftoffRegList* pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
@@ -2237,31 +2373,38 @@ class LiftoffCompiler {
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
- pinned->clear(LiftoffRegister(old_index));
+ pinned->clear(LiftoffRegister{old_index});
index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) __ Move(index, old_index, kWasmI32);
+ if (index != old_index) {
+ __ Move(index, old_index, kPointerValueType);
+ }
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- __ emit_ptrsize_addi(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
+ if (*offset) __ emit_ptrsize_addi(index, index, *offset);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
return index;
}
+ void Prefetch(FullDecoder* decoder,
+ const MemoryAccessImmediate<validate>& imm,
+ const Value& index_val, bool temporal) {
+ unsupported(decoder, kSimd, "simd prefetch");
+ }
+
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "load"))
- return;
- LiftoffRegList pinned;
- Register index = pinned.set(__ PopToRegister()).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDontForceCheck)) {
- return;
- }
- uint32_t offset = imm.offset;
+ if (!CheckSupportedType(decoder, value_type, "load")) return;
+ LiftoffRegister full_index = __ PopToRegister();
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, {}, kDontForceCheck);
+ if (index == no_reg) return;
+
+ uintptr_t offset = imm.offset;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -2289,24 +2432,22 @@ class LiftoffCompiler {
const Value& index_val, Value* result) {
// LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code.
- if (!CheckSupportedType(decoder, kSupportedTypes, kWasmS128,
- "LoadTransform")) {
+ if (!CheckSupportedType(decoder, kWasmS128, "LoadTransform")) {
return;
}
- LiftoffRegList pinned;
- Register index = pinned.set(__ PopToRegister()).gp();
+ LiftoffRegister full_index = __ PopToRegister();
// For load splats and load zero, LoadType is the size of the load, and for
// load extends, LoadType is the size of the lane, and it always loads 8
// bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
- if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
- kDontForceCheck)) {
- return;
- }
+ Register index = BoundsCheckMem(decoder, access_size, imm.offset,
+ full_index, {}, kDontForceCheck);
+ if (index == no_reg) return;
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load with transformation");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -2333,26 +2474,59 @@ class LiftoffCompiler {
}
}
- void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
- const Value& index, const MemoryAccessImmediate<validate>& imm,
- const uint8_t laneidx, Value* result) {
- unsupported(decoder, kSimd, "simd load lane");
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
+ const Value& _index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* _result) {
+ if (!CheckSupportedType(decoder, kWasmS128, "LoadLane")) {
+ return;
+ }
+
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister());
+ LiftoffRegister full_index = __ PopToRegister();
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDontForceCheck);
+ if (index == no_reg) return;
+
+ uintptr_t offset = imm.offset;
+ pinned.set(index);
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("load lane");
+ Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
+ uint32_t protected_load_pc = 0;
+
+ __ LoadLane(result, value, addr, index, offset, type, laneidx,
+ &protected_load_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
+ }
+
+ __ PushRegister(ValueType::Primitive(kS128), result);
+
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(false, type.mem_type().representation(), index,
+ offset, decoder->position());
+ }
}
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, kSupportedTypes, value_type, "store"))
- return;
+ if (!CheckSupportedType(decoder, value_type, "store")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
- Register index = pinned.set(__ PopToRegister(pinned)).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDontForceCheck)) {
- return;
- }
- uint32_t offset = imm.offset;
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDontForceCheck);
+ if (index == no_reg) return;
+
+ uintptr_t offset = imm.offset;
+ pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -2465,7 +2639,24 @@ class LiftoffCompiler {
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
- unsupported(decoder, kRefTypes, "br_on_null");
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(depth)->br_merge()->arity);
+ }
+
+ Label cont_false;
+ LiftoffRegList pinned;
+ LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
+ Register null = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LoadNullValue(null, pinned);
+ __ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null);
+
+ BrOrRet(decoder, depth);
+ __ bind(&cont_false);
+ __ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable),
+ ref);
}
template <ValueType::Kind src_type, ValueType::Kind result_type,
@@ -2732,6 +2923,18 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
case wasm::kExprI16x8MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
+ case wasm::kExprI16x8ExtMulLowI8x16S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s);
+ case wasm::kExprI16x8ExtMulLowI8x16U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u);
+ case wasm::kExprI16x8ExtMulHighI8x16S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s);
+ case wasm::kExprI16x8ExtMulHighI8x16U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
case wasm::kExprV32x4AnyTrue:
@@ -2766,6 +2969,18 @@ class LiftoffCompiler {
case wasm::kExprI32x4DotI16x8S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i32x4_dot_i16x8_s);
+ case wasm::kExprI32x4ExtMulLowI16x8S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s);
+ case wasm::kExprI32x4ExtMulLowI16x8U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u);
+ case wasm::kExprI32x4ExtMulHighI16x8S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s);
+ case wasm::kExprI32x4ExtMulHighI16x8U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
case wasm::kExprI64x2Shl:
@@ -2783,6 +2998,20 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_sub);
case wasm::kExprI64x2Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_mul);
+ case wasm::kExprI64x2ExtMulLowI32x4S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s);
+ case wasm::kExprI64x2ExtMulLowI32x4U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u);
+ case wasm::kExprI64x2ExtMulHighI32x4S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s);
+ case wasm::kExprI64x2ExtMulHighI32x4U:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u);
+ case wasm::kExprI64x2BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask);
case wasm::kExprF32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
case wasm::kExprF32x4Neg:
@@ -3046,7 +3275,7 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
uint8_t shuffle[kSimd128Size];
- memcpy(shuffle, imm.value, sizeof(shuffle));
+ base::Memcpy(shuffle, imm.value, sizeof(shuffle));
bool is_swizzle;
bool needs_swap;
wasm::SimdShuffle::CanonicalizeShuffle(lhs == rhs, shuffle, &needs_swap,
@@ -3065,23 +3294,18 @@ class LiftoffCompiler {
void Rethrow(FullDecoder* decoder, const Value& exception) {
unsupported(decoder, kExceptionHandling, "rethrow");
}
- void BrOnException(FullDecoder* decoder, const Value& exception,
- const ExceptionIndexImmediate<validate>& imm,
- uint32_t depth, Vector<Value> values) {
- unsupported(decoder, kExceptionHandling, "br_on_exn");
- }
-
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
- Register index = pinned.set(__ PopToRegister(pinned)).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDoForceCheck);
+ if (index == no_reg) return;
+
+ pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3098,14 +3322,14 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) {
ValueType value_type = type.value_type();
- LiftoffRegList pinned;
- Register index = pinned.set(__ PopToRegister()).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PopToRegister();
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, {}, kDoForceCheck);
+ if (index == no_reg) return;
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3124,7 +3348,7 @@ class LiftoffCompiler {
void AtomicBinop(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
- uint32_t, LiftoffRegister,
+ uintptr_t, LiftoffRegister,
LiftoffRegister,
StoreType)) {
ValueType result_type = type.value_type();
@@ -3146,14 +3370,15 @@ class LiftoffCompiler {
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
#endif
- Register index = pinned.set(__ PopToRegister(pinned)).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDoForceCheck);
+ if (index == no_reg) return;
+
+ pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -3170,25 +3395,24 @@ class LiftoffCompiler {
// complete address calculation first, so that the address only needs a
// single register. Afterwards we load all remaining values into the
// other registers.
- LiftoffRegList pinned;
- Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned,
- kDoForceCheck)) {
- return;
- }
- AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);
+ LiftoffRegister full_index = __ PeekToRegister(2, {});
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, {}, kDoForceCheck);
+ if (index == no_reg) return;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
- uint32_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
+ uintptr_t offset = imm.offset;
+ index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- __ emit_i32_add(addr, addr, index_reg);
- pinned.clear(LiftoffRegister(index_reg));
+ __ emit_i32_add(addr, addr, index);
+ pinned.clear(LiftoffRegister(index));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
// Pop the index from the stack.
- __ cache_state()->stack_state.pop_back(1);
+ __ DropValues(1);
LiftoffRegister result = expected;
@@ -3203,14 +3427,14 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
- Register index = pinned.set(__ PopToRegister(pinned)).gp();
- if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
- kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDoForceCheck);
+ if (index == no_reg) return;
+ pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -3237,26 +3461,25 @@ class LiftoffCompiler {
void AtomicWait(FullDecoder* decoder, ValueType type,
const MemoryAccessImmediate<validate>& imm) {
- LiftoffRegList pinned;
- Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
- if (BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
- index_reg, pinned, kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PeekToRegister(2, {});
+ Register index_reg =
+ BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
+ full_index, {}, kDoForceCheck);
+ if (index_reg == no_reg) return;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: index_reg;
+ // TODO(clemensb): Skip this if memory is 64 bit.
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
if (offset) {
- __ emit_i32_addi(index_plus_offset, index_reg, offset);
- __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
- } else {
- __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
+ __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
LiftoffAssembler::VarState timeout =
@@ -3305,7 +3528,7 @@ class LiftoffCompiler {
__ CallRuntimeStub(target);
DefineSafepoint();
// Pop parameters from the value stack.
- __ cache_state()->stack_state.pop_back(3);
+ __ DropValues(3);
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
@@ -3314,26 +3537,25 @@ class LiftoffCompiler {
void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) {
- LiftoffRegList pinned;
- Register index_reg = pinned.set(__ PeekToRegister(1, pinned)).gp();
- if (BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
- index_reg, pinned, kDoForceCheck)) {
- return;
- }
+ LiftoffRegister full_index = __ PeekToRegister(1, {});
+ Register index_reg =
+ BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
+ full_index, {}, kDoForceCheck);
+ if (index_reg == no_reg) return;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
index_reg, pinned);
- uint32_t offset = imm.offset;
+ uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: index_reg;
+ // TODO(clemensb): Skip this if memory is 64 bit.
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
if (offset) {
- __ emit_i32_addi(index_plus_offset, index_reg, offset);
- __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
- } else {
- __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
+ __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
@@ -3349,7 +3571,7 @@ class LiftoffCompiler {
__ CallRuntimeStub(WasmCode::kWasmAtomicNotify);
DefineSafepoint();
// Pop parameters from the value stack.
- __ cache_state()->stack_state.pop_back(2);
+ __ DropValues(2);
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
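
The AtomicWait/AtomicNotify changes above reorder the address computation: the 32-bit index is zero-extended to pointer width first, and the (now uintptr_t) constant offset is added afterwards with a pointer-sized add, so the sum cannot wrap in 32 bits. A simplified model of that arithmetic, with the operation names taken from the diff:

#include <cstdint>

uint64_t EffectiveIndexSketch(uint32_t index, uint64_t offset_imm) {
  uint64_t extended = static_cast<uint64_t>(index);  // emit_ptrsize_zeroextend_i32
  return extended + offset_imm;                      // emit_ptrsize_addi
}
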
@@ -3631,20 +3853,21 @@ class LiftoffCompiler {
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
LiftoffRegList pinned;
- Register seg_size_array =
+ Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(seg_size_array, DroppedElemSegments,
+ LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
kSystemPointerSize);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(seg_index, WasmValue(imm.index));
- // Set the length of the segment to '0' to drop it.
+ // Mark the segment as dropped by setting its value in the dropped
+ // segments list to 1.
LiftoffRegister one_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(one_reg, WasmValue(1));
- __ Store(seg_size_array, seg_index.gp(), 0, one_reg, StoreType::kI32Store,
- pinned);
+ __ Store(dropped_elem_segments, seg_index.gp(), 0, one_reg,
+ StoreType::kI32Store8, pinned);
}
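
A rough model of the bookkeeping ElemDrop relies on, as suggested by the code above: one byte per element segment in the instance, where 1 marks the segment as dropped (presumably consulted by table.init). All names below are illustrative:

#include <cstdint>
#include <vector>

struct InstanceSketch {
  std::vector<uint8_t> dropped_elem_segments;  // one byte per element segment
};

void ElemDropSketch(InstanceSketch& instance, uint32_t seg_index) {
  instance.dropped_elem_segments[seg_index] = 1;  // kI32Store8 of the constant 1
}
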
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
@@ -3714,104 +3937,452 @@ class LiftoffCompiler {
unsupported(decoder, kRefTypes, "table.fill");
}
+ void StructNew(FullDecoder* decoder,
+ const StructIndexImmediate<validate>& imm, const Value& rtt,
+ bool initial_values_on_stack) {
+ ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
+ WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
+ compilation_zone_);
+ ValueType sig_reps[] = {struct_value_type, rtt.type};
+ FunctionSig sig(1, 1, sig_reps);
+ LiftoffAssembler::VarState rtt_value =
+ __ cache_state()->stack_state.end()[-1];
+ __ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ // Drop the RTT.
+ __ cache_state()->stack_state.pop_back(1);
+
+ LiftoffRegister obj(kReturnRegister0);
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+ for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
+ i--;
+ int offset = StructFieldOffset(imm.struct_type, i);
+ ValueType field_type = imm.struct_type->field(i);
+ LiftoffRegister value = initial_values_on_stack
+ ? pinned.set(__ PopToRegister(pinned))
+ : pinned.set(__ GetUnusedRegister(
+ reg_class_for(field_type), pinned));
+ if (!initial_values_on_stack) {
+ if (!CheckSupportedType(decoder, field_type, "default value")) return;
+ SetDefaultValue(value, field_type, pinned);
+ }
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ pinned.clear(value);
+ }
+ __ PushRegister(struct_value_type, obj);
+ }
+
void StructNewWithRtt(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "struct.new_with_rtt");
+ StructNew(decoder, imm, rtt, true);
}
+
void StructNewDefault(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
const Value& rtt, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "struct.new_default_with_rtt");
+ StructNew(decoder, imm, rtt, false);
}
+
void StructGet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "struct.get");
+ const StructType* struct_type = field.struct_index.struct_type;
+ ValueType field_type = struct_type->field(field.index);
+ if (!CheckSupportedType(decoder, field_type, "field load")) return;
+ int offset = StructFieldOffset(struct_type, field.index);
+ LiftoffRegList pinned;
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
+ LiftoffRegister value =
+ pinned.set(__ GetUnusedRegister(reg_class_for(field_type), pinned));
+ LoadObjectField(value, obj.gp(), no_reg, offset, field_type, is_signed,
+ pinned);
+ __ PushRegister(field_type.Unpacked(), value);
}
+
void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field,
const Value& field_value) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "struct.set");
+ const StructType* struct_type = field.struct_index.struct_type;
+ ValueType field_type = struct_type->field(field.index);
+ int offset = StructFieldOffset(struct_type, field.index);
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ }
+
+ void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ ValueType rtt_type, bool initial_value_on_stack) {
+ // Max length check.
+ {
+ LiftoffRegister length =
+ __ LoadToRegister(__ cache_state()->stack_state.end()[-2], {});
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ __ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
+ static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ }
+ ValueType array_value_type = ValueType::Ref(imm.index, kNonNullable);
+ ValueType elem_type = imm.array_type->element_type();
+ int elem_size = elem_type.element_size_bytes();
+ // Allocate the array.
+ {
+ WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
+ compilation_zone_);
+ ValueType sig_reps[] = {array_value_type, rtt_type, kWasmI32, kWasmI32};
+ FunctionSig sig(1, 3, sig_reps);
+ LiftoffAssembler::VarState rtt_var =
+ __ cache_state()->stack_state.end()[-1];
+ LiftoffAssembler::VarState length_var =
+ __ cache_state()->stack_state.end()[-2];
+ LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
+ __ LoadConstant(elem_size_reg, WasmValue(elem_size));
+ LiftoffAssembler::VarState elem_size_var(kWasmI32, elem_size_reg, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor,
+ {rtt_var, length_var, elem_size_var});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ // Drop the RTT.
+ __ cache_state()->stack_state.pop_back(1);
+ }
+
+ LiftoffRegister obj(kReturnRegister0);
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+ LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
+ LiftoffRegister value = initial_value_on_stack
+ ? pinned.set(__ PopToRegister(pinned))
+ : pinned.set(__ GetUnusedRegister(
+ reg_class_for(elem_type), pinned));
+ if (!initial_value_on_stack) {
+ if (!CheckSupportedType(decoder, elem_type, "default value")) return;
+ SetDefaultValue(value, elem_type, pinned);
+ }
+
+ // Initialize the array's elements.
+ LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(
+ offset,
+ WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
+ LiftoffRegister end_offset = length;
+ if (elem_type.element_size_log2() != 0) {
+ __ emit_i32_shli(end_offset.gp(), length.gp(),
+ elem_type.element_size_log2());
+ }
+ __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
+ Label loop, done;
+ __ bind(&loop);
+ __ emit_cond_jump(kUnsignedGreaterEqual, &done, kWasmI32, offset.gp(),
+ end_offset.gp());
+ StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_type);
+ __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
+ __ emit_jump(&loop);
+
+ __ bind(&done);
+ __ PushRegister(array_value_type, obj);
}
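
The initialization loop above walks byte offsets from the array header up to header + (length << log2(element size)), storing the fill value once per element. The same arithmetic in plain C++, with kHeaderSize as an assumed stand-in for WasmArray::kHeaderSize:

#include <cstdint>
#include <cstring>

void FillArraySketch(uint8_t* obj, uint32_t length, uint32_t elem_size_log2,
                     const void* value) {
  const uint32_t kHeaderSize = 16;  // assumption, not the real header size
  uint32_t elem_size = 1u << elem_size_log2;
  uint32_t offset = kHeaderSize;
  uint32_t end_offset = kHeaderSize + (length << elem_size_log2);
  while (offset < end_offset) {  // loop exits on kUnsignedGreaterEqual
    std::memcpy(obj + offset, value, elem_size);
    offset += elem_size;
  }
}
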
void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
- const Value& length, const Value& initial_value,
+ const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "array.new_with_rtt");
+ ArrayNew(decoder, imm, rtt.type, true);
}
+
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "array.new_default_with_rtt");
+ ArrayNew(decoder, imm, rtt.type, false);
}
+
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm, const Value& index,
- bool is_signed, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "array.get");
+ const ArrayIndexImmediate<validate>& imm,
+ const Value& index_val, bool is_signed, Value* result) {
+ LiftoffRegList pinned;
+ LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
+ LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
+ BoundsCheck(decoder, array, index, pinned);
+ ValueType elem_type = imm.array_type->element_type();
+ if (!CheckSupportedType(decoder, elem_type, "array load")) return;
+ int elem_size_shift = elem_type.element_size_log2();
+ if (elem_size_shift != 0) {
+ __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
+ }
+ LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
+ LoadObjectField(value, array.gp(), index.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
+ elem_type, is_signed, pinned);
+ __ PushRegister(elem_type.Unpacked(), value);
}
+
void ArraySet(FullDecoder* decoder, const Value& array_obj,
- const ArrayIndexImmediate<validate>& imm, const Value& index,
- const Value& value) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "array.set");
+ const ArrayIndexImmediate<validate>& imm,
+ const Value& index_val, const Value& value_val) {
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
+ LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
+ BoundsCheck(decoder, array, index, pinned);
+ ValueType elem_type = imm.array_type->element_type();
+ int elem_size_shift = elem_type.element_size_log2();
+ if (elem_size_shift != 0) {
+ __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
+ }
+ StoreObjectField(array.gp(), index.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
+ value, pinned, elem_type);
}
+
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "array.len");
+ LiftoffRegList pinned;
+ LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+ MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
+ LiftoffRegister len = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
+ LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kWasmI32, false,
+ pinned);
+ __ PushRegister(kWasmI32, len);
}
+ // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
+ constexpr static int kI31To32BitSmiShift = 33;
+
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "i31.new");
+ LiftoffRegister src = __ PopToRegister();
+ LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
+ if (SmiValuesAre31Bits()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize);
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ __ emit_i64_shli(dst, src, kI31To32BitSmiShift);
+ }
+ __ PushRegister(kWasmI31Ref, dst);
}
+
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "i31.get_s");
+ LiftoffRegister src = __ PopToRegister();
+ LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
+ if (SmiValuesAre31Bits()) {
+ __ emit_i32_sari(dst.gp(), src.gp(), kSmiTagSize);
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ __ emit_i64_sari(dst, src, kI31To32BitSmiShift);
+ }
+ __ PushRegister(kWasmI32, dst);
}
+
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "i31.get_u");
+ LiftoffRegister src = __ PopToRegister();
+ LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
+ if (SmiValuesAre31Bits()) {
+ __ emit_i32_shri(dst.gp(), src.gp(), kSmiTagSize);
+ } else {
+ DCHECK(SmiValuesAre32Bits());
+ __ emit_i64_shri(dst, src, kI31To32BitSmiShift);
+ }
+ __ PushRegister(kWasmI32, dst);
}
void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "rtt.canon");
+ LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
+ RootIndex index;
+ switch (imm.type.representation()) {
+ case wasm::HeapType::kEq:
+ index = RootIndex::kWasmRttEqrefMap;
+ break;
+ case wasm::HeapType::kExtern:
+ index = RootIndex::kWasmRttExternrefMap;
+ break;
+ case wasm::HeapType::kFunc:
+ index = RootIndex::kWasmRttFuncrefMap;
+ break;
+ case wasm::HeapType::kI31:
+ index = RootIndex::kWasmRttI31refMap;
+ break;
+ case wasm::HeapType::kAny:
+ index = RootIndex::kWasmRttAnyrefMap;
+ break;
+ case wasm::HeapType::kBottom:
+ UNREACHABLE();
+ default:
+ // User-defined type.
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps);
+ __ LoadTaggedPointer(
+ rtt.gp(), rtt.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
+ imm.type.ref_index()),
+ {});
+ __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
+ return;
+ }
+ LOAD_INSTANCE_FIELD(rtt.gp(), IsolateRoot, kSystemPointerSize);
+ __ LoadTaggedPointer(rtt.gp(), rtt.gp(), no_reg,
+ IsolateData::root_slot_offset(index), {});
+ __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
}
+
void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
const Value& parent, Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "rtt.sub");
+ ValueType parent_value_type = parent.type;
+ ValueType rtt_value_type =
+ ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
+ WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
+ ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
+ FunctionSig sig(1, 2, sig_reps);
+ LiftoffAssembler::VarState parent_var =
+ __ cache_state()->stack_state.end()[-1];
+ LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
+ __ LoadConstant(type_reg, WasmValue(imm.type.representation()));
+ LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ // Drop the parent RTT.
+ __ cache_state()->stack_state.pop_back(1);
+ __ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
+ }
+
+ // Falls through on match (=successful type check).
+ // Returns the register containing the object.
+ LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
+ const Value& rtt, Label* no_match,
+ LiftoffRegList pinned = {},
+ Register opt_scratch = no_reg) {
+ Label match;
+ LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ bool obj_can_be_i31 = IsSubtypeOf(kWasmI31Ref, obj.type, decoder->module_);
+ bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
+ bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
+ if (i31_check_only) {
+ __ emit_smi_check(obj_reg.gp(), no_match,
+ LiftoffAssembler::kJumpOnNotSmi);
+ // Emit no further code, just fall through to {match}.
+ } else {
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ if (obj_can_be_i31) {
+ DCHECK(!rtt_is_i31);
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+ }
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
+ }
+
+ // At this point, the object is neither null nor an i31ref. Perform
+ // a regular type check. Check for exact match first.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ // {tmp1} now holds the object's map.
+ __ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
+
+ // If the object isn't guaranteed to be an array or struct, check that.
+ // Subsequent code wouldn't handle e.g. funcrefs.
+ if (!is_data_ref_type(obj.type, decoder->module_)) {
+ EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
+ }
+
+ // Constant-time subtyping check: load exactly one candidate RTT from the
+ // supertypes list.
+ // Step 1: load the WasmTypeInfo into {tmp1}.
+ constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+ Map::kConstructorOrBackPointerOrNativeContextOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset,
+ pinned);
+ // Step 2: load the super types list into {tmp1}.
+ constexpr int kSuperTypesOffset =
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
+ pinned);
+ // Step 3: check the list's length.
+ LiftoffRegister list_length = tmp2;
+ __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
+ rtt.type.depth());
+ // Step 4: load the candidate list slot into {tmp1}, and compare it.
+ __ LoadTaggedPointer(
+ tmp1.gp(), tmp1.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
+ pinned);
+ __ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
+ // Fall through to {match}.
+ }
+ __ bind(&match);
+ return obj_reg;
}
void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "ref.test");
+ Value* result_val) {
+ Label return_false, done;
+ LiftoffRegList pinned;
+ LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+
+ SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
+
+ __ LoadConstant(result, WasmValue(1));
+ // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
+ __ emit_jump(&done);
+
+ __ bind(&return_false);
+ __ LoadConstant(result, WasmValue(0));
+ __ bind(&done);
+ __ PushRegister(kWasmI32, result);
}
+
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "ref.cast");
+ Label* trap_label = AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapIllegalCast);
+ LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
+ __ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
+ obj_reg);
}
+
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result_on_branch, uint32_t depth) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "br_on_cast");
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(depth)->br_merge()->arity);
+ }
+
+ Label cont_false;
+ LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
+
+ __ PushRegister(rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
+ obj_reg);
+ BrOrRet(decoder, depth);
+
+ __ bind(&cont_false);
+ // Drop the branch's value, restore original value.
+ Drop(decoder);
+ __ PushRegister(obj.type, obj_reg);
}
- void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
- // TODO(7748): Implement.
- unsupported(decoder, kGC, "");
+ void Forward(FullDecoder* decoder, const Value& from, Value* to) {
+ // Nothing to do here.
}
private:
@@ -3819,16 +4390,7 @@ class LiftoffCompiler {
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) {
- if (!CheckSupportedType(decoder,
- FLAG_experimental_liftoff_extern_ref
- ? kSupportedTypes
- : kSupportedTypesWithoutRefs,
- ret, "return")) {
- // TODO(7581): Remove this once reference-types are full supported.
- if (!ret.is_reference_type()) {
- return;
- }
- }
+ if (!CheckSupportedType(decoder, ret, "return")) return;
}
auto call_descriptor =
@@ -3901,25 +4463,11 @@ class LiftoffCompiler {
return unsupported(decoder, kRefTypes, "table index != 0");
}
for (ValueType ret : imm.sig->returns()) {
- if (!CheckSupportedType(decoder,
- FLAG_experimental_liftoff_extern_ref
- ? kSupportedTypes
- : kSupportedTypesWithoutRefs,
- ret, "return")) {
- return;
- }
+ if (!CheckSupportedType(decoder, ret, "return")) return;
}
- // Pop the index.
- Register index = __ PopToRegister().gp();
- // If that register is still being used after popping, we move it to another
- // register, because we want to modify that register.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register new_index =
- __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index)).gp();
- __ Move(new_index, index, kWasmI32);
- index = new_index;
- }
+ // Pop the index. We'll modify the register's contents later.
+ Register index = __ PopToModifiableRegister().gp();
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
// Get three temporary registers.
@@ -4036,6 +4584,119 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor);
}
+ void LoadNullValue(Register null, LiftoffRegList pinned) {
+ LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize);
+ __ LoadTaggedPointer(null, null, no_reg,
+ IsolateData::root_slot_offset(RootIndex::kNullValue),
+ pinned);
+ }
+
+ void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
+ LiftoffRegList pinned, ValueType type) {
+ if (!type.is_nullable()) return;
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
+ LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+ LoadNullValue(null.gp(), pinned);
+ __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
+ null.gp());
+ }
+
+ void BoundsCheck(FullDecoder* decoder, LiftoffRegister array,
+ LiftoffRegister index, LiftoffRegList pinned) {
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ LiftoffRegister length = __ GetUnusedRegister(kGpReg, pinned);
+ constexpr int kLengthOffset =
+ wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
+ __ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
+ pinned);
+ __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label,
+ kWasmI32, index.gp(), length.gp());
+ }
+
+ int StructFieldOffset(const StructType* struct_type, int field_index) {
+ return wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize +
+ struct_type->field_offset(field_index));
+ }
+
+ void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
+ int offset, ValueType type, bool is_signed,
+ LiftoffRegList pinned) {
+ if (type.is_reference_type()) {
+ __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
+ } else {
+ // Primitive type.
+ LoadType load_type = LoadType::ForValueType(type, is_signed);
+ __ Load(dst, src, offset_reg, offset, load_type, pinned);
+ }
+ }
+
+ void StoreObjectField(Register obj, Register offset_reg, int offset,
+ LiftoffRegister value, LiftoffRegList pinned,
+ ValueType type) {
+ if (type.is_reference_type()) {
+ __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
+ } else {
+ // Primitive type.
+ StoreType store_type = StoreType::ForValueType(type);
+ __ Store(obj, offset_reg, offset, value, store_type, pinned);
+ }
+ }
+
+ void SetDefaultValue(LiftoffRegister reg, ValueType type,
+ LiftoffRegList pinned) {
+ DCHECK(type.is_defaultable());
+ switch (type.kind()) {
+ case ValueType::kI8:
+ case ValueType::kI16:
+ case ValueType::kI32:
+ return __ LoadConstant(reg, WasmValue(int32_t{0}));
+ case ValueType::kI64:
+ return __ LoadConstant(reg, WasmValue(int64_t{0}));
+ case ValueType::kF32:
+ return __ LoadConstant(reg, WasmValue(float{0.0}));
+ case ValueType::kF64:
+ return __ LoadConstant(reg, WasmValue(double{0.0}));
+ case ValueType::kS128:
+ DCHECK(CpuFeatures::SupportsWasmSimd128());
+ return __ emit_s128_xor(reg, reg, reg);
+ case ValueType::kOptRef:
+ return LoadNullValue(reg.gp(), pinned);
+ case ValueType::kRtt:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
+ case ValueType::kRef:
+ UNREACHABLE();
+ }
+ }
+
+ void EmitDataRefCheck(Register map, Label* not_data_ref, LiftoffRegister tmp,
+ LiftoffRegList pinned) {
+ constexpr int kInstanceTypeOffset =
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset);
+ __ Load(tmp, map, no_reg, kInstanceTypeOffset, LoadType::kI32Load16U,
+ pinned);
+ // We're going to test a range of instance types with a single unsigned
+ // comparison. Statically assert that this is safe, i.e. that there are
+ // no instance types between array and struct types that might possibly
+ // occur (i.e. internal types are OK, types of Wasm objects are not).
+ // At the time of this writing:
+ // WASM_ARRAY_TYPE = 180
+ // WASM_CAPI_FUNCTION_DATA_TYPE = 181
+ // WASM_STRUCT_TYPE = 182
+ // The specific values don't matter; the relative order does.
+ static_assert(
+ WASM_STRUCT_TYPE == static_cast<InstanceType>(WASM_ARRAY_TYPE + 2),
+ "Relying on specific InstanceType values here");
+ static_assert(WASM_CAPI_FUNCTION_DATA_TYPE ==
+ static_cast<InstanceType>(WASM_ARRAY_TYPE + 1),
+ "Relying on specific InstanceType values here");
+ __ emit_i32_subi(tmp.gp(), tmp.gp(), WASM_ARRAY_TYPE);
+ __ emit_i32_cond_jumpi(kUnsignedGreaterThan, not_data_ref, tmp.gp(),
+ WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
+ }
+
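
EmitDataRefCheck above folds the two-sided instance-type test into a single unsigned comparison: after subtracting the lower bound, any value below the range wraps around to a large unsigned number. The trick in isolation (the concrete type numbers are only the illustrative ones quoted in the comment):

#include <cstdint>

// Requires lo <= hi. If x < lo, the subtraction wraps to a huge unsigned
// value, so the single comparison also rejects values below the range.
bool InRangeSketch(uint32_t x, uint32_t lo, uint32_t hi) {
  return (x - lo) <= (hi - lo);
}

// With the instance-type values quoted in the comment above (180..182):
// InRangeSketch(181, 180, 182) == true, InRangeSketch(179, 180, 182) == false.
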
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
LiftoffAssembler asm_;
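
Before moving on to the next file: the SubtypeCheck helper added above implements a constant-time RTT subtype test, one length check plus one load at a statically known depth into the object's supertypes list, independent of how deep the type hierarchy is. A conceptual model with plain containers (TypeId and TypeInfo are stand-ins, not V8 types):

#include <cstdint>
#include <vector>

using TypeId = uint32_t;  // stand-in for the map/RTT pointer

struct TypeInfo {
  TypeId id;
  std::vector<TypeId> supertypes;  // ordered from the root of the hierarchy down
};

bool IsSubtypeSketch(const TypeInfo& obj_type, TypeId rtt, uint32_t rtt_depth) {
  if (obj_type.id == rtt) return true;                        // exact match
  if (obj_type.supertypes.size() <= rtt_depth) return false;  // list too short
  return obj_type.supertypes[rtt_depth] == rtt;               // one load, one compare
}
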
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 285af7dac0..bd2e6ed4c2 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -58,6 +58,8 @@ static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
case ValueType::kF32:
case ValueType::kF64:
return kFpReg;
+ case ValueType::kI8:
+ case ValueType::kI16:
case ValueType::kI32:
return kGpReg;
case ValueType::kI64:
@@ -66,6 +68,7 @@ static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
return kNeedS128RegPair ? kFpRegPair : kFpReg;
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
return kGpReg;
default:
return kNoReg; // unsupported type
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 5c78eca319..c12eae4c39 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
+#include "src/base/platform/wrappers.h"
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
@@ -13,6 +15,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
// half
// slot Frame
// -----+--------------------+---------------------------
@@ -63,6 +90,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
assm->lw(dst.gp(), src);
break;
case ValueType::kI64:
@@ -87,6 +115,9 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
MemOperand dst(base, offset);
switch (type.kind()) {
case ValueType::kI32:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
assm->Usw(src.gp(), dst);
break;
case ValueType::kI64:
@@ -122,6 +153,9 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->addiu(sp, sp, -sizeof(double));
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
+ case ValueType::kOptRef:
+ assm->push(reg.gp());
+ break;
default:
UNREACHABLE();
}
@@ -300,7 +334,14 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
Pop(ra, fp);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
@@ -385,17 +426,41 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- DCHECK_GE(offset_imm, 0);
STATIC_ASSERT(kTaggedSize == kInt32Size);
Load(LiftoffRegister(dst), src_addr, offset_reg,
static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+ Sw(src.gp(), dst_op);
+ // The write barrier.
+ Label write_barrier;
+ Label exit;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ Branch(USE_DELAY_SLOT, &exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
+ Addu(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
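
The StoreTaggedPointer implementation above follows the usual generational write-barrier shape: perform the store, then take the slow path only if the host object's page has the "pointers from here are interesting" flag, the stored value is not a Smi, and the value's page has the "pointers to here are interesting" flag. A heavily simplified model; all structures and flags below are stand-ins:

struct PageSketch {
  bool pointers_from_here_are_interesting;
  bool pointers_to_here_are_interesting;
};
struct ObjectSketch {
  PageSketch* page;
  bool is_smi;
};

void StoreWithBarrierSketch(ObjectSketch* host, ObjectSketch** slot,
                            ObjectSketch* value,
                            void (*record_write)(ObjectSketch*, ObjectSketch**)) {
  *slot = value;  // the Sw/Sd above
  if (!host->page->pointers_from_here_are_interesting) return;
  if (value->is_smi) return;
  if (!value->page->pointers_to_here_are_interesting) return;
  record_write(host, slot);  // CallRecordWriteStub in the real code
}
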
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -656,6 +721,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI32:
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
sw(reg.gp(), dst);
break;
case ValueType::kI64:
@@ -820,6 +886,7 @@ I32_BINOP(xor, xor_)
// clang-format off
I32_BINOP_I(add, Addu)
+I32_BINOP_I(sub, Subu)
I32_BINOP_I(and, And)
I32_BINOP_I(or, Or)
I32_BINOP_I(xor, Xor)
@@ -861,9 +928,15 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP_I
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
+ int64_t imm) {
+ LiftoffRegister imm_reg =
+ GetUnusedRegister(kGpRegPair, LiftoffRegList::ForRegs(dst, lhs));
+ int32_t imm_low_word = static_cast<int32_t>(imm);
+ int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
+ TurboAssembler::li(imm_reg.low_gp(), imm_low_word);
+ TurboAssembler::li(imm_reg.high_gp(), imm_high_word);
TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
- lhs.high_gp(), imm,
+ lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),
kScratchReg, kScratchReg2);
}
@@ -1260,25 +1333,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32SConvertF64: {
- if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
- IsFp64Mode()) {
- LiftoffRegister rounded =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
- LiftoffRegister converted_back =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
-
- // Real conversion.
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
- TurboAssembler::Trunc_w_d(kScratchDoubleReg, rounded.fp());
- mfc1(dst.gp(), kScratchDoubleReg);
+ LiftoffRegister scratch =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
- // Checking if trap.
- cvt_d_w(converted_back.fp(), kScratchDoubleReg);
- TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
- TurboAssembler::BranchFalseF(trap);
- return true;
- }
- bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
+ // Try a conversion to a signed integer.
+ trunc_w_d(kScratchDoubleReg, src.fp());
+ mfc1(dst.gp(), kScratchDoubleReg);
+ // Retrieve the FCSR.
+ cfc1(scratch.gp(), FCSR);
+ // Check for overflow and NaNs.
+ And(scratch.gp(), scratch.gp(),
+ kFCSROverflowCauseMask | kFCSRUnderflowCauseMask |
+ kFCSRInvalidOpCauseMask);
+ // If any of these exceptions was raised, trap.
+ Branch(trap, ne, scratch.gp(), Operand(zero_reg));
return true;
}
case kExprI32UConvertF64: {
@@ -1419,22 +1487,36 @@ void LiftoffAssembler::emit_jump(Register target) {
TurboAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
- if (rhs != no_reg) {
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
- } else {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK_EQ(type, kWasmI32);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK(type == kWasmI32 ||
+ (type.is_reference_type() &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
sltiu(dst, src, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs || dst == rhs) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
@@ -1459,7 +1541,7 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
}
namespace liftoff {
-inline Condition cond_make_unsigned(Condition cond) {
+inline LiftoffCondition cond_make_unsigned(LiftoffCondition cond) {
switch (cond) {
case kSignedLessThan:
return kUnsignedLessThan;
@@ -1475,15 +1557,17 @@ inline Condition cond_make_unsigned(Condition cond) {
}
} // namespace liftoff
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Label low, cont;
// For signed i64 comparisons, we still need to use unsigned comparison for
// the low word (the only bit carrying signedness information is the MSB in
// the high word).
- Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
+ Condition unsigned_cond =
+ liftoff::ToCondition(liftoff::cond_make_unsigned(liftoff_cond));
Register tmp = dst;
if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
@@ -1512,7 +1596,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
bool* predicate) {
switch (condition) {
case kEqual:
@@ -1542,9 +1626,10 @@ inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
TurboAssembler::CompareIsNanF32(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan);
@@ -1560,7 +1645,8 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1571,9 +1657,10 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
bind(&cont);
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
TurboAssembler::CompareIsNanF64(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan);
@@ -1589,7 +1676,8 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1607,14 +1695,30 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
bailout(kSimd, "load extend and load splat unimplemented");
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -1659,6 +1763,35 @@ void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_splat");
}
+#define SIMD_BINOP(name, ilv_instr, dotp_instr) \
+ void LiftoffAssembler::emit_##name( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ MSARegister dst_msa = MSARegister::from_code(dst.liftoff_code()); \
+ MSARegister src1_msa = MSARegister::from_code(src1.liftoff_code()); \
+ MSARegister src2_msa = MSARegister::from_code(src2.liftoff_code()); \
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); \
+ ilv_instr(kSimd128ScratchReg, kSimd128RegZero, src1_msa); \
+ ilv_instr(kSimd128RegZero, kSimd128RegZero, src2_msa); \
+ dotp_instr(dst_msa, kSimd128ScratchReg, kSimd128RegZero); \
+ }
+
+SIMD_BINOP(i16x8_extmul_low_i8x16_s, ilvr_b, dotp_s_h)
+SIMD_BINOP(i16x8_extmul_high_i8x16_s, ilvl_b, dotp_s_h)
+SIMD_BINOP(i16x8_extmul_low_i8x16_u, ilvr_b, dotp_u_h)
+SIMD_BINOP(i16x8_extmul_high_i8x16_u, ilvl_b, dotp_u_h)
+
+SIMD_BINOP(i32x4_extmul_low_i16x8_s, ilvr_h, dotp_s_w)
+SIMD_BINOP(i32x4_extmul_high_i16x8_s, ilvl_h, dotp_s_w)
+SIMD_BINOP(i32x4_extmul_low_i16x8_u, ilvr_h, dotp_u_w)
+SIMD_BINOP(i32x4_extmul_high_i16x8_u, ilvl_h, dotp_u_w)
+
+SIMD_BINOP(i64x2_extmul_low_i32x4_s, ilvr_w, dotp_s_d)
+SIMD_BINOP(i64x2_extmul_high_i32x4_s, ilvl_w, dotp_s_d)
+SIMD_BINOP(i64x2_extmul_low_i32x4_u, ilvr_w, dotp_u_d)
+SIMD_BINOP(i64x2_extmul_high_i32x4_u, ilvl_w, dotp_u_d)
+
+#undef SIMD_BINOP
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_eq");
@@ -2158,6 +2291,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i64x2_neg");
}
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i64x2_shl");
@@ -2620,6 +2758,24 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
addiu(sp, sp, gp_offset);
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index b97c49437f..b97b423e20 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#include "src/base/platform/wrappers.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -14,6 +15,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
// Liftoff Frames.
//
// slot Frame
@@ -47,12 +73,14 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+template <typename T>
inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
- Register offset, uint32_t offset_imm) {
- if (is_uint31(offset_imm)) {
- if (offset == no_reg) return MemOperand(addr, offset_imm);
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
assm->daddu(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm);
+ return MemOperand(kScratchReg, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
assm->li(kScratchReg, offset_imm);
@@ -72,6 +100,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
assm->Ld(dst.gp(), src);
break;
case ValueType::kF32:
@@ -96,6 +125,9 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usw(src.gp(), dst);
break;
case ValueType::kI64:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
assm->Usd(src.gp(), dst);
break;
case ValueType::kF32:
@@ -285,7 +317,14 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
Pop(ra, fp);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
@@ -369,28 +408,27 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- DCHECK_GE(offset_imm, 0);
STATIC_ASSERT(kTaggedSize == kInt64Size);
- Load(LiftoffRegister(dst), src_addr, offset_reg,
- static_cast<uint32_t>(offset_imm), LoadType::kI64Load, pinned);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld(dst, src_op);
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
- DCHECK_GE(offset_imm, 0);
- DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- Sd(src.gp(), MemOperand(dst_addr, offset_imm));
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Sd(src.gp(), dst_op);
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
&write_barrier);
- b(&exit);
+ Branch(USE_DELAY_SLOT, &exit);
bind(&write_barrier);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
@@ -403,7 +441,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
@@ -458,7 +496,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
@@ -508,56 +546,56 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicLoad");
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicStore");
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
void LiftoffAssembler::AtomicCompareExchange(
- Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
@@ -618,6 +656,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI64:
case ValueType::kRef:
case ValueType::kOptRef:
+ case ValueType::kRtt:
Sd(reg.gp(), dst);
break;
case ValueType::kF32:
@@ -797,6 +836,7 @@ I32_BINOP(xor, xor_)
// clang-format off
I32_BINOP_I(add, Addu)
+I32_BINOP_I(sub, Subu)
I32_BINOP_I(and, And)
I32_BINOP_I(or, Or)
I32_BINOP_I(xor, Xor)
@@ -836,6 +876,11 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm));
+}
+
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
@@ -908,7 +953,6 @@ I64_BINOP(xor, xor_)
}
// clang-format off
-I64_BINOP_I(add, Daddu)
I64_BINOP_I(and, And)
I64_BINOP_I(or, Or)
I64_BINOP_I(xor, Xor)
@@ -1297,22 +1341,36 @@ void LiftoffAssembler::emit_jump(Register target) {
TurboAssembler::Jump(target);
}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
- if (rhs != no_reg) {
- TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
- } else {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(type == kWasmI32 || type == kWasmI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((type == kWasmI32 || type == kWasmI64) ||
+ (type.is_reference_type() &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
sltiu(dst, src, 1);
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs || dst == rhs) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
@@ -1332,9 +1390,10 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
sltiu(dst, src.gp(), 1);
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Register tmp = dst;
if (dst == lhs.gp() || dst == rhs.gp()) {
tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
@@ -1353,7 +1412,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
bool* predicate) {
switch (condition) {
case kEqual:
@@ -1403,9 +1462,10 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
TurboAssembler::CompareIsNanF32(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan);
@@ -1421,7 +1481,8 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1432,9 +1493,10 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
bind(&cont);
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
Label not_nan, cont;
TurboAssembler::CompareIsNanF64(lhs, rhs);
TurboAssembler::BranchFalseF(&not_nan);
@@ -1450,7 +1512,8 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1468,15 +1531,24 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- Daddu(scratch, src_addr, offset_reg);
- MemOperand src_op = MemOperand(scratch, offset_imm);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
MSARegister dst_msa = dst.fp().toW();
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -1536,6 +1608,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -1617,6 +1696,29 @@ void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
fill_d(dst.fp().toW(), kScratchReg);
}
+#define SIMD_BINOP(name1, name2, type) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ TurboAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \
+ src2.fp().toW()); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ TurboAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \
+ src2.fp().toW()); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s, MSAS8)
+SIMD_BINOP(i16x8, i8x16_u, MSAU8)
+
+SIMD_BINOP(i32x4, i16x8_s, MSAS16)
+SIMD_BINOP(i32x4, i16x8_u, MSAU16)
+
+SIMD_BINOP(i64x2, i32x4_s, MSAS32)
+SIMD_BINOP(i64x2, i32x4_u, MSAU32)
+
+#undef SIMD_BINOP
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1754,7 +1856,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
MSARegister dst_msa = dst.fp().toW();
uint64_t vals[2];
- memcpy(vals, imms, sizeof(vals));
+ base::Memcpy(vals, imms, sizeof(vals));
li(kScratchReg, vals[0]);
insert_d(dst_msa, 0, kScratchReg);
li(kScratchReg, vals[1]);
@@ -2174,6 +2276,15 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ srli_d(kSimd128RegZero, src.fp().toW(), 63);
+ shf_w(kSimd128ScratchReg, kSimd128RegZero, 0x02);
+ slli_d(kSimd128ScratchReg, kSimd128ScratchReg, 1);
+ or_v(kSimd128RegZero, kSimd128RegZero, kSimd128ScratchReg);
+ copy_u_b(dst.gp(), kSimd128RegZero, 0);
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_d(kSimd128ScratchReg, rhs.gp());
@@ -2795,6 +2906,24 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
daddiu(sp, sp, gp_offset);
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate
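
The emit_i64x2_bitmask sequence added for mips64 above (shift right by 63, shuffle, shift, or, copy byte) packs the sign bit of each 64-bit lane into the low bits of a general-purpose register. A portable sketch of the semantics being implemented, not of the MSA instruction sequence:

#include <cstdint>
#include <cstdio>

// i64x2.bitmask: output bit i is the sign bit of 64-bit lane i.
uint32_t I64x2BitMask(const int64_t lanes[2]) {
  uint32_t mask = 0;
  for (int i = 0; i < 2; ++i) {
    mask |= static_cast<uint32_t>(static_cast<uint64_t>(lanes[i]) >> 63) << i;
  }
  return mask;
}

int main() {
  int64_t v[2] = {-1, 42};
  std::printf("bitmask = %u\n", I64x2BitMask(v));  // prints 1: only lane 0 is negative
}
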
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index f75e9db459..1a2e950615 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -57,7 +57,9 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
bailout(kUnsupportedArchitecture, "PrepareTailCall");
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
@@ -112,6 +114,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
@@ -119,70 +122,70 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicLoad");
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicStore");
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
void LiftoffAssembler::AtomicCompareExchange(
- Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
@@ -335,7 +338,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
-UNIMPLEMENTED_I32_BINOP(i32_sub)
+UNIMPLEMENTED_I32_BINOP_I(i32_sub)
UNIMPLEMENTED_I32_BINOP(i32_mul)
UNIMPLEMENTED_I32_BINOP_I(i32_and)
UNIMPLEMENTED_I32_BINOP_I(i32_or)
@@ -343,7 +346,7 @@ UNIMPLEMENTED_I32_BINOP_I(i32_xor)
UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP_I(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_add)
UNIMPLEMENTED_I64_BINOP(i64_sub)
UNIMPLEMENTED_I64_BINOP(i64_mul)
#ifdef V8_TARGET_ARCH_PPC64
@@ -407,6 +410,11 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ bailout(kUnsupportedArchitecture, "i64_addi");
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -511,18 +519,25 @@ void LiftoffAssembler::emit_jump(Register target) {
bailout(kUnsupportedArchitecture, "emit_jump");
}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ bailout(kUnsupportedArchitecture, "emit_i32_cond_jumpi");
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
@@ -530,20 +545,20 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
@@ -556,13 +571,25 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
}
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
bailout(kSimd, "Load transform unimplemented");
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ bailout(kUnsupportedArchitecture, "emit_smi_check");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -833,6 +860,35 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i64x2mul");
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_low_i32x4_s unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_low_i32x4_u unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_high_i32x4_s unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
+}
+
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
@@ -948,6 +1004,30 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_low_i16x8_s unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_low_i16x8_u unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_high_i16x8_s unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1087,6 +1167,30 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_low_i8x16_s unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_low_i8x16_u unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_extmul_high_i8x16_u unsupported");
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -1532,6 +1636,13 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
bailout(kUnsupportedArchitecture, "PopRegisters");
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ bailout(kRefTypes, "RecordSpillsInSafepoint");
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
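
A change that runs through all of these ports is widening offset_imm from uint32_t to uintptr_t. The addressing helpers can then fold the immediate into the operand only when it fits the instruction's positive 32-bit displacement, and otherwise must materialize it in a scratch register first, as in the mips64 and x64 GetMemOp hunks. A simplified standalone sketch of that decision, with a plain struct standing in for the assembler's operand type:

#include <cstdint>
#include <cstdio>

struct MemOperand {  // stand-in for the assembler's operand type
  int base;          // base register number
  int index;         // index register number, -1 if unused
  int32_t disp;      // displacement encoded directly in the instruction
};

constexpr int kScratchReg = 99;  // stand-in scratch register

MemOperand GetMemOp(int addr_reg, int offset_reg, uintptr_t offset_imm) {
  if (offset_imm < (uintptr_t{1} << 31)) {  // fits a positive 32-bit displacement
    return {addr_reg, offset_reg, static_cast<int32_t>(offset_imm)};
  }
  // Too large: model "load offset_imm into scratch, add the offset register".
  return {addr_reg, kScratchReg, 0};
}

int main() {
  MemOperand small = GetMemOp(/*addr_reg=*/1, /*offset_reg=*/-1, 0x1000);
  MemOperand large = GetMemOp(/*addr_reg=*/1, /*offset_reg=*/2, uintptr_t{3} << 30);
  std::printf("small: disp=%d  large: index=%d disp=%d\n",
              small.disp, large.index, large.disp);
}
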
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index a88baa1146..1161595705 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -5,8 +5,11 @@
#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/assembler.h"
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
-
+#include "src/wasm/simd-shuffle.h"
namespace v8 {
namespace internal {
@@ -14,6 +17,47 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ case kUnsignedLessEqual:
+ return le;
+ case kSignedGreaterEqual:
+ case kUnsignedGreaterEqual:
+ return ge;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ }
+}
+
+inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ case kUnequal:
+ case kSignedLessThan:
+ case kSignedLessEqual:
+ case kSignedGreaterThan:
+ case kSignedGreaterEqual:
+ return true;
+ case kUnsignedLessThan:
+ case kUnsignedLessEqual:
+ case kUnsignedGreaterThan:
+ case kUnsignedGreaterEqual:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+}
+
// half
// slot Frame
// -----+--------------------+---------------------------
@@ -38,17 +82,20 @@ namespace liftoff {
//
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
-inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
- int32_t half_offset =
- half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
- return MemOperand(fp, -offset + half_offset);
+inline MemOperand GetStackSlot(uint32_t offset) {
+ return MemOperand(fp, -offset);
}
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
bailout(kUnsupportedArchitecture, "PrepareStackFrame");
- return 0;
+ int offset = pc_offset();
+ lay(sp, MemOperand(sp));
+ return offset;
}
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
@@ -56,13 +103,32 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
bailout(kUnsupportedArchitecture, "PrepareTailCall");
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
- bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+
+ constexpr int LayInstrSize = 6;
+
+#ifdef USE_SIMULATOR
+  // When using the simulator, deal with Liftoff allocating the stack frame
+  // before checking it.
+  // TODO(arm): Remove this once the stack check mechanism is updated.
+ if (frame_size > KB / 2) {
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
+ return;
+ }
+#endif
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, LayInstrSize + kGap));
+ patching_assembler.lay(sp, MemOperand(sp, -frame_size));
}
void LiftoffAssembler::FinishCode() {}
-void LiftoffAssembler::AbortCompilation() {}
+void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
@@ -111,6 +177,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
@@ -118,70 +185,70 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicLoad");
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
bailout(kAtomics, "AtomicStore");
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAdd");
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
bailout(kAtomics, "AtomicExchange");
}
void LiftoffAssembler::AtomicCompareExchange(
- Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
@@ -250,26 +317,25 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// instructions per slot.
uint32_t remainder = size;
for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
- StoreP(r0, liftoff::GetHalfStackSlot(start + remainder, kLowWord));
- StoreP(r0, liftoff::GetHalfStackSlot(start + remainder, kHighWord));
+ StoreU64(r0, liftoff::GetStackSlot(start + remainder));
}
DCHECK(remainder == 4 || remainder == 0);
if (remainder) {
- StoreP(r0, liftoff::GetHalfStackSlot(start + remainder, kLowWord));
+ StoreU32(r0, liftoff::GetStackSlot(start + remainder));
}
} else {
// General case for bigger counts (9 instructions).
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
- SubP(r3, fp, Operand(start + size));
- SubP(r4, fp, Operand(start));
+ SubS64(r3, fp, Operand(start + size));
+ SubS64(r4, fp, Operand(start));
Label loop;
bind(&loop);
- StoreP(r0, MemOperand(r0));
+ StoreU64(r0, MemOperand(r0));
la(r0, MemOperand(r0, kSystemPointerSize));
- CmpLogicalP(r3, r4);
+ CmpU64(r3, r4);
bne(&loop);
pop(r4);
@@ -339,7 +405,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
-UNIMPLEMENTED_I32_BINOP(i32_sub)
+UNIMPLEMENTED_I32_BINOP_I(i32_sub)
UNIMPLEMENTED_I32_BINOP(i32_mul)
UNIMPLEMENTED_I32_BINOP_I(i32_and)
UNIMPLEMENTED_I32_BINOP_I(i32_or)
@@ -347,7 +413,7 @@ UNIMPLEMENTED_I32_BINOP_I(i32_xor)
UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP_I(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_add)
UNIMPLEMENTED_I64_BINOP(i64_sub)
UNIMPLEMENTED_I64_BINOP(i64_mul)
#ifdef V8_TARGET_ARCH_S390X
@@ -364,8 +430,6 @@ UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_min)
-UNIMPLEMENTED_FP_BINOP(f32_max)
UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
@@ -378,8 +442,6 @@ UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_min)
-UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
@@ -411,6 +473,47 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
return true;
}
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ bailout(kUnsupportedArchitecture, "i64_addi");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
+ return;
+ }
+ DoubleMin(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
+ return;
+ }
+ FloatMin(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
+ return;
+ }
+ DoubleMax(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
+ return;
+ }
+ FloatMax(dst, lhs, rhs);
+}
+
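
The f32/f64 min/max implementations added for s390 above use dedicated helpers (or vfmin/vfmax) because wasm's min/max semantics differ from C's fmin/fmax: any NaN input yields NaN, and -0 orders below +0. A standalone sketch of the required semantics (the helper name is mine, not V8's):

#include <cmath>
#include <cstdio>
#include <limits>

// Wasm f64.min: any NaN input produces NaN, and -0 is smaller than +0.
// (std::fmin instead returns the numeric operand when the other is NaN.)
double WasmF64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;
  return a < b ? a : b;
}

int main() {
  std::printf("%g %g\n", WasmF64Min(0.0, -0.0), WasmF64Min(1.0, NAN));  // -0 nan
}
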
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -511,45 +614,106 @@ void LiftoffAssembler::emit_jump(Label* label) {
bailout(kUnsupportedArchitecture, "emit_jump");
}
-void LiftoffAssembler::emit_jump(Register target) {
- bailout(kUnsupportedArchitecture, "emit_jump");
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+
+ if (type.kind() == ValueType::kI32) {
+ if (rhs == no_reg) {
+ if (use_signed) {
+ CmpS32(lhs, Operand::Zero());
+ } else {
+ CmpU32(lhs, Operand::Zero());
+ }
+ } else {
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+ }
+ } else {
+ CHECK_EQ(type.kind(), ValueType::kI64);
+ if (rhs == no_reg) {
+ if (use_signed) {
+ CmpS64(lhs, Operand::Zero());
+ } else {
+ CmpU64(lhs, Operand::Zero());
+ }
+ } else {
+ if (use_signed) {
+ CmpS64(lhs, rhs);
+ } else {
+ CmpU64(lhs, rhs);
+ }
+ }
+ }
+ b(cond, label);
}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_cond_jump");
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ bailout(kUnsupportedArchitecture, "emit_i32_cond_jumpi");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
+#define EMIT_SET_CONDITION(dst, cond) \
+ { \
+ Label done; \
+ lghi(dst, Operand(1)); \
+ b(cond, &done); \
+ lghi(dst, Operand(0)); \
+ bind(&done); \
+ }
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+
+ EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ CmpS64(lhs.gp(), rhs.gp());
+ } else {
+ CmpU64(lhs.gp(), rhs.gp());
+ }
+
+ EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
+ cebr(lhs, rhs);
+ EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
+ cdbr(lhs, rhs);
+ EMIT_SET_CONDITION(dst, liftoff::ToCondition(liftoff_cond));
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -559,14 +723,26 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return false;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ bailout(kUnsupportedArchitecture, "emit_smi_check");
+}
+
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
bailout(kSimd, "Load transform unimplemented");
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -837,6 +1013,35 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i64x2mul");
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_low_i32x4_s unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_low_i32x4_u unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_high_i32x4_s unsupported");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
+}
+
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i32x4_splat");
@@ -952,6 +1157,30 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_low_i16x8_s unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_low_i16x8_u unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_high_i16x8_s unsupported");
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1091,6 +1320,30 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_low_i8x16_s unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_low_i8x16_u unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_extmul_high_i8x16_u unsupported");
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -1536,6 +1789,13 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
bailout(kUnsupportedArchitecture, "PopRegisters");
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ bailout(kRefTypes, "RecordSpillsInSafepoint");
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
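
In the s390 port above, liftoff::ToCondition collapses signed and unsigned relations to the same branch condition, and liftoff::UseSignedOp decides whether the comparison itself is CmpS32/CmpS64 or CmpU32/CmpU64, so the signedness lives in the compare instruction rather than in the branch. A standalone illustration of why that distinction matters, using plain C++ comparisons instead of s390 instructions:

#include <cstdint>
#include <cstdio>

enum Relation { kLt, kLe, kGt, kGe };

// Signedness is applied at compare time; the relation alone is not enough.
bool Compare32(int32_t lhs, int32_t rhs, Relation rel, bool use_signed) {
  if (use_signed) {
    switch (rel) {
      case kLt: return lhs < rhs;
      case kLe: return lhs <= rhs;
      case kGt: return lhs > rhs;
      case kGe: return lhs >= rhs;
    }
  }
  uint32_t ul = static_cast<uint32_t>(lhs);
  uint32_t ur = static_cast<uint32_t>(rhs);
  switch (rel) {
    case kLt: return ul < ur;
    case kLe: return ul <= ur;
    case kGt: return ul > ur;
    case kGe: return ul >= ur;
  }
  return false;  // unreachable for valid relations
}

int main() {
  // -1 < 1 when compared signed, but 0xFFFFFFFF > 1 when compared unsigned.
  std::printf("signed: %d  unsigned: %d\n",
              Compare32(-1, 1, kLt, /*use_signed=*/true),
              Compare32(-1, 1, kLt, /*use_signed=*/false));
}
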
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index a64b0e2e37..a95ef95f26 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -5,7 +5,9 @@
#ifndef V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
#define V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
+#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler.h"
+#include "src/codegen/cpu-features.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
@@ -20,6 +22,31 @@ namespace wasm {
namespace liftoff {
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return equal;
+ case kUnequal:
+ return not_equal;
+ case kSignedLessThan:
+ return less;
+ case kSignedLessEqual:
+ return less_equal;
+ case kSignedGreaterThan:
+ return greater;
+ case kSignedGreaterEqual:
+ return greater_equal;
+ case kUnsignedLessThan:
+ return below;
+ case kUnsignedLessEqual:
+ return below_equal;
+ case kUnsignedGreaterThan:
+ return above;
+ case kUnsignedGreaterEqual:
+ return above_equal;
+ }
+}
+
constexpr Register kScratchRegister2 = r11;
static_assert(kScratchRegister != kScratchRegister2, "collision");
static_assert((kLiftoffAssemblerGpCacheRegs &
@@ -42,17 +69,16 @@ inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
- uint32_t offset_imm) {
+ uintptr_t offset_imm) {
if (is_uint31(offset_imm)) {
- if (offset == no_reg) return Operand(addr, offset_imm);
- return Operand(addr, offset, times_1, offset_imm);
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ return offset == no_reg ? Operand(addr, offset_imm32)
+ : Operand(addr, offset, times_1, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
- assm->movl(scratch, Immediate(offset_imm));
- if (offset != no_reg) {
- assm->addq(scratch, offset);
- }
+ assm->Set(scratch, offset_imm);
+ if (offset != no_reg) assm->addq(scratch, offset);
return Operand(addr, scratch, times_1, 0);
}
@@ -65,6 +91,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
assm->movq(dst.gp(), src);
break;
case ValueType::kF32:
@@ -133,6 +160,10 @@ constexpr int kSubSpSize = 7; // 7 bytes for "subq rsp, <imm32>"
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
+ // Next we reserve the memory for the whole stack frame. We do not know yet
+ // how big the stack frame will be so we just emit a placeholder instruction.
+ // PatchPrepareStackFrame will patch this in order to increase the stack
+ // appropriately.
sub_sp_32(0);
DCHECK_EQ(liftoff::kSubSpSize, pc_offset() - offset);
return offset;
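
As the placeholder comment above describes, PrepareStackFrame emits a fixed-size "subq rsp, imm32" with a zero immediate and returns its code offset, and PatchPrepareStackFrame later rewrites that immediate once the final frame size is known. A standalone sketch of the scheme using raw bytes in a vector instead of V8's Assembler (48 81 EC is the standard x64 encoding of subq rsp, imm32):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

constexpr int kSubSpSize = 7;  // "subq rsp, imm32" is 7 bytes on x64

// Emit the placeholder "subq rsp, 0" and remember where it starts.
int PrepareStackFrame(std::vector<uint8_t>* code) {
  int offset = static_cast<int>(code->size());
  const uint8_t kSubRspImm32[kSubSpSize] = {0x48, 0x81, 0xEC, 0, 0, 0, 0};
  code->insert(code->end(), kSubRspImm32, kSubRspImm32 + kSubSpSize);
  return offset;
}

// Patch the 32-bit immediate (after the 3 opcode/ModRM bytes) once the final
// frame size is known.
void PatchPrepareStackFrame(std::vector<uint8_t>* code, int offset,
                            int32_t frame_size) {
  std::memcpy(code->data() + offset + 3, &frame_size, sizeof(frame_size));
}

int main() {
  std::vector<uint8_t> code;
  int offset = PrepareStackFrame(&code);
  PatchPrepareStackFrame(&code, offset, 0x40);
  int32_t imm;
  std::memcpy(&imm, code.data() + offset + 3, sizeof(imm));
  std::printf("patched frame size: %d bytes\n", imm);  // prints 64
}
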
@@ -156,7 +187,13 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
popq(rbp);
}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ // The frame_size includes the frame marker. The frame marker has already been
+ // pushed on the stack though, so we don't need to allocate memory for it
+ // anymore.
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
// Need to align sp to system pointer size.
frame_size = RoundUp(frame_size, kSystemPointerSize);
// We can't run out of space, just pass anything big enough to not cause the
@@ -201,7 +238,8 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
}
int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.element_size_bytes();
+ return type.is_reference_type() ? kSystemPointerSize
+ : type.element_size_bytes();
}
bool LiftoffAssembler::NeedsAlignment(ValueType type) {
@@ -275,12 +313,13 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
- Operand dst_op = liftoff::GetMemOp(this, dst_addr, no_reg,
+ Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
static_cast<uint32_t>(offset_imm));
StoreTaggedField(dst_op, src.gp());
@@ -305,18 +344,15 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
@@ -363,12 +399,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList /* pinned */,
uint32_t* protected_store_pc, bool is_store_mem) {
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
@@ -400,11 +433,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister src,
+ uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Register src_reg = src.gp();
if (cache_state()->is_used(src)) {
@@ -433,7 +463,7 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
@@ -443,9 +473,6 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
movq(result.gp(), value.gp());
value = result;
}
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
lock();
switch (type.value()) {
@@ -478,7 +505,7 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
@@ -488,9 +515,6 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
movq(result.gp(), value.gp());
value = result;
}
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
case StoreType::kI32Store8:
@@ -536,7 +560,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!__ cache_state()->is_used(result));
Register value_reg = value.gp();
@@ -546,9 +570,6 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
__ ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
- if (__ emit_debug_code() && offset_reg != no_reg) {
- __ AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(lasm, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
@@ -613,28 +634,28 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
} // namespace liftoff
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
- uint32_t offset_imm, LiftoffRegister value,
+ uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
- uint32_t offset_imm,
+ uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
@@ -645,9 +666,6 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
movq(result.gp(), value.gp());
value = result;
}
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
case StoreType::kI32Store8:
@@ -679,7 +697,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicCompareExchange(
- Register dst_addr, Register offset_reg, uint32_t offset_imm,
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
Register value_reg = new_value.gp();
@@ -693,9 +711,6 @@ void LiftoffAssembler::AtomicCompareExchange(
movq(rax, expected.gp());
}
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
lock();
@@ -807,6 +822,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI64:
case ValueType::kOptRef:
case ValueType::kRef:
+ case ValueType::kRtt:
movq(dst, reg.gp());
break;
case ValueType::kF32:
@@ -923,6 +939,16 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
}
}
+void LiftoffAssembler::emit_i32_subi(Register dst, Register lhs, int32_t imm) {
+ if (dst != lhs) {
+ // We'll have to implement an UB-safe version if we need this corner case.
+ DCHECK_NE(imm, kMinInt);
+ leal(dst, Operand(lhs, -imm));
+ } else {
+ subl(dst, Immediate(imm));
+ }
+}
+
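For reference, both branches of emit_i32_subi compute lhs - imm; the kMinInt guard exists because the leal path folds the subtraction into an address computation with a negated immediate, and -INT32_MIN overflows. A minimal scalar sketch of the intended semantics (illustrative only, not part of the diff):

#include <cstdint>
// dst != lhs path: leal dst, [lhs + (-imm)]  (requires imm != INT32_MIN)
// dst == lhs path: subl dst, imm
// Both paths compute dst = lhs - imm.
int32_t i32_subi_reference(int32_t lhs, int32_t imm) {
  return lhs - imm;
}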
namespace liftoff {
template <void (Assembler::*op)(Register, Register),
void (Assembler::*mov)(Register, Register)>
@@ -943,6 +969,7 @@ void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
if (dst != lhs) (assm->*mov)(dst, lhs);
(assm->*op)(dst, Immediate(imm));
}
+
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -1186,11 +1213,18 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- if (lhs.gp() != dst.gp()) {
- leaq(dst.gp(), Operand(lhs.gp(), imm));
+ int64_t imm) {
+ if (!is_int32(imm)) {
+ TurboAssembler::Set(kScratchRegister, imm);
+ if (lhs.gp() == dst.gp()) {
+ addq(dst.gp(), kScratchRegister);
+ } else {
+ leaq(dst.gp(), Operand(lhs.gp(), kScratchRegister, times_1, 0));
+ }
+ } else if (lhs.gp() == dst.gp()) {
+ addq(dst.gp(), Immediate(static_cast<int32_t>(imm)));
} else {
- addq(dst.gp(), Immediate(imm));
+ leaq(dst.gp(), Operand(lhs.gp(), static_cast<int32_t>(imm)));
}
}
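The new int64_t overload only takes the scratch-register path when the addend no longer fits the sign-extended 32-bit immediate/displacement field accepted by addq/leaq. A hedged sketch of that dispatch (illustrative, not V8's is_int32 helper):

#include <cstdint>
// True when a 64-bit addend can be encoded directly as an x64
// sign-extended 32-bit immediate, i.e. the non-scratch paths above apply.
bool fits_sign_extended_imm32(int64_t imm) {
  return imm >= INT32_MIN && imm <= INT32_MAX;
}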
@@ -1992,14 +2026,20 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
- ValueType type, Register lhs,
- Register rhs) {
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
switch (type.kind()) {
case ValueType::kI32:
cmpl(lhs, rhs);
break;
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
case ValueType::kI64:
cmpq(lhs, rhs);
break;
@@ -2014,14 +2054,24 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
j(cond, label);
}
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ cmpl(lhs, Immediate(imm));
+ j(cond, label);
+}
+
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
testl(src, src);
setcc(equal, dst);
movzxbl(dst, dst);
}
-void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
- Register lhs, Register rhs) {
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
cmpl(lhs, rhs);
setcc(cond, dst);
movzxbl(dst, dst);
@@ -2033,9 +2083,10 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
movzxbl(dst, dst);
}
-void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
- LiftoffRegister lhs,
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
cmpq(lhs.gp(), rhs.gp());
setcc(cond, dst);
movzxbl(dst, dst);
@@ -2066,16 +2117,18 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs,
rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs,
rhs);
}
@@ -2107,6 +2160,13 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
return true;
}
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ testb(obj, Immediate(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? zero : not_zero;
+ j(condition, target);
+}
+
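emit_smi_check relies on V8's pointer tagging: a Smi has its low tag bit clear while a heap object has it set, so a single testb against kSmiTagMask decides the jump direction. A scalar sketch under that assumption (kSmiTagMask is 1 in the default configuration; illustrative only):

#include <cstdint>
// Mirrors the testb / j(zero|not_zero) pair above.
bool is_smi_reference(uintptr_t tagged) {
  return (tagged & 1u) == 0;  // 1u stands in for kSmiTagMask (assumed)
}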
// TODO(fanchenk): Distinguish mov* if data bypass delay matters.
namespace liftoff {
template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
@@ -2263,13 +2323,10 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
- Register offset_reg, uint32_t offset_imm,
+ Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
- if (emit_debug_code() && offset_reg != no_reg) {
- AssertZeroExtended(offset_reg);
- }
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -2318,6 +2375,26 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
}
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+
+ MachineType mem_type = type.mem_type();
+ if (mem_type == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int32()) {
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), mem_type);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ }
+}
+
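LoadLane reads one scalar from memory and replaces a single lane of the source vector, leaving the remaining lanes intact; the Pinsrb/w/d/q choice only selects the lane width. A hedged scalar sketch for the 32-bit case (illustrative, not V8 code):

#include <cstdint>
// dst starts as a copy of src, then lane `laneidx` is overwritten with the
// value loaded from memory.
void load32_lane_reference(const uint32_t src[4], const uint32_t* mem,
                           int laneidx, uint32_t dst[4]) {
  for (int i = 0; i < 4; ++i) dst[i] = src[i];
  dst[laneidx] = *mem;
}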
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -2396,7 +2473,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Shufps(dst.fp(), src.fp(), 0);
+ Shufps(dst.fp(), src.fp(), src.fp(), 0);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2636,7 +2713,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- memcpy(vals, imms, sizeof(vals));
+ base::Memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
movq(kScratchRegister, vals[1]);
Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
@@ -2674,17 +2751,14 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(kScratchDoubleReg, src1.fp(), src2.fp());
- vandps(kScratchDoubleReg, kScratchDoubleReg, mask.fp());
- vxorps(dst.fp(), kScratchDoubleReg, src2.fp());
+ // Ensure that we don't overwrite any inputs with the movdqu below.
+ DCHECK_NE(dst, src1);
+ DCHECK_NE(dst, src2);
+ if (!CpuFeatures::IsSupported(AVX) && dst != mask) {
+ movdqu(dst.fp(), mask.fp());
+ S128Select(dst.fp(), dst.fp(), src1.fp(), src2.fp());
} else {
- movaps(kScratchDoubleReg, src1.fp());
- xorps(kScratchDoubleReg, src2.fp());
- andps(kScratchDoubleReg, mask.fp());
- if (dst.fp() != src2.fp()) movaps(dst.fp(), src2.fp());
- xorps(dst.fp(), kScratchDoubleReg);
+ S128Select(dst.fp(), mask.fp(), src1.fp(), src2.fp());
}
}
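Both branches now defer to the S128Select macro, which implements the usual branch-free bit-select that the removed xor/and/xor sequence spelled out by hand. A scalar sketch of that identity (illustrative only):

#include <cstdint>
// For every bit: result = mask ? src1 : src2.
// src2 ^ ((src1 ^ src2) & mask) flips src2 bits toward src1 exactly where
// mask is set, using only xor/and -- the same trick applied per 128-bit vector.
uint64_t bit_select_reference(uint64_t src1, uint64_t src2, uint64_t mask) {
  return src2 ^ ((src1 ^ src2) & mask);
}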
@@ -3074,6 +3148,33 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ /*is_signed=*/false);
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3191,6 +3292,58 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+namespace liftoff {
+// Helper function that checks for register aliasing and AVX support, and
+// moves registers around before calling the actual macro-assembler function.
+inline void I32x4ExtMulHelper(LiftoffAssembler* assm, XMMRegister dst,
+ XMMRegister src1, XMMRegister src2, bool low,
+ bool is_signed) {
+ // I32x4ExtMul requires dst == src1 if AVX is not supported.
+ if (CpuFeatures::IsSupported(AVX) || dst == src1) {
+ assm->I32x4ExtMul(dst, src1, src2, low, is_signed);
+ } else if (dst != src2) {
+ // dst != src1 && dst != src2
+ assm->movaps(dst, src1);
+ assm->I32x4ExtMul(dst, dst, src2, low, is_signed);
+ } else {
+ // dst == src2
+ // Extended multiplication is commutative, so we can swap the operands.
+ assm->movaps(dst, src2);
+ assm->I32x4ExtMul(dst, dst, src1, low, is_signed);
+ }
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(),
+ /*low=*/false,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ liftoff::I32x4ExtMulHelper(this, dst.fp(), src1.fp(), src2.fp(),
+ /*low=*/false,
+ /*is_signed=*/false);
+}
+
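All of the emit_i32x4_extmul_* wrappers above lower to the same extended-multiply semantics: widen one half of each input's lanes, then multiply pairwise into double-width lanes. A hedged scalar sketch for i32x4.extmul_low_i16x8_s (illustrative, not V8 code):

#include <cstdint>
// Lanes 0..3 of each i16x8 input are sign-extended to 32 bits and multiplied;
// the "high" variants use lanes 4..7, and the "_u" variants zero-extend.
void i32x4_extmul_low_i16x8_s_reference(const int16_t a[8], const int16_t b[8],
                                        int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    out[i] = static_cast<int32_t>(a[i]) * static_cast<int32_t>(b[i]);
  }
}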
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
@@ -3280,6 +3433,38 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Paddq(dst.fp(), tmp2.fp());
}
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true, /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/true,
+ /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ /*is_signed=*/true);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I64x2ExtMul(dst.fp(), src1.fp(), src2.fp(), /*low=*/false,
+ /*is_signed=*/false);
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskpd(dst.gp(), src.fp());
+}
+
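Movmskpd packs the sign bit of each of the two 64-bit lanes into bits 0 and 1 of the destination GP register, which is exactly what i64x2.bitmask requires. A scalar sketch (illustrative only):

#include <cstdint>
// bit 0 = sign of lane 0, bit 1 = sign of lane 1, all other bits zero.
uint32_t i64x2_bitmask_reference(int64_t lane0, int64_t lane1) {
  return (lane0 < 0 ? 1u : 0u) | (lane1 < 0 ? 2u : 0u);
}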
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3714,8 +3899,7 @@ void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxbw(dst.fp(), dst.fp());
+ I16x8SConvertI8x16High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
@@ -3725,8 +3909,7 @@ void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxbw(dst.fp(), dst.fp());
+ I16x8UConvertI8x16High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
@@ -3736,8 +3919,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxwd(dst.fp(), dst.fp());
+ I32x4SConvertI16x8High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
@@ -3747,8 +3929,7 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxwd(dst.fp(), dst.fp());
+ I32x4UConvertI16x8High(dst.fp(), src.fp());
}
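The four *_high conversions above replace the hand-written Palignr + Pmovsx/zx pairs with macro-assembler helpers, but the semantics are unchanged: take the upper half of the input lanes and widen each one. A hedged scalar sketch for the signed i8x16 to i16x8 case (illustrative, not V8 code):

#include <cstdint>
// The high eight bytes are sign-extended to eight 16-bit lanes.
void i16x8_sconvert_i8x16_high_reference(const int8_t in[16], int16_t out[8]) {
  for (int i = 0; i < 8; ++i) out[i] = in[8 + i];
}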
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -3989,6 +4170,24 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
}
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots,
(1 << 16) / kSystemPointerSize); // 16 bit immediate