author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-02-02 12:21:57 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-02-12 08:13:00 +0000
commit     606d85f2a5386472314d39923da28c70c60dc8e7 (patch)
tree       a8f4d7bf997f349f45605e6058259fba0630e4d7 /chromium/v8/src/wasm
parent     5786336dda477d04fb98483dca1a5426eebde2d7 (diff)
download   qtwebengine-chromium-606d85f2a5386472314d39923da28c70c60dc8e7.tar.gz
BASELINE: Update Chromium to 96.0.4664.181
Change-Id: I762cd1da89d73aa6313b4a753fe126c34833f046
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm')
-rw-r--r--  chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 28
-rw-r--r--  chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 52
-rw-r--r--  chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 313
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h | 16
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.h | 54
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.cc | 404
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-register.h | 8
-rw-r--r--  chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h | 2817
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 12
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 25
-rw-r--r--  chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 955
-rw-r--r--  chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h | 509
-rw-r--r--  chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 421
-rw-r--r--  chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 341
-rw-r--r--  chromium/v8/src/wasm/c-api.cc | 10
-rw-r--r--  chromium/v8/src/wasm/c-api.h | 3
-rw-r--r--  chromium/v8/src/wasm/code-space-access.cc | 12
-rw-r--r--  chromium/v8/src/wasm/code-space-access.h | 3
-rw-r--r--  chromium/v8/src/wasm/compilation-environment.h | 16
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder-impl.h | 409
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.cc | 11
-rw-r--r--  chromium/v8/src/wasm/function-compiler.cc | 46
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.cc | 373
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.h | 5
-rw-r--r--  chromium/v8/src/wasm/init-expr-interface.cc | 42
-rw-r--r--  chromium/v8/src/wasm/jump-table-assembler.cc | 30
-rw-r--r--  chromium/v8/src/wasm/jump-table-assembler.h | 5
-rw-r--r--  chromium/v8/src/wasm/memory-protection-key.cc | 25
-rw-r--r--  chromium/v8/src/wasm/memory-protection-key.h | 4
-rw-r--r--  chromium/v8/src/wasm/module-compiler.cc | 133
-rw-r--r--  chromium/v8/src/wasm/module-compiler.h | 2
-rw-r--r--  chromium/v8/src/wasm/module-decoder.cc | 160
-rw-r--r--  chromium/v8/src/wasm/module-instantiate.cc | 139
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.cc | 29
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.h | 5
-rw-r--r--  chromium/v8/src/wasm/sync-streaming-decoder.cc | 4
-rw-r--r--  chromium/v8/src/wasm/value-type.h | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.cc | 202
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.h | 103
-rw-r--r--  chromium/v8/src/wasm/wasm-constants.h | 28
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.cc | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.cc | 19
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.h | 1
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.cc | 62
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-feature-flags.h | 41
-rw-r--r--  chromium/v8/src/wasm/wasm-init-expr.cc | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-init-expr.h | 42
-rw-r--r--  chromium/v8/src/wasm/wasm-js.cc | 285
-rw-r--r--  chromium/v8/src/wasm/wasm-limits.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-linkage.h | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-module-builder.cc | 255
-rw-r--r--  chromium/v8/src/wasm/wasm-module-builder.h | 122
-rw-r--r--  chromium/v8/src/wasm/wasm-module-sourcemap.cc | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-module-sourcemap.h | 5
-rw-r--r--  chromium/v8/src/wasm/wasm-module.cc | 29
-rw-r--r--  chromium/v8/src/wasm/wasm-module.h | 32
-rw-r--r--  chromium/v8/src/wasm/wasm-objects-inl.h | 54
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.cc | 45
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.h | 39
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes-inl.h | 9
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.h | 13
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.cc | 4
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.cc | 45
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.h | 14
65 files changed, 6715 insertions, 2197 deletions
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 6e2bacc0439..211cf82398a 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4262,14 +4262,34 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ if (kind == kF32) {
+ FloatRegister src_f = liftoff::GetFloatRegister(src);
+ VFPCompareAndSetFlags(src_f, src_f);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ VFPCompareAndSetFlags(src, src);
+ }
+
+ // Store a non-zero value if src is NaN.
+ str(dst, MemOperand(dst), ne); // x != x iff isnan(x)
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ QwNeonRegister src_q = liftoff::GetSimd128Register(src);
+ QwNeonRegister tmp_q = liftoff::GetSimd128Register(tmp_s128);
+ if (lane_kind == kF32) {
+ vpadd(tmp_q.low(), src_q.low(), src_q.high());
+ LowDwVfpRegister tmp_d =
+ LowDwVfpRegister::from_code(tmp_s128.low_fp().code());
+ vadd(tmp_d.low(), tmp_d.low(), tmp_d.high());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vadd(tmp_q.low(), src_q.low(), src_q.high());
+ }
+ emit_set_if_nan(dst, tmp_q.low(), lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
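
A minimal standalone sketch (not part of this patch) of the self-comparison trick that the new emit_set_if_nan bodies rely on: NaN is the only IEEE-754 value that compares unequal to itself, so comparing src with itself raises the unordered condition exactly when src is NaN.

    // Scalar illustration only; the Liftoff code performs the same comparison
    // with VFPCompareAndSetFlags / Fcmp and conditionally stores a non-zero
    // value to *dst instead of returning a bool.
    inline bool IsNaNBySelfCompare(double x) {
      return x != x;  // agrees with std::isnan(x) for IEEE-754 doubles
    }
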
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a52370f2935..e10a18a5607 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -452,6 +452,13 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldMemOperand(instance, offset), tag,
+ isolate_root);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
@@ -1173,12 +1180,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.S(), src.W());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.W(), scratch.S());
+ PopcntHelper(dst.W(), src.W());
return true;
}
@@ -1193,12 +1195,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.D(), src.gp().X());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.gp().X(), scratch.D());
+ PopcntHelper(dst.gp().X(), src.gp().X());
return true;
}
@@ -1717,13 +1714,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
UseScratchRegisterScope temps(this);
MemOperand src_op{
liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (dst != src) {
Mov(dst.fp().Q(), src.fp().Q());
}
+ *protected_load_pc = pc_offset();
if (mem_type == MachineType::Int8()) {
ld1(dst.fp().B(), laneidx, src_op);
} else if (mem_type == MachineType::Int16()) {
@@ -3259,14 +3256,35 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (kind == kF32) {
+ Fcmp(src.S(), src.S());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // If it's a NaN, it must be non-zero, so store that as the set value.
+ Str(src.S(), MemOperand(dst));
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Fcmp(src.D(), src.D());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // Double-precision NaNs must be non-zero in the most-significant 32
+ // bits, so store that.
+ St1(src.V4S(), 1, MemOperand(dst));
+ }
+ Bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ DoubleRegister tmp_fp = tmp_s128.fp();
+ if (lane_kind == kF32) {
+ Fmaxv(tmp_fp.S(), src.fp().V4S());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ Fmaxp(tmp_fp.D(), src.fp().V2D());
+ }
+ emit_set_if_nan(dst, tmp_fp, lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index bb2fed83c65..2d922d3b2e5 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2718,40 +2718,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-enum class ShiftSignedness { kSigned, kUnsigned };
-
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm is used for both signed and unsigned shifts, the only
- // difference is the actual shift and pack in the end. This is the same
- // algorithm as used in code-generator-ia32.cc
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- XMMRegister tmp_simd =
- assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
-
- // Unpack the bytes into words, do logical shifts, and repack.
- assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- assm->mov(tmp, rhs.gp());
- // Take shift value modulo 8.
- assm->and_(tmp, 7);
- assm->add(tmp, Immediate(8));
- assm->Movd(tmp_simd, tmp);
- if (is_signed) {
- assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
Register tmp =
@@ -2762,7 +2728,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->cmov(zero, dst.gp(), tmp);
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2809,23 +2775,19 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
- movss(dst.fp(), src_op);
+ Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
- movsd(dst.fp(), src_op);
+ Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- Vbroadcastss(dst.fp(), src_op);
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2875,12 +2837,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
S128Store32Lane(dst_op, src.fp(), lane);
} else {
DCHECK_EQ(MachineRepresentation::kWord64, rep);
- if (lane == 0) {
- Movlps(dst_op, src.fp());
- } else {
- DCHECK_EQ(1, lane);
- Movhps(dst_op, src.fp());
- }
+ S128Store64Lane(dst_op, src.fp(), lane);
}
}
@@ -2951,16 +2908,12 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Pshufd(dst.fp(), dst.fp(), uint8_t{0});
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -3180,17 +3133,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+ // 1. AVX, or SSE4_2 no requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
LiftoffRegister tmp = GetUnusedRegister(
RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
@@ -3366,89 +3313,48 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- mov(tmp.gp(), rhs.gp());
- and_(tmp.gp(), Immediate(7));
- add(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
- Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- sub(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
+ tmp_simd.fp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp.gp(), mask);
- Movd(liftoff::kScratchDoubleReg, tmp.gp());
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, tmp.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(liftoff::kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
Register tmp = GetUnusedRegister(kGpReg, {}).gp();
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7;
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 3>(
- this, dst, lhs, rhs);
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp, mask);
- Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, tmp, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3951,19 +3857,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
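
The sequence deleted above decomposes a 64x64-bit multiply into the 32x32->64 multiplies that pmuludq provides; the shared I64x2Mul helper presumably emits an equivalent sequence. A scalar sketch of the decomposition (illustration only, not V8 code):

    #include <cstdint>

    // a * b mod 2^64 == lo(a)*lo(b) + ((hi(a)*lo(b) + lo(a)*hi(b)) << 32);
    // the hi(a)*hi(b) term lies entirely above bit 63 and drops out.
    inline uint64_t Mul64From32x32(uint64_t a, uint64_t b) {
      uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
      uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
      return a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 32);
    }
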
@@ -4021,28 +3915,14 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{1});
- Andps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Absps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -4104,61 +3984,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- minps(liftoff::kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- minps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- maxps(liftoff::kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- maxps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
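
The comments deleted above explain why a single minps/maxps is not enough for wasm f32x4.min/max; the shared F32x4Min/F32x4Max macros presumably keep the same approach. An SSE-intrinsics sketch of the two-order min trick (illustration only, not V8 code):

    #include <immintrin.h>

    // minps returns its second operand when either input is NaN and ignores
    // the sign of zero, so evaluate it in both orders, merge, and canonicalize
    // the NaN lanes.
    inline __m128 WasmF32x4Min(__m128 lhs, __m128 rhs) {
      __m128 scratch = _mm_min_ps(lhs, rhs);
      __m128 result = _mm_min_ps(rhs, lhs);
      scratch = _mm_or_ps(scratch, result);       // propagate -0.0 and NaNs
      result = _mm_cmpunord_ps(result, scratch);  // all-ones mask on NaN lanes
      scratch = _mm_or_ps(scratch, result);
      // Clear the low 22 payload bits of NaN lanes, leaving a canonical quiet NaN.
      __m128i payload = _mm_srli_epi32(_mm_castps_si128(result), 10);
      return _mm_andnot_ps(_mm_castsi128_ps(payload), scratch);
    }
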
@@ -4177,28 +4008,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Abspd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{63});
- Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), dst.fp(), byte{63});
- Xorpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negpd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -4300,26 +4117,8 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
- } else {
- movaps(liftoff::kScratchDoubleReg, src.fp());
- cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(liftoff::kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(liftoff::kScratchDoubleReg, dst.fp());
- Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4SConvertF32x4(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4787,22 +4586,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4836,19 +4627,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
or_(Operand(dst, 0), tmp_gp);
}
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
index d445655dcac..5b43a2a41d1 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -46,6 +46,18 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+#elif V8_TARGET_ARCH_LOONG64
+
+// t6-t8 and s3-s4: scratch registers, s6: root
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
+ s1, s2, s5, s7, s8);
+
+// f29: zero, f30-f31: macro-assembler scratch float Registers.
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
+ f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -95,8 +107,8 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedFpRegs in frame-constants-riscv64.h
constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
- fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+ DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 19611fb0eef..cea6c9361d2 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -32,7 +32,9 @@ namespace wasm {
enum LiftoffCondition {
kEqual,
+ kEqualZero = kEqual, // When used in a unary operation.
kUnequal,
+ kNotEqualZero = kUnequal, // When used in a unary operation.
kSignedLessThan,
kSignedLessEqual,
kSignedGreaterThan,
@@ -43,8 +45,8 @@ enum LiftoffCondition {
kUnsignedGreaterEqual
};
-inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
+inline constexpr LiftoffCondition Negate(LiftoffCondition cond) {
+ switch (cond) {
case kEqual:
return kUnequal;
case kUnequal:
@@ -68,6 +70,31 @@ inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
}
}
+inline constexpr LiftoffCondition Flip(LiftoffCondition cond) {
+ switch (cond) {
+ case kEqual:
+ return kEqual;
+ case kUnequal:
+ return kUnequal;
+ case kSignedLessThan:
+ return kSignedGreaterThan;
+ case kSignedLessEqual:
+ return kSignedGreaterEqual;
+ case kSignedGreaterEqual:
+ return kSignedLessEqual;
+ case kSignedGreaterThan:
+ return kSignedLessThan;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterThan;
+ case kUnsignedLessEqual:
+ return kUnsignedGreaterEqual;
+ case kUnsignedGreaterEqual:
+ return kUnsignedLessEqual;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessThan;
+ }
+}
+
class LiftoffAssembler : public TurboAssembler {
public:
// Each slot in our stack frame currently has exactly 8 bytes.
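
Since both helpers are constexpr, the difference between them can be pinned down with compile-time checks (sketch, assuming the assertions live in the same namespace; not part of the patch): Negate inverts the truth value of a comparison, while Flip rewrites it for swapped operands, which is what the new JumpIfFalse helper needs when a constant ends up on the left-hand side.

    static_assert(Negate(kSignedLessThan) == kSignedGreaterEqual,
                  "!(a < b)  <=>  a >= b");
    static_assert(Flip(kSignedLessThan) == kSignedGreaterThan,
                  "a < b  <=>  b > a");
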
@@ -668,6 +695,9 @@ class LiftoffAssembler : public TurboAssembler {
int size);
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
+ inline void LoadExternalPointer(Register dst, Register instance, int offset,
+ ExternalPointerTag tag,
+ Register isolate_root);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
@@ -975,8 +1005,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
- inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
- Register lhs, int imm);
+ inline void emit_i32_cond_jumpi(LiftoffCondition, Label*, Register lhs,
+ int imm);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
@@ -1456,12 +1486,12 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
- // Set the i32 at address dst to 1 if src is a NaN.
+ // Set the i32 at address dst to a non-zero value if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
- inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
- Register tmp_gp, DoubleRegister tmp_fp,
+ inline void emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp, LiftoffRegister tmp_s128,
ValueKind lane_kind);
////////////////////////////////////
@@ -1506,6 +1536,10 @@ class LiftoffAssembler : public TurboAssembler {
private:
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
+ // Spill one or two fp registers to get a pair of adjacent fp registers.
+ LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
+
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalKinds = 16;
union {
@@ -1521,10 +1555,6 @@ class LiftoffAssembler : public TurboAssembler {
int ool_spill_space_size_ = 0;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
-
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
- // Spill one or two fp registers to get a pair of adjacent fp registers.
- LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
@@ -1711,6 +1741,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf83..fc5684f4273 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,31 +306,18 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
- constexpr WasmFeatures kStagedFeatures{
- FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE
// Bailout is allowed if any experimental feature is enabled.
if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
- // Staged features should be feature complete in Liftoff according to
- // https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
- // listed here explicitly, with a bug assigned to each of them.
-
- // TODO(7581): Fully implement reftypes in Liftoff.
- STATIC_ASSERT(kStagedFeatures.has_reftypes());
- if (reason == kRefTypes) {
- DCHECK(env->enabled_features.has_reftypes());
- return;
- }
-
// Otherwise, bailout is not allowed.
FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
@@ -373,6 +360,29 @@ class LiftoffCompiler {
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
+ class MostlySmallValueKindSig : public Signature<ValueKind> {
+ public:
+ MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig)
+ : Signature<ValueKind>(sig->return_count(), sig->parameter_count(),
+ MakeKinds(inline_storage_, zone, sig)) {}
+
+ private:
+ static constexpr size_t kInlineStorage = 8;
+
+ static ValueKind* MakeKinds(ValueKind* storage, Zone* zone,
+ const FunctionSig* sig) {
+ const size_t size = sig->parameter_count() + sig->return_count();
+ if (V8_UNLIKELY(size > kInlineStorage)) {
+ storage = zone->NewArray<ValueKind>(size);
+ }
+ std::transform(sig->all().begin(), sig->all().end(), storage,
+ [](ValueType type) { return type.kind(); });
+ return storage;
+ }
+
+ ValueKind inline_storage_[kInlineStorage];
+ };
+
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
struct SpilledRegistersForInspection : public ZoneObject {
@@ -800,7 +810,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
- if (FLAG_wasm_dynamic_tiering) {
+ if (env_->dynamic_tiering == DynamicTiering::kEnabled) {
// TODO(arobin): Avoid spilling registers unconditionally.
__ SpillAllRegisters();
CODE_COMMENT("dynamic tiering");
@@ -832,8 +842,8 @@ class LiftoffCompiler {
// Check if the number of calls is a power of 2.
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
- // Unary "unequal" means "different from zero".
- __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
+ __ emit_cond_jump(kNotEqualZero, &no_tierup, kI32,
+ old_number_of_calls.gp());
TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access
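
A scalar sketch of the tier-up trigger above (illustration only; it assumes new_number_of_calls is old_number_of_calls plus one, which the elided context suggests): ANDing the old counter with the incremented one yields zero exactly when the new value is a power of two, because the increment clears all trailing one-bits.

    #include <cstdint>

    // True for new counts 1, 2, 4, 8, ...; the function is only considered
    // for tier-up on those calls.
    inline bool TierUpAfterIncrement(uint32_t old_count) {
      return (old_count & (old_count + 1)) == 0;
    }
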
@@ -1009,13 +1019,11 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
{});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
+ __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kI32, flag);
+ __ emit_cond_jump(kEqualZero, &no_break, kI32, flag);
__ bind(&do_break);
EmitBreakpoint(decoder);
@@ -1254,6 +1262,46 @@ class LiftoffCompiler {
}
}
+ void JumpIfFalse(FullDecoder* decoder, Label* false_dst) {
+ LiftoffCondition cond =
+ test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero;
+
+ if (!has_outstanding_op()) {
+ // Unary comparison.
+ Register value = __ PopToRegister().gp();
+ __ emit_cond_jump(cond, false_dst, kI32, value);
+ return;
+ }
+
+ // Binary comparison of i32 values.
+ cond = Negate(GetCompareCondition(outstanding_op_));
+ outstanding_op_ = kNoOutstandingOp;
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ if (rhs_slot.is_const()) {
+ // Compare to a constant.
+ int32_t rhs_imm = rhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ Register lhs = __ PopToRegister().gp();
+ __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm);
+ return;
+ }
+
+ Register rhs = __ PopToRegister().gp();
+ LiftoffAssembler::VarState lhs_slot = __ cache_state()->stack_state.back();
+ if (lhs_slot.is_const()) {
+ // Compare a constant to an arbitrary value.
+ int32_t lhs_imm = lhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ // Flip the condition, because {lhs} and {rhs} are swapped.
+ __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm);
+ return;
+ }
+
+ // Compare two arbitrary values.
+ Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
+ __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs);
+ }
+
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -1261,25 +1309,8 @@ class LiftoffCompiler {
// Allocate the else state.
if_block->else_state = std::make_unique<ElseState>();
- // Test the condition, jump to else if zero.
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, if_block->else_state->label.get(), kI32,
- value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, if_block->else_state->label.get(), kI32, lhs,
- rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to else if zero.
+ JumpIfFalse(decoder, if_block->else_state->label.get());
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
@@ -2313,7 +2344,7 @@ class LiftoffCompiler {
__ PushRegister(kind, value);
}
- void GlobalSet(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value&,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
@@ -2493,23 +2524,9 @@ class LiftoffCompiler {
}
Label cont_false;
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &cont_false, kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to {cont_false} if zero.
+ JumpIfFalse(decoder, &cont_false);
BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
@@ -2693,8 +2710,7 @@ class LiftoffCompiler {
__ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -2757,14 +2773,17 @@ class LiftoffCompiler {
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned;
+ if (index != no_reg) pinned.set(index);
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
DCHECK_GE(kMaxUInt32, offset);
__ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
- __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ if (index != no_reg) {
+ // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ }
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -2808,30 +2827,6 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uintptr_t* offset,
- LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations ||
- env_->bounds_checks == kTrapHandler) {
- return index;
- }
- CODE_COMMENT("mask memory index");
- // Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register old_index = index;
- pinned->clear(LiftoffRegister{old_index});
- index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) {
- __ Move(index, old_index, kPointerKind);
- }
- }
- Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
- if (*offset) __ emit_ptrsize_addi(index, index, *offset);
- __ emit_ptrsize_and(index, index, tmp);
- *offset = 0;
- return index;
- }
-
bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
int access_size, uintptr_t* offset) {
if (!index_slot.is_const()) return false;
@@ -2892,7 +2887,6 @@ class LiftoffCompiler {
CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -2937,7 +2931,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2970,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load lane");
Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +3015,6 @@ class LiftoffCompiler {
if (index == no_reg) return;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
@@ -3058,7 +3049,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
@@ -4186,8 +4176,9 @@ class LiftoffCompiler {
Load64BitExceptionValue(value, values_array, index, pinned);
break;
case kF64: {
- RegClass rc = reg_class_for(kI64);
- LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(rc, pinned));
+ RegClass rc_i64 = reg_class_for(kI64);
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(rc_i64, pinned));
Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
__ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
nullptr);
@@ -4340,7 +4331,6 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
@@ -4363,7 +4353,6 @@ class LiftoffCompiler {
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
@@ -4411,7 +4400,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4422,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
@@ -4467,7 +4454,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4500,6 @@ class LiftoffCompiler {
pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4531,8 +4516,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index. It may have changed
- // above in {AddMemoryMasking}.
+ // We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4546,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4914,7 +4897,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back(2);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- __ SmiUntag(kReturnRegister0);
+ __ SmiToInt32(kReturnRegister0);
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
@@ -5055,7 +5038,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
- static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5167,8 @@ class LiftoffCompiler {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
+ // TODO(7748): Unify implementation with TF: Implement this with
+ // GenerateCCall. Remove runtime function and builtin in wasm.tq.
CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
// Builtin parameter order:
@@ -5200,7 +5185,50 @@ class LiftoffCompiler {
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ ValueKind rtt_kind = rtt.type.kind();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ // Allocate the array.
+ {
+ LiftoffAssembler::VarState rtt_var =
+ __ cache_state()->stack_state.end()[-1];
+
+ LiftoffRegList pinned;
+
+ LiftoffRegister elem_size_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(elem_size_reg, WasmValue(element_size_bytes(elem_kind)));
+ LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
+
+ LiftoffRegister length_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(length_reg,
+ WasmValue(static_cast<int32_t>(elements.size())));
+ LiftoffAssembler::VarState length_var(kI32, length_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmAllocateArray_Uninitialized,
+ MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
+ {rtt_var, length_var, elem_size_var},
+ decoder->position());
+ // Drop the RTT.
+ __ DropValues(1);
+ }
+
+ // Initialize the array with stack arguments.
+ LiftoffRegister array(kReturnRegister0);
+ if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
+ for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(array);
+ LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister offset_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(offset_reg, WasmValue(i << element_size_log2(elem_kind)));
+ StoreObjectField(array.gp(), offset_reg.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
+ element, pinned, elem_kind);
+ }
+
+ // Push the array onto the stack.
+ __ PushRegister(kRef, array);
}
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -5648,20 +5676,11 @@ class LiftoffCompiler {
}
private:
- ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
- ValueKind* reps =
- zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
- ValueKind* ptr = reps;
- for (ValueType type : sig->all()) *ptr++ = type.kind();
- return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
- reps);
- }
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5691,7 +5710,7 @@ class LiftoffCompiler {
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5701,12 +5720,12 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ __ CallIndirect(&sig, call_descriptor, target);
+ FinishCall(decoder, &sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
- __ PrepareCall(sig, call_descriptor);
+ __ PrepareCall(&sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (tail_call) {
@@ -5720,7 +5739,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
}
@@ -5728,8 +5747,8 @@ class LiftoffCompiler {
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5778,28 +5797,6 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
- // Mask the index to prevent SSCA.
- if (FLAG_untrusted_code_mitigations) {
- CODE_COMMENT("Mask indirect call index");
- // mask = ((index - size) & ~index) >> 31
- // Reuse allocated registers; note: size is still stored in {tmp_const}.
- Register diff = table;
- Register neg_index = tmp_const;
- Register mask = scratch;
- // 1) diff = index - size
- __ emit_i32_sub(diff, index, tmp_const);
- // 2) neg_index = ~index
- __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index, neg_index, index);
- // 3) mask = diff & neg_index
- __ emit_i32_and(mask, diff, neg_index);
- // 4) mask = mask >> 31
- __ emit_i32_sari(mask, mask, 31);
-
- // Apply mask.
- __ emit_i32_and(index, index, mask);
- }
-
CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
if (imm.table_imm.index == 0) {
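
For reference, the branchless bounds mask computed by the mitigation removed above, as a scalar sketch (illustration only, not V8 code): the mask is all-ones only when index is below size and has its sign bit clear, so any out-of-bounds index is forced to zero without a branch.

    #include <cstdint>

    // mask = ((index - size) & ~index) >> 31, valid for size < 2^31.
    inline uint32_t MaskTableIndex(uint32_t index, uint32_t size) {
      int32_t diff = static_cast<int32_t>(index - size);
      int32_t mask = (diff & ~static_cast<int32_t>(index)) >> 31;  // arithmetic shift: 0 or -1
      return index & static_cast<uint32_t>(mask);
    }
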
@@ -5875,7 +5872,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5885,16 +5882,16 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
+ __ CallIndirect(&sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* type_sig, TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, type_sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
compiler::CallDescriptor* call_descriptor =
@@ -5965,11 +5962,9 @@ class LiftoffCompiler {
#ifdef V8_HEAP_SANDBOX
LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
- __ LoadExternalPointerField(
- target.gp(),
- FieldOperand(func_data.gp(), WasmFunctionData::kForeignAddressOffset),
- kForeignForeignAddressTag, temp.gp(),
- TurboAssembler::IsolateRootLocation::kInScratchRegister);
+ __ LoadExternalPointer(target.gp(), func_data.gp(),
+ WasmFunctionData::kForeignAddressOffset,
+ kForeignForeignAddressTag, temp.gp());
#else
__ Load(
target, func_data.gp(), no_reg,
@@ -5999,7 +5994,7 @@ class LiftoffCompiler {
// is in {instance}.
Register target_reg = target.gp();
Register instance_reg = instance.gp();
- __ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
+ __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -6009,9 +6004,9 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target_reg);
+ __ CallIndirect(&sig, call_descriptor, target_reg);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
@@ -6151,14 +6146,61 @@ class LiftoffCompiler {
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
- __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
- tmp_fp.fp(), lane_kind);
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+ tmp_s128, lane_kind);
+ }
+
+ bool has_outstanding_op() const {
+ return outstanding_op_ != kNoOutstandingOp;
+ }
+
+ bool test_and_reset_outstanding_op(WasmOpcode opcode) {
+ DCHECK_NE(kNoOutstandingOp, opcode);
+ if (outstanding_op_ != opcode) return false;
+ outstanding_op_ = kNoOutstandingOp;
+ return true;
+ }
+
+ void TraceCacheState(FullDecoder* decoder) const {
+ if (!FLAG_trace_liftoff) return;
+ StdoutStream os;
+ for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
+ --control_depth) {
+ auto* cache_state =
+ control_depth == -1 ? __ cache_state()
+ : &decoder->control_at(control_depth)
+ ->label_state;
+ os << PrintCollection(cache_state->stack_state);
+ if (control_depth != -1) PrintF("; ");
+ }
+ os << "\n";
+ }
+
+ void DefineSafepoint() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepoint(safepoint);
+ }
+
+ void DefineSafepointWithCalleeSavedRegisters() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
+ }
+
+ Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList::ForRegs(fallback));
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ }
+ return instance;
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
@@ -6223,46 +6265,6 @@ class LiftoffCompiler {
int32_t* max_steps_;
int32_t* nondeterminism_;
- bool has_outstanding_op() const {
- return outstanding_op_ != kNoOutstandingOp;
- }
-
- void TraceCacheState(FullDecoder* decoder) const {
- if (!FLAG_trace_liftoff) return;
- StdoutStream os;
- for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
- --control_depth) {
- auto* cache_state =
- control_depth == -1 ? __ cache_state()
- : &decoder->control_at(control_depth)
- ->label_state;
- os << PrintCollection(cache_state->stack_state);
- if (control_depth != -1) PrintF("; ");
- }
- os << "\n";
- }
-
- void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepoint(safepoint);
- }
-
- void DefineSafepointWithCalleeSavedRegisters() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
- }
-
- Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
- Register instance = __ cache_state()->cached_instance;
- if (instance == no_reg) {
- instance = __ cache_state()->TrySetCachedInstanceRegister(
- pinned | LiftoffRegList::ForRegs(fallback));
- if (instance == no_reg) instance = fallback;
- __ LoadInstanceFromFrame(instance);
- }
- return instance;
- }
-
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h
index 63ac2acf8bc..74eb10ca341 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-register.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-register.h
@@ -313,9 +313,9 @@ class LiftoffRegister {
}
private:
- storage_t code_;
-
explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
+
+ storage_t code_;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);
@@ -467,10 +467,10 @@ class LiftoffRegList {
}
private:
- storage_t regs_ = 0;
-
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
+
+ storage_t regs_ = 0;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegList);
diff --git a/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
new file mode 100644
index 00000000000..f22e0136014
--- /dev/null
+++ b/chromium/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -0,0 +1,2817 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
+template <typename T>
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->add_d(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // The offset immediate does not fit in a signed 32-bit value.
+ assm->li(kScratchReg, Operand(offset_imm));
+ assm->add_d(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->add_d(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
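
A minimal standalone sketch of the addressing decision above, outside the assembler types (not part of this patch; MemRef and FoldOffsets are made-up names, and the int32 bound mirrors the is_int32() guard):

  #include <cstdint>
  #include <limits>

  struct MemRef {        // made-up stand-in for MemOperand: base + displacement
    uint64_t base;
    int32_t disp;
  };

  inline MemRef FoldOffsets(uint64_t addr, const uint64_t* offset,
                            int64_t offset_imm) {
    bool fits_int32 = offset_imm >= std::numeric_limits<int32_t>::min() &&
                      offset_imm <= std::numeric_limits<int32_t>::max();
    if (fits_int32) {
      int32_t disp = static_cast<int32_t>(offset_imm);
      if (offset == nullptr) return {addr, disp};  // base + small immediate
      return {addr + *offset, disp};               // (base + index) + small immediate
    }
    // Large immediate: materialize it first (the scratch register above), then
    // add the base and the optional index, leaving a zero displacement.
    uint64_t scratch = addr + static_cast<uint64_t>(offset_imm);
    if (offset != nullptr) scratch += *offset;
    return {scratch, 0};
  }
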
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->Ld_w(dst.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->Ld_d(dst.gp(), src);
+ break;
+ case kF32:
+ assm->Fld_s(dst.fp(), src);
+ break;
+ case kF64:
+ assm->Fld_d(dst.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueKind kind) {
+ MemOperand dst(base, offset);
+ switch (kind) {
+ case kI32:
+ assm->St_w(src.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->St_d(src.gp(), dst);
+ break;
+ case kF32:
+ assm->Fst_s(src.fp(), dst);
+ break;
+ case kF64:
+ assm->Fst_d(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->St_w(reg.gp(), MemOperand(sp, 0));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ assm->Push(reg.gp());
+ break;
+ case kF32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_s(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kF64:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_d(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+ // When the constant that represents the size of the stack frame can't be
+ // represented as a 16-bit value, we need three instructions to add it to sp,
+ // so we reserve space for that case.
+ addi_d(sp, sp, 0);
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld_d(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld_d(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld_d(scratch, MemOperand(sp, i * 8));
+ St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ addi_d(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
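
The slot-copying loop above is easier to see on plain memory. A minimal standalone sketch (not part of this patch; ShiftFrame is a made-up name, and slots are modeled as 8-byte words as on loong64):

  #include <cstdint>

  // Copy `slot_count` 8-byte slots from the sp-relative area to their new
  // fp-relative position, highest slot first, shifted by `stack_param_delta`.
  inline void ShiftFrame(uint64_t* sp, uint64_t* fp, int slot_count,
                         int stack_param_delta) {
    for (int i = slot_count - 1; i >= 0; --i) {
      // Mirrors Ld_d(scratch, sp + i*8); St_d(scratch, fp + (i - delta)*8).
      fp[i - stack_param_delta] = sp[i];
    }
  }
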
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+ // We can't run out of space here; just pass anything big enough so that the
+ // assembler does not try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add_d(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset;
+ CHECK(is_int26(imm32));
+ patching_assembler.b(imm32 >> 2);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld_d(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld_d(stack_limit, MemOperand(stack_limit, 0));
+ Add_d(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP.
+ Add_d(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Add_d(sp, sp, -frame_size)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 3 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ CHECK(is_int26(imm32));
+ b(imm32 >> 2);
+}
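
The same PC-relative branch arithmetic is used twice above (prologue to OOL code, and OOL code back to the function body). A minimal standalone sketch (not part of this patch; EncodeBranchOffset is a made-up name):

  #include <cassert>
  #include <cstdint>

  // Byte distance between two code offsets, encoded for the `b` instruction as
  // used above: it must be 4-byte aligned and fit in a signed 26-bit value, and
  // the encoded operand is the distance divided by 4.
  inline int32_t EncodeBranchOffset(int from_pc_offset, int to_pc_offset) {
    int32_t imm32 = to_pc_offset - from_pc_offset;
    assert((imm32 & 3) == 0);
    assert(imm32 >= -(1 << 25) && imm32 < (1 << 25));  // mirrors CHECK(is_int26(imm32))
    return imm32 >> 2;
  }
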
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference(kind);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Ld_b(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Ld_w(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld_d(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int32_t offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld_d(dst, MemOperand(instance, offset));
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ St_d(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::ResetOSRTarget() {}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ UseScratchRegisterScope temps(this);
+ Operand offset_op =
+ offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset, so fold them into a single offset up front. The sum is
+ // still a 32-bit value, but it is kept in a 64-bit register because that is
+ // what the MemOperand below needs.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.Acquire();
+ Add_d(effective_offset, offset_reg, Operand(offset_imm));
+ offset_op = Operand(effective_offset);
+ }
+ if (offset_op.is_reg()) {
+ St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ } else {
+ St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ }
+
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem,
+ bool i64_offset) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ld_bu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Ld_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ld_hu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ld_h(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ld_wu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ld_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Ld_d(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Fld_s(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Fld_d(dst.fp(), src_op);
+ break;
+ case LoadType::kS128Load:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ St_b(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::St_h(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::St_w(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::St_d(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Fst_s(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Fst_d(src.fp(), dst_op);
+ break;
+ case StoreType::kS128Store:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { dbar(0); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, kind);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, kind);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ St_w(reg.gp(), dst);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ St_d(reg.gp(), dst);
+ break;
+ case kF32:
+ Fst_s(reg.fp(), dst);
+ break;
+ case kF64:
+ TurboAssembler::Fst_d(reg.fp(), dst);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ St_w(tmp.gp(), dst);
+ break;
+ }
+ case kI64:
+ case kRef:
+ case kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ St_d(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ Ld_w(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ // TODO(LOONG_dev): LOONG64: check; MIPS64 doesn't need this, do ARM64/LOONG64?
+ case kRtt:
+ case kRttWithDepth:
+ Ld_d(reg.gp(), src);
+ break;
+ case kF32:
+ Fld_s(reg.fp(), src);
+ break;
+ case kF64:
+ TurboAssembler::Fld_d(reg.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add_d(a0, fp, Operand(-start - size));
+ Add_d(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ St_d(zero_reg, MemOperand(a0, 0));
+ addi_d(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
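
The small-size path above splits the region into 8-byte stores plus an optional 4-byte tail. A minimal standalone sketch over plain memory (not part of this patch; ZeroSlots is a made-up name):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Zero `size` bytes ending at `region_end`, using 8-byte chunks plus an
  // optional 4-byte tail; mirrors the St_d / St_w split above (size is assumed
  // to be a multiple of 4).
  inline void ZeroSlots(uint8_t* region_end, uint32_t size) {
    uint32_t remainder = size;
    for (; remainder >= 8; remainder -= 8) {
      std::memset(region_end - remainder, 0, 8);  // analogue of St_d(zero_reg, ...)
    }
    assert(remainder == 4 || remainder == 0);
    if (remainder) {
      std::memset(region_end - remainder, 0, 4);  // analogue of St_w(zero_reg, ...)
    }
  }
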
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz_d(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz_d(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Div_wu(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_wu(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, add_w)
+I32_BINOP(sub, sub_w)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add_w)
+I32_BINOP_I(sub, Sub_w)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz_w(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt_w(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction, instruction1) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction1(dst, src, amount & 0x1f); \
+ }
+
+I32_SHIFTOP_I(shl, sll_w, slli_w)
+I32_SHIFTOP_I(sar, sra_w, srai_w)
+I32_SHIFTOP_I(shr, srl_w, srli_w)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(
+ kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, Add_d)
+I64_BINOP(sub, Sub_d)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction, instructioni) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ instructioni(dst.gp(), src.gp(), amount & 63); \
+ }
+
+I64_SHIFTOP_I(shl, sll_d, slli_d)
+I64_SHIFTOP_I(sar, sra_d, srai_d)
+I64_SHIFTOP_I(shr, srl_d, srli_d)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+ bstrpick_d(dst, src, 31, 0);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI32SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_w_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Checking if trap.
+ movgr2fr_w(kScratchDoubleReg, dst.gp());
+ ffint_s_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+
+ // Checking if trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ fcvt_s_d(converted_back.fp(), converted_back.fp());
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_w_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+
+ // Checking if trap.
+ ffint_d_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+
+ // Checking if trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ slli_w(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI64SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_l_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Checking if trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_s_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Checking if trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_l_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Checking if trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_d_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Checking if trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64ReinterpretF64:
+ movfr2gr_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ movgr2fr_d(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i32");
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueKind kind,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(kind == kI32 || kind == kI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltui(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If negative condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+ // If tmp != dst, result will be moved.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltui(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If negative condition is true, write 0 as result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+ // If tmp != dst, result will be moved.
+ TurboAssembler::Move(dst, tmp);
+}
+
+namespace liftoff {
+
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+ bool* predicate) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kUnequal:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF32(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f32.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF32(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF64(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f64.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF64(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueKind kind) {
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (FLAG_debug_code) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kSystemPointerSize;
+ addi_d(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ St_d(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ unsigned slot_size = 8;
+ addi_d(sp, sp, -(num_fp_regs * slot_size));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += slot_size;
+ }
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += 8;
+ }
+ if (fp_offset) addi_d(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ addi_d(sp, sp, gp_offset);
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ Drop(static_cast<int>(num_stack_slots));
+ Ret();
+}
+
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueKind out_argument_kind, int stack_bytes,
+ ExternalReference ext_ref) {
+ addi_d(sp, sp, -stack_bytes);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On LoongArch, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mov(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
+ }
+
+ addi_d(sp, sp, stack_bytes);
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ addi_d(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addi_d(sp, sp, size);
+}
+
+void LiftoffAssembler::MaybeOSR() {}
+
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
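+  // Write 1 to the word at {dst} if {src} is NaN; otherwise leave it
+  // untouched.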
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label not_nan;
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ BranchFalseShortF(&not_nan);
+ li(scratch, 1);
+ St_w(scratch, MemOperand(dst, 0));
+ bind(&not_nan);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp,
+ LiftoffRegister tmp_s128,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
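+  // Walk the slots from the highest parameter slot downwards: first allocate
+  // any gap between it and the previously pushed slot, then push the value
+  // from its source (stack slot, register, or i32 constant).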
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->Push(kScratchReg);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ }
+ break;
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->Push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4ab036da8e6..35eabecbf04 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -3067,20 +3067,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sw(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 0a23c190e92..e47da841485 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -3235,22 +3235,35 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sd(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ Sw(dst, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (lane_kind == kF32) {
+ fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ }
+ BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
+ USE_DELAY_SLOT);
+ li(tmp_gp, 1);
+ Sw(tmp_gp, MemOperand(dst));
+ bind(&not_nan);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 8e3808d2599..938fa41ea9e 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -41,7 +42,8 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset =
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -106,33 +108,105 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = ip;
+ // Push the return address and frame pointer to complete the stack frame.
+ AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp), r0);
+ StoreU64(scratch, MemOperand(sp), r0);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize), r0);
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
+ }
+
+ // Set the new stack and frame pointer.
+ AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
+ Pop(r0, fp);
+ mtlr(r0);
}
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
- int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
-
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int frame_size =
+ GetTotalFrameSize() -
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
+
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.addi(sp, sp, Operand(-frame_size));
return;
}
-#endif
- if (!is_int16(-frame_size)) {
- bailout(kOtherReason, "PPC subi overflow");
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
return;
}
- Assembler patching_assembler(
- AssemblerOptions{},
- ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
- patching_assembler.addi(sp, sp, Operand(-frame_size));
+ patching_assembler.b(jump_offset, LeaveLK);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddS64(stack_limit, stack_limit, Operand(frame_size), r0);
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ SubS64(sp, sp, Operand(frame_size), r0);
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ jump_offset = offset - pc_offset() + kInstrSize;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
+ return;
+ }
+ b(jump_offset, LeaveLK);
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -169,14 +243,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
- MovIntToFloat(reg.fp(), scratch);
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch, ip);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
@@ -412,43 +486,124 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
+ lwsync();
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ lwsync();
+ Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
+ sync();
}
+#ifdef V8_TARGET_BIG_ENDIAN
+constexpr bool is_be = true;
+#else
+constexpr bool is_be = false;
+#endif
+
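+// {ATOMIC_OP} computes the effective address from {dst_addr}, {offset_reg}
+// and {offset_imm}, dispatches on the store type, and wraps the given
+// instruction in a lambda (byte-reversing multi-byte values on big-endian
+// targets) that is handed to the AtomicOps<T> helper to perform the atomic
+// read-modify-write.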
+#define ATOMIC_OP(instr) \
+ { \
+ Register offset = r0; \
+ if (offset_imm != 0) { \
+ mov(ip, Operand(offset_imm)); \
+ if (offset_reg != no_reg) { \
+ add(ip, ip, offset_reg); \
+ } \
+ offset = ip; \
+ } else { \
+ if (offset_reg != no_reg) { \
+ offset = offset_reg; \
+ } \
+ } \
+ \
+ MemOperand dst = MemOperand(offset, dst_addr); \
+ \
+ switch (type.value()) { \
+ case StoreType::kI32Store8: \
+ case StoreType::kI64Store8: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ instr(dst, lhs, rhs); \
+ }; \
+ AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store16: \
+ case StoreType::kI64Store16: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU16(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU16(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store: \
+ case StoreType::kI64Store32: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU32(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU32(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI64Store: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU64(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU64(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
+
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ ATOMIC_OP(add);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ ATOMIC_OP(sub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ ATOMIC_OP(and_);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ ATOMIC_OP(orx);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ ATOMIC_OP(xor_);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -594,16 +749,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
switch (kind) {
case kI32:
case kF32:
- LoadU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
- StoreU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ LoadU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ StoreU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
break;
case kI64:
case kOptRef:
case kRef:
case kRtt:
case kF64:
- LoadU64(ip, liftoff::GetStackSlot(dst_offset), r0);
- StoreU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ LoadU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ StoreU64(ip, liftoff::GetStackSlot(dst_offset), r0);
break;
case kS128:
bailout(kSimd, "simd op");
@@ -750,20 +905,25 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
andi(r0, rhs, Operand(31)); \
return r0; \
})
+#define REGISTER_AND_WITH_3F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(63)); \
+ return r0; \
+ })
+
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
#define UNOP_LIST(V) \
- V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
- V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
+ V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
void) \
V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
@@ -772,16 +932,12 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
true, bool) \
V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
true, bool) \
- V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
- ROUND_F64_TO_F32, true, bool) \
V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
- V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
- bool) \
V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
@@ -816,89 +972,89 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
-#define BINOP_LIST(V) \
- V(f32_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , ROUND_F64_TO_F32, , void) \
- V(f64_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , USE, , void) \
- V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
- V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
- V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
- V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
- V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
- V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
- V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
- V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
- V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+#define BINOP_LIST(V) \
+ V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , ROUND_F64_TO_F32, , void) \
+ V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , USE, , void) \
+ V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
+ V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
@@ -921,61 +1077,331 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont, done, trap_div_unrepresentable;
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ beq(&trap_div_unrepresentable);
+
+ // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+ // The kMinInt / -1 case: the remainder is 0.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- bailout(kUnsupportedArchitecture, "emit_type_conversion");
- return true;
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64SConvertI32:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ ZeroExtWord32(dst.gp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ frsp(dst.fp(), src.fp());
+ return true;
+ case kExprF64ConvertF32:
+ fmr(dst.fp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ ConvertIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ ConvertUnsignedIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI32: {
+ ConvertIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ ConvertUnsignedIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI64: {
+ ConvertInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI64: {
+ ConvertUnsignedInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32SConvertI64: {
+ ConvertInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI64: {
+ ConvertUnsignedInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprI32SConvertF64:
+ case kExprI32SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32UConvertF64:
+ case kExprI32UConvertF32: {
+ ConvertDoubleToUnsignedInt64(src.fp(), r0, kScratchDoubleReg,
+ kRoundToZero);
+ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ boverflow(trap, cr7);
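+ // The conversion above produced a 64-bit value; trap if it does not fit
+ // in an unsigned 32-bit integer.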
+ ZeroExtWord32(dst.gp(), r0);
+ CmpU64(dst.gp(), r0);
+ bne(trap);
+ return true;
+ }
+ case kExprI64SConvertF64:
+ case kExprI64SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI64UConvertF64:
+ case kExprI64UConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32SConvertSatF64:
+ case kExprI32SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32UConvertSatF64:
+ case kExprI32UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwuz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64SConvertSatF64:
+ case kExprI64SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64UConvertSatF64:
+ case kExprI64UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32ReinterpretF32: {
+ MovFloatToInt(dst.gp(), src.fp(), kScratchDoubleReg);
+ return true;
+ }
+ case kExprI64ReinterpretF64: {
+ MovDoubleToInt64(dst.gp(), src.fp());
+ return true;
+ }
+ case kExprF32ReinterpretI32: {
+ MovIntToFloat(dst.fp(), src.gp(), r0);
+ return true;
+ }
+ case kExprF64ReinterpretI64: {
+ MovInt64ToDouble(dst.fp(), src.gp());
+ return true;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
@@ -1025,8 +1451,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm), r0);
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm), r0);
+ } else {
+ CmpU32(lhs, Operand(imm), r0);
+ }
b(cond, label);
}
@@ -1083,11 +1514,19 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs);
- Label done;
- mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ fcmpu(lhs, rhs, cr0);
+ Label nan, done;
+ bunordered(&nan, cr0);
mov(dst, Operand::Zero());
+ b(NegateCondition(liftoff::ToCondition(liftoff_cond)), &done, cr0);
+ mov(dst, Operand(1));
+ b(&done);
+ bind(&nan);
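+ // Unordered (NaN) operands: only "not equal" compares true.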
+ if (liftoff_cond == kUnequal) {
+ mov(dst, Operand(1));
+ } else {
+ mov(dst, Operand::Zero());
+ }
bind(&done);
}
@@ -1114,7 +1553,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj, r0);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target, cr0); // Branch on the result of the Smi check.
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
@@ -2254,30 +2695,46 @@ void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- bailout(kUnsupportedArchitecture, "StackCheck");
+ LoadU64(limit_address, MemOperand(limit_address), r0);
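+ // {limit_address} now holds the stack limit; take the out-of-line path
+ // if sp is at or below it.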
+ CmpU64(sp, limit_address);
+ ble(ool_code);
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
+ PrepareCallCFunction(0, 0, ip);
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- bailout(kUnsupportedArchitecture, "AssertUnreachable");
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
+ int spill_space_size = 0;
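+ // Walk all spilled registers and record reference slots in the safepoint.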
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
@@ -2289,37 +2746,120 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, kSystemPointerSize);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+ // Reserve space on the stack.
+ while (size > kStackPageSize) {
+ SubS64(sp, sp, Operand(kStackPageSize), r0);
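+ // Touch the newly reserved page so guard pages are hit one page at a time.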
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ SubS64(sp, sp, Operand(size), r0);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mr(r3, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, r0);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r3;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ AddS64(sp, sp, Operand(total_size), r0);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- bailout(kUnsupportedArchitecture, "CallIndirect");
+ DCHECK(target != no_reg);
+ Call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
- bailout(kUnsupportedArchitecture, "TailCallIndirect");
+ DCHECK(target != no_reg);
+ Jump(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- bailout(kUnsupportedArchitecture, "CallRuntimeStub");
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ SubS64(sp, sp, Operand(size), r0);
+ mr(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
+ AddS64(sp, sp, Operand(size));
}
void LiftoffAssembler::MaybeOSR() {}
@@ -2329,15 +2869,114 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
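+ // Slots are pushed in stack order; the gap to the previous slot is
+ // allocated as padding first.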
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kF64:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index fef59471c1d..616f10fa8f6 100644
--- a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -79,16 +79,16 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
if (is_uint31(offset_imm)) {
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
if (offset == no_reg) return MemOperand(addr, offset_imm32);
- assm->Add64(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm32);
+ assm->Add64(kScratchReg2, addr, offset);
+ return MemOperand(kScratchReg2, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
- assm->li(kScratchReg, offset_imm);
- assm->Add64(kScratchReg, kScratchReg, addr);
+ assm->li(kScratchReg2, offset_imm);
+ assm->Add64(kScratchReg2, kScratchReg2, addr);
if (offset != no_reg) {
- assm->Add64(kScratchReg, kScratchReg, offset);
+ assm->Add64(kScratchReg2, kScratchReg2, offset);
}
- return MemOperand(kScratchReg, 0);
+ return MemOperand(kScratchReg2, 0);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
@@ -128,10 +128,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst);
+ assm->UStoreFloat(src.fp(), dst, kScratchReg);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst);
+ assm->UStoreDouble(src.fp(), dst, kScratchReg);
break;
default:
UNREACHABLE();
@@ -335,7 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// space if we first allocate the frame and then do the stack check (we will
// need some remaining stack space for throwing the exception). That's why we
// check the available stack space before we allocate the frame. To do this we
- // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // replace the {__ Add64(sp, sp, -frame_size)} with a jump to OOL code that
// does this "extended stack check".
//
// The OOL code can simply be generated here with the normal assembler,
@@ -376,7 +376,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
Add64(sp, sp, Operand(-frame_size));
// Jump back to the start of the function, from {pc_offset()} to
- // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // right after the reserved space for the {__ Add64(sp, sp, -framesize)}
// (which is a Branch now).
int func_start_offset = offset + 2 * kInstrSize;
imm32 = func_start_offset - pc_offset();
@@ -552,11 +552,20 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
TurboAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
break;
+ case LoadType::kS128Load: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
+ if (src_op.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src_op.rm(), src_op.offset());
+ }
+ vl(dst.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -607,11 +616,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
break;
+ case StoreType::kS128Store: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
+ if (dst_op.offset() != 0) {
+ Add64(kScratchReg, dst_op.rm(), dst_op.offset());
+ }
+ vs(src.fp().toV(), dst_reg, 0, VSew::E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -747,24 +765,26 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
switch (type.value()) {
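+ // Sequentially consistent load mapping: fence rw,rw; load; fence r,rw.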
case LoadType::kI32Load8U:
case LoadType::kI64Load8U:
+ fence(PSR | PSW, PSR | PSW);
lbu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
+ fence(PSR | PSW, PSR | PSW);
lhu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load:
- lr_w(true, true, dst.gp(), src_reg);
- return;
case LoadType::kI64Load32U:
- lr_w(true, true, dst.gp(), src_reg);
- slli(dst.gp(), dst.gp(), 32);
- srli(dst.gp(), dst.gp(), 32);
+ fence(PSR | PSW, PSR | PSW);
+ lw(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI64Load:
- lr_d(true, true, dst.gp(), src_reg);
+ fence(PSR | PSW, PSR | PSW);
+ ld(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
default:
UNREACHABLE();
@@ -780,22 +800,22 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
switch (type.value()) {
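+ // Sequentially consistent store mapping: fence rw,w; store.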
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- sync();
+ fence(PSR | PSW, PSW);
sb(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- sync();
+ fence(PSR | PSW, PSW);
sh(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- sc_w(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sw(src.gp(), dst_reg, 0);
return;
case StoreType::kI64Store:
- sc_d(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sd(src.gp(), dst_reg, 0);
return;
default:
UNREACHABLE();
@@ -948,7 +968,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ TurboAssembler::vmv_vv(dst.toV(), src.toV());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
@@ -971,9 +995,15 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case kS128:
- bailout(kSimd, "Spill S128");
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
+ if (dst.offset() != 0) {
+ Add64(kScratchReg, dst.rm(), dst.offset());
+ }
+ vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
break;
+ }
default:
UNREACHABLE();
}
@@ -1021,6 +1051,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ }
+ vl(reg.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1072,7 +1111,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
@@ -1154,7 +1193,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src);
+ TurboAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1663,7 +1702,37 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "emit_i8x16_shuffle");
+ VRegister dst_v = dst.fp().toV();
+ VRegister lhs_v = lhs.fp().toV();
+ VRegister rhs_v = rhs.fp().toV();
+
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
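+ // Materialize the 16 shuffle indices into kSimd128ScratchReg from the
+ // two 64-bit halves.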
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ VU.set(kScratchReg, E8, m1);
+ VRegister temp =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
+ if (dst_v == lhs_v) {
+ vmv_vv(temp, lhs_v);
+ lhs_v = temp;
+ } else if (dst_v == rhs_v) {
+ vmv_vv(temp, rhs_v);
+ rhs_v = temp;
+ }
+ vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
+ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg,
+ -16); // Indices in the range [16, 31] select element (i - 16) of rhs.
+ vrgather_vv(kSimd128ScratchReg2, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -1679,52 +1748,60 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_splat");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_splat");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ fmv_x_w(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ fmv_x_d(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
#define SIMD_BINOP(name1, name2) \
@@ -1756,7 +1833,11 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ VU.set(kScratchReg, E64, m1);
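+ // Produce a mask of negative lanes, then move the mask into a
+ // general-purpose register.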
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
@@ -1781,112 +1862,124 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_eq");
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_ne");
+ VU.set(kScratchReg, E32, m1);
+ vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_lt");
+ VU.set(kScratchReg, E32, m1);
+ vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_le");
+ VU.set(kScratchReg, E32, m1);
+ vmfle_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
@@ -1906,7 +1999,10 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4.demote_f64x2_zero");
+ VU.set(kScratchReg, E32, m1);
+ vfncvt_f_f_w(dst.fp().toV(), src.fp().toV());
+ vmv_vi(v0, 12);
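+ // Mask 0b1100 selects lanes 2 and 3, which are zeroed by the merge below.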
+ vmerge_vx(dst.fp().toV(), zero_reg, dst.fp().toV());
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
@@ -1941,69 +2037,102 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kSimd, "emit_s128_const");
+ WasmRvvS128const(dst.fp().toV(), imms);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kSimd, "emit_s128_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_or");
+ VU.set(kScratchReg, E8, m1);
+ vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_xor");
+ VU.set(kScratchReg, E8, m1);
+ vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), rhs.fp().toV());
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), dst.fp().toV());
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kSimd, "emit_s128_select");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(kSimd128ScratchReg, src1.fp().toV(), mask.fp().toV());
+ vnot_vv(kSimd128ScratchReg2, mask.fp().toV());
+ vand_vv(kSimd128ScratchReg2, src2.fp().toV(), kSimd128ScratchReg2);
+ vor_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_neg");
+ VU.set(kScratchReg, E8, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v128_anytrue");
+ VU.set(kScratchReg, E8, m1);
+ Label t;
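+ // Unsigned-max reduction: any nonzero lane yields a nonzero result,
+ // which is normalized to 1 below.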
+ vmv_sx(kSimd128ScratchReg, zero_reg);
+ vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beq(dst.gp(), zero_reg, &t);
+ li(dst.gp(), 1);
+ bind(&t);
}
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_alltrue");
+ VU.set(kScratchReg, E8, m1);
+ Label alltrue;
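+ // Unsigned-min reduction: a single zero lane forces the result to zero.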
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_bitmask");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shl");
+ VU.set(kScratchReg, E8, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E8, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
@@ -2030,36 +2159,42 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add");
+ VU.set(kScratchReg, E8, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
@@ -2093,22 +2228,37 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_alltrue");
+ VU.set(kScratchReg, E16, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_bitmask");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shl");
+ VU.set(kScratchReg, E16, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
@@ -2135,7 +2285,8 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add");
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
@@ -2152,7 +2303,8 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub");
+ VU.set(kScratchReg, E16, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
@@ -2203,22 +2355,39 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_alltrue");
+ VU.set(kScratchReg, E32, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_bitmask");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shl");
+ VU.set(kScratchReg, E32, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shli");
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2245,12 +2414,14 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2295,17 +2466,32 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_alltrue");
+ VU.set(kScratchReg, E64, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shl");
+ VU.set(kScratchReg, E64, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shli");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
@@ -2332,12 +2518,14 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2347,12 +2535,14 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_neg");
+ VU.set(kScratchReg, E32, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -2362,13 +2552,13 @@ void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_ceil");
+ Ceil_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_floor");
+ Floor_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2386,32 +2576,53 @@ bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_mul");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfmul_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_div");
+ VU.set(kScratchReg, E32, m1);
+ vfdiv_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_min");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
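+ // v0 masks lanes where both inputs are non-NaN; unmasked lanes keep the
+ // canonical NaN.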
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_max");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2426,12 +2637,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_abs");
+ VU.set(kScratchReg, E64, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_neg");
+ VU.set(kScratchReg, E64, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -2441,13 +2654,13 @@ void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_ceil");
+ Ceil_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_floor");
+ Floor_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2465,12 +2678,14 @@ bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2505,22 +2720,34 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
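+ // Only non-NaN lanes are converted; NaN lanes remain zero.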
+ vfcvt_x_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vfcvt_xu_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_x_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
@@ -2637,7 +2864,11 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
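+ // Negate only the lanes that are negative.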
+ vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
@@ -2667,7 +2898,9 @@ void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), v31);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
@@ -2692,28 +2925,40 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_replace_lane");
+ VU.set(kScratchReg, E8, m1);
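+ // A one-hot mask selects the target lane; the merge writes the scalar there.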
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_replace_lane");
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
@@ -2730,9 +2975,9 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
bailout(kSimd, "emit_s128_set_if_nan");
}
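
The replace_lane sequences above all follow one RVV idiom: load a one-hot lane mask into v0 (li + vmv.s.x), then vmerge.vx writes the scalar into the selected lane and copies the source vector everywhere else. A scalar sketch of that merge, assuming nothing beyond the one-hot mask (no RVV intrinsics, not V8 code):

#include <array>
#include <cstddef>
#include <cstdint>

template <typename T, size_t kLanes>
std::array<T, kLanes> ReplaceLane(const std::array<T, kLanes>& src, T value,
                                  unsigned lane) {
  uint32_t mask = 1u << lane;  // li kScratchReg, 1 << lane
  std::array<T, kLanes> dst;
  for (size_t i = 0; i < kLanes; ++i) {
    // vmerge.vx: take {value} where the mask bit is set, {src} elsewhere.
    dst[i] = ((mask >> i) & 1u) ? value : src[i];
  }
  return dst;
}
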
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 722b0b074b4..52e8bb683dc 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -122,26 +123,72 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
constexpr int LayInstrSize = 6;
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
- return;
- }
-#endif
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, LayInstrSize + kGap));
- patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ patching_assembler.branchOnCond(al, jump_offset, true, true);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddU64(stack_limit, Operand(frame_size));
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ lay(sp, MemOperand(sp, -frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ jump_offset = offset - pc_offset() + 6;
+ branchOnCond(al, jump_offset, true);
}
void LiftoffAssembler::FinishCode() {}
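
The rewritten PatchPrepareStackFrame above only checks the stack for frames of 4 KB or more: the reserved prologue slot is patched into a jump to out-of-line code that compares sp against the real stack limit plus the frame size (or traps unconditionally when the frame cannot fit in the configured stack at all), allocates the frame, and jumps back. A compact sketch of just that decision, with hypothetical names rather than the V8 helpers:

#include <cstdint>

constexpr uint32_t kSmallFrameLimit = 4 * 1024;  // below this, no check

// Returns true if the frame may be allocated; mirrors the OOL logic above.
bool CanAllocateLargeFrame(uint32_t frame_size, uintptr_t sp,
                           uintptr_t real_stack_limit,
                           uint32_t configured_stack_bytes) {
  if (frame_size < kSmallFrameLimit) return true;          // fast path
  if (frame_size >= configured_stack_bytes) return false;  // always overflows
  return sp >= real_stack_limit + frame_size;              // extended check
}
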
@@ -2057,8 +2104,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm));
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm));
+ } else {
+ CmpU32(lhs, Operand(imm));
+ }
b(cond, label);
}
@@ -2143,81 +2195,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_min, F64x2Min) \
- V(f64x2_max, F64x2Max) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_ne, F64x2Ne) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_ne, F32x4Ne) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_mul, I64x2Mul) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_ge_u, I8x16GeU) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name, op) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add, fp) \
+ V(f64x2_sub, F64x2Sub, fp) \
+ V(f64x2_mul, F64x2Mul, fp) \
+ V(f64x2_div, F64x2Div, fp) \
+ V(f64x2_min, F64x2Min, fp) \
+ V(f64x2_max, F64x2Max, fp) \
+ V(f64x2_eq, F64x2Eq, fp) \
+ V(f64x2_ne, F64x2Ne, fp) \
+ V(f64x2_lt, F64x2Lt, fp) \
+ V(f64x2_le, F64x2Le, fp) \
+ V(f32x4_add, F32x4Add, fp) \
+ V(f32x4_sub, F32x4Sub, fp) \
+ V(f32x4_mul, F32x4Mul, fp) \
+ V(f32x4_div, F32x4Div, fp) \
+ V(f32x4_min, F32x4Min, fp) \
+ V(f32x4_max, F32x4Max, fp) \
+ V(f32x4_eq, F32x4Eq, fp) \
+ V(f32x4_ne, F32x4Ne, fp) \
+ V(f32x4_lt, F32x4Lt, fp) \
+ V(f32x4_le, F32x4Le, fp) \
+ V(i64x2_add, I64x2Add, fp) \
+ V(i64x2_sub, I64x2Sub, fp) \
+ V(i64x2_mul, I64x2Mul, fp) \
+ V(i64x2_eq, I64x2Eq, fp) \
+ V(i64x2_ne, I64x2Ne, fp) \
+ V(i64x2_gt_s, I64x2GtS, fp) \
+ V(i64x2_ge_s, I64x2GeS, fp) \
+ V(i64x2_shl, I64x2Shl, gp) \
+ V(i64x2_shr_s, I64x2ShrS, gp) \
+ V(i64x2_shr_u, I64x2ShrU, gp) \
+ V(i32x4_add, I32x4Add, fp) \
+ V(i32x4_sub, I32x4Sub, fp) \
+ V(i32x4_mul, I32x4Mul, fp) \
+ V(i32x4_eq, I32x4Eq, fp) \
+ V(i32x4_ne, I32x4Ne, fp) \
+ V(i32x4_gt_s, I32x4GtS, fp) \
+ V(i32x4_ge_s, I32x4GeS, fp) \
+ V(i32x4_gt_u, I32x4GtU, fp) \
+ V(i32x4_ge_u, I32x4GeU, fp) \
+ V(i32x4_min_s, I32x4MinS, fp) \
+ V(i32x4_min_u, I32x4MinU, fp) \
+ V(i32x4_max_s, I32x4MaxS, fp) \
+ V(i32x4_max_u, I32x4MaxU, fp) \
+ V(i32x4_shl, I32x4Shl, gp) \
+ V(i32x4_shr_s, I32x4ShrS, gp) \
+ V(i32x4_shr_u, I32x4ShrU, gp) \
+ V(i16x8_add, I16x8Add, fp) \
+ V(i16x8_sub, I16x8Sub, fp) \
+ V(i16x8_mul, I16x8Mul, fp) \
+ V(i16x8_eq, I16x8Eq, fp) \
+ V(i16x8_ne, I16x8Ne, fp) \
+ V(i16x8_gt_s, I16x8GtS, fp) \
+ V(i16x8_ge_s, I16x8GeS, fp) \
+ V(i16x8_gt_u, I16x8GtU, fp) \
+ V(i16x8_ge_u, I16x8GeU, fp) \
+ V(i16x8_min_s, I16x8MinS, fp) \
+ V(i16x8_min_u, I16x8MinU, fp) \
+ V(i16x8_max_s, I16x8MaxS, fp) \
+ V(i16x8_max_u, I16x8MaxU, fp) \
+ V(i16x8_shl, I16x8Shl, gp) \
+ V(i16x8_shr_s, I16x8ShrS, gp) \
+ V(i16x8_shr_u, I16x8ShrU, gp) \
+ V(i8x16_add, I8x16Add, fp) \
+ V(i8x16_sub, I8x16Sub, fp) \
+ V(i8x16_eq, I8x16Eq, fp) \
+ V(i8x16_ne, I8x16Ne, fp) \
+ V(i8x16_gt_s, I8x16GtS, fp) \
+ V(i8x16_ge_s, I8x16GeS, fp) \
+ V(i8x16_gt_u, I8x16GtU, fp) \
+ V(i8x16_ge_u, I8x16GeU, fp) \
+ V(i8x16_min_s, I8x16MinS, fp) \
+ V(i8x16_min_u, I8x16MinU, fp) \
+ V(i8x16_max_s, I8x16MaxS, fp) \
+ V(i8x16_max_u, I8x16MaxU, fp) \
+ V(i8x16_shl, I8x16Shl, gp) \
+ V(i8x16_shr_s, I8x16ShrS, gp) \
+ V(i8x16_shr_u, I8x16ShrU, gp)
+
+#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.fp()); \
+ op(dst.fp(), lhs.fp(), rhs.stype()); \
+ }
+SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
+#undef EMIT_SIMD_BINOP_RR
+#undef SIMD_BINOP_RR_LIST
+
+#define SIMD_BINOP_RI_LIST(V) \
+ V(i64x2_shli, I64x2Shl) \
+ V(i64x2_shri_s, I64x2ShrS) \
+ V(i64x2_shri_u, I64x2ShrU) \
+ V(i32x4_shli, I32x4Shl) \
+ V(i32x4_shri_s, I32x4ShrS) \
+ V(i32x4_shri_u, I32x4ShrU) \
+ V(i16x8_shli, I16x8Shl) \
+ V(i16x8_shri_s, I16x8ShrS) \
+ V(i16x8_shri_u, I16x8ShrU) \
+ V(i8x16_shli, I8x16Shl) \
+ V(i8x16_shri_s, I8x16ShrS) \
+ V(i8x16_shri_u, I8x16ShrU)
+
+#define EMIT_SIMD_BINOP_RI(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t rhs) { \
+ op(dst.fp(), lhs.fp(), Operand(rhs)); \
}
-SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
-#undef EMIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
+#undef EMIT_SIMD_BINOP_RI
+#undef SIMD_BINOP_RI_LIST
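
The two lists above use the X-macro pattern: SIMD_BINOP_RR_LIST now carries a third column naming the accessor for the right-hand operand, so vector-vector ops (fp) and register-count shifts (gp) expand through one EMIT macro, while SIMD_BINOP_RI_LIST covers the immediate shifts. A standalone toy version of that shape (names invented, not V8 code):

#include <cstdio>

struct Reg {
  const char* fp() const { return "v0"; }  // SIMD register name
  const char* gp() const { return "r1"; }  // general-purpose register name
};

#define BINOP_RR_LIST(V)   \
  V(i32x4_add, "va", fp)   \
  V(i32x4_shl, "vesl", gp)

#define EMIT_BINOP_RR(name, mnemonic, stype)                         \
  void emit_##name(const Reg& dst, const Reg& lhs, const Reg& rhs) { \
    std::printf("%s %s, %s, %s\n", mnemonic, dst.fp(), lhs.fp(),     \
                rhs.stype());                                        \
  }
BINOP_RR_LIST(EMIT_BINOP_RR)
#undef EMIT_BINOP_RR
#undef BINOP_RR_LIST

int main() {
  Reg dst, lhs, rhs;
  emit_i32x4_add(dst, lhs, rhs);  // right operand printed via fp()
  emit_i32x4_shl(dst, lhs, rhs);  // right operand printed via gp()
}
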
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2511,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
-void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
-}
-
-void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_u");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_u");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2575,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
-void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
-}
-
-void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_u");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2630,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
-void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
-}
-
-void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_u");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_u");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2727,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
-void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
-}
-
-void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_u");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_u");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3134,14 +3093,40 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (kind == kF32) {
+ cebr(src, src);
+ bunordered(&return_nan);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ cdbr(src, src);
+ bunordered(&return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src, MemOperand(dst), r0);
+ bind(&done);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (lane_kind == kF32) {
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(2));
+ b(Condition(0x5), &return_nan); // If any or all are NaN.
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(3));
+ b(Condition(0x5), &return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src.fp(), MemOperand(dst), r0);
+ bind(&done);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5cda7b3c48..890afa2eda9 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -357,6 +357,14 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, Operand(instance, offset));
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldOperand(instance, offset), tag,
+ isolate_root,
+ IsolateRootLocation::kInScratchRegister);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
@@ -1317,7 +1325,9 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- if (dst.gp() == rhs.gp()) {
+ if (lhs.gp() == rhs.gp()) {
+ xorq(dst.gp(), dst.gp());
+ } else if (dst.gp() == rhs.gp()) {
negq(dst.gp());
addq(dst.gp(), lhs.gp());
} else {
@@ -2181,7 +2191,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
namespace liftoff {
-template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+template <void (SharedTurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
@@ -2335,29 +2345,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm as the one in code-generator-x64.cc.
- assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- // Prepare shift value
- assm->movq(kScratchRegister, rhs.gp());
- // Take shift value modulo 8.
- assm->andq(kScratchRegister, Immediate(7));
- assm->addq(kScratchRegister, Immediate(8));
- assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
- if (is_signed) {
- assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packsswb(dst.fp(), kScratchDoubleReg);
- } else {
- assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packuswb(dst.fp(), kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -2365,7 +2352,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->setcc(not_equal, dst.gp());
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2414,21 +2401,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), dst.fp(), src_op, 0);
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst.fp(), src_op);
- } else {
- movss(dst.fp(), src_op);
- shufps(dst.fp(), dst.fp(), byte{0});
- }
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2440,18 +2417,17 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (mem_type == MachineType::Int8()) {
- Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int16()) {
- Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int32()) {
- Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else {
DCHECK_EQ(MachineType::Int64(), mem_type);
- Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
}
}
@@ -2515,26 +2491,24 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
+ I8x16Popcnt(dst.fp(), src.fp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2753,17 +2727,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+ // 1. AVX or SSE4_2: no requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
kScratchDoubleReg);
@@ -2857,9 +2825,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
memcpy(vals, imms, sizeof(vals));
- TurboAssembler::Move(dst.fp(), vals[0]);
- movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
+ TurboAssembler::Move(dst.fp(), vals[1], vals[0]);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2927,89 +2893,37 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- movq(kScratchRegister, rhs.gp());
- andq(kScratchRegister, Immediate(7));
- addq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psrlw(kScratchDoubleReg, tmp_simd.fp());
- Packuswb(kScratchDoubleReg, kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- subq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psllw(dst.fp(), tmp_simd.fp());
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7; // i.InputInt3(1);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst.fp(), lhs.fp(), byte{shift});
- } else if (dst != lhs) {
- Movaps(dst.fp(), lhs.fp());
- psrlw(dst.fp(), byte{shift});
- }
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3220,14 +3134,13 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- Pmaddubsw(dst.fp(), src.fp(), op);
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3259,7 +3172,7 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3376,14 +3289,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- Pmaddwd(dst.fp(), src.fp(), op);
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), kScratchDoubleReg);
}
namespace liftoff {
@@ -3504,19 +3415,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
@@ -3574,28 +3473,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, static_cast<byte>(1));
- Andps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), static_cast<byte>(1));
- Andps(dst.fp(), src.fp());
- }
+ Absps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Negps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3657,61 +3540,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- minps(kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- minps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Orps(kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- maxps(kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- maxps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3730,28 +3564,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Abspd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllq(kScratchDoubleReg, static_cast<byte>(63));
- Xorpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), static_cast<byte>(63));
- Xorpd(dst.fp(), src.fp());
- }
+ Negpd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -3842,7 +3660,7 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -3852,26 +3670,7 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), kScratchDoubleReg);
- } else {
- movaps(kScratchDoubleReg, src.fp());
- cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(kScratchDoubleReg, dst.fp());
- Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), kScratchDoubleReg);
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4012,12 +3811,14 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4322,11 +4123,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
@@ -4334,11 +4131,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) {
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4376,19 +4169,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
orl(Operand(dst, 0), tmp_gp);
}
diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc
index 5a1ab579e77..e2163510308 100644
--- a/chromium/v8/src/wasm/c-api.cc
+++ b/chromium/v8/src/wasm/c-api.cc
@@ -26,12 +26,13 @@
#include <iostream>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/builtins/builtins.h"
#include "src/compiler/wasm-compiler.h"
#include "src/objects/js-collection-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
@@ -396,6 +397,11 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(engine->platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
return make_own(seal<Engine>(engine));
}
@@ -1945,7 +1951,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::Handle<i::FixedArray> backing_store;
i::Handle<i::WasmTableObject> table_obj = i::WasmTableObject::New(
isolate, i::Handle<i::WasmInstanceObject>(), i_type, minimum, has_maximum,
- maximum, &backing_store);
+ maximum, &backing_store, isolate->factory()->null_value());
if (ref) {
i::Handle<i::JSReceiver> init = impl(ref)->v8_object();
diff --git a/chromium/v8/src/wasm/c-api.h b/chromium/v8/src/wasm/c-api.h
index 0dba237d301..97a8d2d5f6c 100644
--- a/chromium/v8/src/wasm/c-api.h
+++ b/chromium/v8/src/wasm/c-api.h
@@ -9,7 +9,8 @@
#ifndef V8_WASM_C_API_H_
#define V8_WASM_C_API_H_
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "third_party/wasm-api/wasm.hh"
diff --git a/chromium/v8/src/wasm/code-space-access.cc b/chromium/v8/src/wasm/code-space-access.cc
index 0f71c9a2245..83cb5ddea14 100644
--- a/chromium/v8/src/wasm/code-space-access.cc
+++ b/chromium/v8/src/wasm/code-space-access.cc
@@ -12,6 +12,12 @@ namespace internal {
namespace wasm {
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+// The thread-local counter (above) is only valid if a single thread only works
+// on one module at a time. This second thread-local checks that.
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
+ nullptr;
+#endif
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
@@ -20,6 +26,12 @@ CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: native_module_(native_module) {
+#ifdef DEBUG
+ if (code_space_write_nesting_level_ == 0) {
+ current_native_module_ = native_module;
+ }
+ DCHECK_EQ(native_module, current_native_module_);
+#endif // DEBUG
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
if (code_space_write_nesting_level_ == 0) SetWritable();
code_space_write_nesting_level_++;
diff --git a/chromium/v8/src/wasm/code-space-access.h b/chromium/v8/src/wasm/code-space-access.h
index 96f852e63bd..788bb8eca37 100644
--- a/chromium/v8/src/wasm/code-space-access.h
+++ b/chromium/v8/src/wasm/code-space-access.h
@@ -55,6 +55,9 @@ class V8_NODISCARD CodeSpaceWriteScope final {
private:
static thread_local int code_space_write_nesting_level_;
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ static thread_local NativeModule* current_native_module_;
+#endif
void SetWritable() const;
void SetExecutable() const;
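
The CodeSpaceWriteScope changes above add a second thread-local that, in debug builds, asserts every nested scope targets the same NativeModule, since the nesting counter is only meaningful under that assumption. A self-contained sketch of the pattern (toy class, not the V8 one):

#include <cassert>

struct Module;  // stand-in for NativeModule

class WriteScope {
 public:
  explicit WriteScope(Module* module) {
    if (nesting_ == 0) {
      current_ = module;
      // SetWritable() would run here, once for the outermost scope.
    }
    assert(current_ == module && "nested scopes must target one module");
    ++nesting_;
  }
  ~WriteScope() {
    if (--nesting_ == 0) {
      current_ = nullptr;
      // SetExecutable() would run here, once for the outermost scope.
    }
  }

 private:
  static thread_local int nesting_;
  static thread_local Module* current_;
};

thread_local int WriteScope::nesting_ = 0;
thread_local Module* WriteScope::current_ = nullptr;
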
diff --git a/chromium/v8/src/wasm/compilation-environment.h b/chromium/v8/src/wasm/compilation-environment.h
index 773090c4e5c..574fe25ccac 100644
--- a/chromium/v8/src/wasm/compilation-environment.h
+++ b/chromium/v8/src/wasm/compilation-environment.h
@@ -45,6 +45,8 @@ enum BoundsCheckStrategy : int8_t {
kNoBoundsChecks
};
+enum class DynamicTiering { kEnabled, kDisabled };
+
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
struct CompilationEnv {
@@ -70,10 +72,13 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
+ const DynamicTiering dynamic_tiering;
+
constexpr CompilationEnv(const WasmModule* module,
BoundsCheckStrategy bounds_checks,
RuntimeExceptionSupport runtime_exception_support,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering)
: module(module),
bounds_checks(bounds_checks),
runtime_exception_support(runtime_exception_support),
@@ -88,7 +93,8 @@ struct CompilationEnv {
uintptr_t{module->maximum_pages})
: kV8MaxWasmMemoryPages) *
kWasmPageSize),
- enabled_features(enabled_features) {}
+ enabled_features(enabled_features),
+ dynamic_tiering(dynamic_tiering) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -105,6 +111,7 @@ class WireBytesStorage {
enum class CompilationEvent : uint8_t {
kFinishedBaselineCompilation,
kFinishedExportWrappers,
+ kFinishedCompilationChunk,
kFinishedTopTierCompilation,
kFailedCompilation,
kFinishedRecompilation
@@ -148,6 +155,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void set_compilation_id(int compilation_id);
+ DynamicTiering dynamic_tiering() const;
+
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
@@ -162,7 +171,8 @@ class V8_EXPORT_PRIVATE CompilationState {
// such that it can keep it alive (by regaining a {std::shared_ptr}) in
// certain scopes.
static std::unique_ptr<CompilationState> New(
- const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>);
+ const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>,
+ DynamicTiering dynamic_tiering);
};
} // namespace wasm
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 20c6b30ffcd..3d5ec7f933a 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -397,6 +397,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
"invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
+ if (!VALIDATE(CheckHardwareSupportsSimd())) {
+ DecodeError<validate>(decoder, pc, "Wasm SIMD unsupported");
+ return kWasmBottom;
+ }
return kWasmS128;
}
// Although these codes are included in ValueTypeCode, they technically
@@ -945,6 +949,8 @@ struct ControlBase : public PcForErrors<validate> {
F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(StructNewWithRtt, const StructIndexImmediate<validate>& imm, \
const Value& rtt, const Value args[], Value* result) \
+ F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
+ const Value& rtt, Value* result) \
F(ArrayInit, const ArrayIndexImmediate<validate>& imm, \
const base::Vector<Value>& elements, const Value& rtt, Value* result) \
F(RttCanon, uint32_t type_index, Value* result) \
@@ -1047,8 +1053,6 @@ struct ControlBase : public PcForErrors<validate> {
F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
const Value& value, const Value& count) \
- F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, Value* result) \
F(StructGet, const Value& struct_object, \
const FieldImmediate<validate>& field, bool is_signed, Value* result) \
F(StructSet, const Value& struct_object, \
@@ -1330,11 +1334,10 @@ class WasmDecoder : public Decoder {
}
bool CanReturnCall(const FunctionSig* target_sig) {
- if (target_sig == nullptr) return false;
- size_t num_returns = sig_->return_count();
- if (num_returns != target_sig->return_count()) return false;
- for (size_t i = 0; i < num_returns; ++i) {
- if (sig_->GetReturn(i) != target_sig->GetReturn(i)) return false;
+ if (sig_->return_count() != target_sig->return_count()) return false;
+ auto target_sig_it = target_sig->returns().begin();
+ for (ValueType ret_type : sig_->returns()) {
+ if (!IsSubtypeOf(*target_sig_it++, ret_type, this->module_)) return false;
}
return true;
}
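
CanReturnCall above is relaxed from exact return-type equality to a per-return subtype check against the caller's signature. A toy version with a two-level type lattice, just to show the direction of the check (each callee return must be a subtype of the corresponding caller return); the types here are invented for illustration:

#include <cstddef>
#include <vector>

enum class Ty { kAnyRef, kFuncRef, kI32 };  // toy lattice: funcref <: anyref

bool IsSubtypeOf(Ty sub, Ty super) {
  return sub == super || (sub == Ty::kFuncRef && super == Ty::kAnyRef);
}

bool CanReturnCall(const std::vector<Ty>& caller_returns,
                   const std::vector<Ty>& callee_returns) {
  if (caller_returns.size() != callee_returns.size()) return false;
  for (size_t i = 0; i < caller_returns.size(); ++i) {
    if (!IsSubtypeOf(callee_returns[i], caller_returns[i])) return false;
  }
  return true;
}
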
@@ -1849,8 +1852,10 @@ class WasmDecoder : public Decoder {
opcode =
decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt:
- case kExprStructNewDefault: {
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
@@ -1861,8 +1866,10 @@ class WasmDecoder : public Decoder {
FieldImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -1871,6 +1878,13 @@ class WasmDecoder : public Decoder {
ArrayIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
+ IndexImmediate<validate> length_imm(
+ decoder, pc + length + array_imm.length, "array length");
+ return length + array_imm.length + length_imm.length;
+ }
case kExprArrayCopy: {
ArrayIndexImmediate<validate> dst_imm(decoder, pc + length);
ArrayIndexImmediate<validate> src_imm(decoder,
@@ -1887,7 +1901,11 @@ class WasmDecoder : public Decoder {
}
case kExprRttCanon:
case kExprRttSub:
- case kExprRttFreshSub: {
+ case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail: {
IndexImmediate<validate> imm(decoder, pc + length, "type index");
return length + imm.length;
}
@@ -2041,20 +2059,26 @@ class WasmDecoder : public Decoder {
case kGCPrefix: {
opcode = this->read_prefixed_opcode<validate>(pc);
switch (opcode) {
- case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt:
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
+ case kExprArrayNewDefault:
case kExprArrayLen:
case kExprRttSub:
case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail:
return {1, 1};
case kExprStructSet:
return {2, 0};
- case kExprArrayNewDefault:
+ case kExprArrayNew:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -2068,6 +2092,7 @@ class WasmDecoder : public Decoder {
case kExprArrayCopy:
return {5, 0};
case kExprRttCanon:
+ case kExprStructNewDefault:
return {0, 1};
case kExprArrayNewWithRtt:
return {3, 1};
@@ -2076,6 +2101,18 @@ class WasmDecoder : public Decoder {
CHECK(Validate(pc + 2, imm));
return {imm.struct_type->field_count() + 1, 1};
}
+ case kExprStructNew: {
+ StructIndexImmediate<validate> imm(this, pc + 2);
+ CHECK(Validate(pc + 2, imm));
+ return {imm.struct_type->field_count(), 1};
+ }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(this, pc + 2);
+ IndexImmediate<validate> length_imm(this, pc + 2 + array_imm.length,
+ "array length");
+ return {length_imm.index + (opcode == kExprArrayInit ? 1 : 0), 1};
+ }
default:
UNREACHABLE();
}
@@ -2224,6 +2261,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int non_defaultable = 0;
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->enabled_.has_nn_locals() ||
+ this->enabled_.has_unsafe_nn_locals() ||
this->local_type(index).is_defaultable())) {
this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
@@ -2613,9 +2651,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
c->reachability = control_at(1)->innerReachability();
const WasmTagSig* sig = imm.tag->sig;
EnsureStackSpace(static_cast<int>(sig->parameter_count()));
- for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(CreateValue(sig->GetParam(i)));
- }
+ for (ValueType type : sig->parameters()) Push(CreateValue(type));
base::Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
@@ -2634,19 +2670,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
// +1 because the current try block is not included in the count.
- Control* target = control_at(imm.depth + 1);
- if (imm.depth + 1 < control_depth() - 1 && !target->is_try()) {
- this->DecodeError(
- "delegate target must be a try block or the function block");
- return 0;
- }
- if (target->is_try_catch() || target->is_try_catchall()) {
- this->DecodeError(
- "cannot delegate inside the catch handler of the target");
- return 0;
+ uint32_t target_depth = imm.depth + 1;
+ while (target_depth < control_depth() - 1 &&
+ (!control_at(target_depth)->is_try() ||
+ control_at(target_depth)->is_try_catch() ||
+ control_at(target_depth)->is_try_catchall())) {
+ target_depth++;
}
FallThrough();
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, target_depth, c);
current_catch_ = c->previous_catch;
EndControl();
PopControl();
@@ -2692,19 +2724,19 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// the stack as it is.
break;
case kOptRef: {
- Value result = CreateValue(
- ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
- // The result of br_on_null has the same value as the argument (but a
- // non-nullable type).
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- CALL_INTERFACE(Forward, ref_object, &result);
- c->br_merge()->reached = true;
- }
- // In unreachable code, we still have to push a value of the correct
- // type onto the stack.
- Drop(ref_object);
- Push(result);
+ Value result = CreateValue(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ // The result of br_on_null has the same value as the argument (but a
+ // non-nullable type).
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ CALL_INTERFACE(Forward, ref_object, &result);
+ c->br_merge()->reached = true;
+ }
+ // In unreachable code, we still have to push a value of the correct
+ // type onto the stack.
+ Drop(ref_object);
+ Push(result);
break;
}
default:
@@ -3302,7 +3334,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ "tail call type error");
return 0;
}
ArgVector args = PeekArgs(imm.sig);
@@ -3605,8 +3637,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
- int index = count - stack_size() - 1;
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(count, stack_size() - limit);
}
// Silently create unreachable values out of thin air underneath the
// existing stack values. To do so, we have to move existing stack values
@@ -4003,22 +4034,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(imm.struct_type->field_count(), rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, imm.struct_type->field_count());
+ if (opcode == kExprStructNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ imm.struct_type->field_count(), rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
ArgVector args = PeekArgs(imm.struct_type, 1);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4029,8 +4070,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprStructNewDefault: {
- NON_CONST_ONLY
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
@@ -4038,26 +4079,34 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
this->DecodeError(
- "struct.new_default_with_rtt: immediate struct type %d has "
- "field %d of non-defaultable type %s",
- imm.index, i, ftype.name().c_str());
+ "%s: struct type %d has field %d of non-defaultable type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index, i,
+ ftype.name().c_str());
return 0;
}
}
}
- Value rtt = Peek(0, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(0, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(0, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 0);
+ if (opcode == kExprStructNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(0, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 0, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewDefault, imm, rtt, &value);
@@ -4131,23 +4180,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(2);
return opcode_length + field.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, 2);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(2, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(2, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 2);
+ if (opcode == kExprArrayNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(2, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 2, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 1, kWasmI32);
Value initial_value =
@@ -4159,30 +4217,39 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprArrayNewDefault: {
+ case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
this->DecodeError(
- "array.new_default_with_rtt: immediate array type %d has "
- "non-defaultable element type %s",
- imm.index, imm.array_type->element_type().name().c_str());
- return 0;
- }
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
+ "%s: array type %d has non-defaultable element type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index,
+ imm.array_type->element_type().name().c_str());
return 0;
}
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(1, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 1);
+ if (opcode == kExprArrayNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 1, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 0, kWasmI32);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4264,7 +4331,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayCopy: {
NON_CONST_ONLY
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
if (!VALIDATE(dst_imm.array_type->mutability())) {
@@ -4298,12 +4364,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(5);
return opcode_length + dst_imm.length + src_imm.length;
}
- case kExprArrayInit: {
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- if (decoding_mode != kInitExpression) {
- this->DecodeError("array.init is only allowed in init. expressions");
- return 0;
- }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
ArrayIndexImmediate<validate> array_imm(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
@@ -4317,12 +4379,18 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
length_imm.index, kV8MaxWasmArrayInitLength);
return 0;
}
+ Value rtt = opcode == kExprArrayInit
+ ? Peek(0, elem_count, ValueType::Rtt(array_imm.index))
+ : CreateValue(ValueType::Rtt(array_imm.index));
+ if (opcode == kExprArrayInitStatic) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, array_imm.index, &rtt);
+ Push(rtt);
+ }
ValueType element_type = array_imm.array_type->element_type();
std::vector<ValueType> element_types(elem_count,
element_type.Unpacked());
FunctionSig element_sig(0, elem_count, element_types.data());
ArgVector elements = PeekArgs(&element_sig, 1);
- Value rtt = Peek(0, elem_count, ValueType::Rtt(array_imm.index));
Value result =
CreateValue(ValueType::Ref(array_imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayInit, array_imm, elements, rtt,
@@ -4362,14 +4430,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- Value value = CreateValue(ValueType::Rtt(imm.index, 0));
+ Value value = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
Push(value);
return opcode_length + imm.length;
}
case kExprRttFreshSub:
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- V8_FALLTHROUGH;
case kExprRttSub: {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
@@ -4402,16 +4469,29 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
return opcode_length + imm.length;
}
- case kExprRefTest: {
+ case kExprRefTest:
+ case kExprRefTestStatic: {
NON_CONST_ONLY
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- Value rtt = Peek(0, 1);
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefTestStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefTest);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ }
Value obj = Peek(1, 0);
Value value = CreateValue(kWasmI32);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
- }
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4426,6 +4506,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
CALL_INTERFACE(RefTest, obj, rtt, &value);
} else {
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(Drop);
// Unrelated types. Will always fail.
CALL_INTERFACE(I32Const, &value, 0);
}
@@ -4434,14 +4516,27 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprRefCast: {
+ case kExprRefCast:
+ case kExprRefCastStatic: {
NON_CONST_ONLY
- Value rtt = Peek(0, 1);
- Value obj = Peek(1, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
+ Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4480,7 +4575,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprBrOnCast: {
+ case kExprBrOnCast:
+ case kExprBrOnCastStatic: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4488,10 +4584,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4538,7 +4646,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
- case kExprBrOnCastFail: {
+ case kExprBrOnCastFail:
+ case kExprBrOnCastStaticFail: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4546,10 +4655,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStaticFail) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCastFail);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4729,7 +4850,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + branch_depth.length;
}
default:
- this->DecodeError("invalid gc opcode");
+ this->DecodeError("invalid gc opcode: %x", opcode);
return 0;
}
}
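For orientation, a minimal standalone sketch of the pattern the new ...Static cases above share (placeholder types, not the decoder's real Value/ValueType classes): the static form synthesizes a canonical rtt from its type-index immediate, while the rtt-taking form still expects an rtt operand on the stack.

#include <cstdint>
#include <optional>

struct RttSketch { uint32_t type_index; };

// Static opcodes (struct.new, ref.test_static, ...) derive the rtt from the
// immediate; the original forms require an rtt value on the operand stack.
RttSketch ResolveRtt(bool is_static_opcode, uint32_t immediate_type_index,
                     std::optional<RttSketch> popped_rtt) {
  if (is_static_opcode) return RttSketch{immediate_type_index};
  return popped_rtt.value();
}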
@@ -4974,9 +5095,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_INLINE ReturnVector CreateReturnValues(const FunctionSig* sig) {
size_t return_count = sig->return_count();
ReturnVector values(return_count);
- for (size_t i = 0; i < return_count; ++i) {
- values[i] = CreateValue(sig->GetReturn(i));
- }
+ std::transform(sig->returns().begin(), sig->returns().end(), values.begin(),
+ [this](ValueType type) { return CreateValue(type); });
return values;
}
V8_INLINE void PushReturns(ReturnVector values) {
@@ -5001,10 +5121,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
PopTypeError(index, val, ("type " + expected.name()).c_str());
}
- V8_NOINLINE void NotEnoughArgumentsError(int index) {
+ V8_NOINLINE void NotEnoughArgumentsError(int needed, int actual) {
+ DCHECK_LT(0, needed);
+ DCHECK_LE(0, actual);
+ DCHECK_LT(actual, needed);
this->DecodeError(
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ "not enough arguments on the stack for %s (need %d, got %d)",
+ SafeOpcodeNameAt(this->pc_), needed, actual);
}
V8_INLINE Value Peek(int depth, int index, ValueType expected) {
@@ -5023,7 +5146,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Peeking past the current control start in reachable code.
if (!VALIDATE(decoding_mode == kFunctionBody &&
control_.back().unreachable())) {
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(depth + 1, stack_size() - limit);
}
return UnreachableValue(this->pc_);
}
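As a quick illustration of the reworked diagnostic above — a hedged sketch, not decoder code, and the helper name below is made up:

#include <cstdio>

// Mirrors the new message: report how many stack values the opcode needs and
// how many are actually available, instead of a single "expected N more".
void ReportNotEnoughArguments(const char* opcode_name, int needed, int actual) {
  std::printf("not enough arguments on the stack for %s (need %d, got %d)\n",
              opcode_name, needed, actual);
}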
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index 19a862d0d4f..d5a82073d2b 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -63,12 +63,13 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- WasmFeatures no_features = WasmFeatures::None();
+ WasmFeatures unused_detected_features;
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ no_zone, no_module, WasmFeatures::All(), &unused_detected_features,
+ no_sig, pc, end, 0);
return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
@@ -253,8 +254,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
i.pc() + 1, module);
os << " @" << i.pc_offset();
CHECK(decoder.Validate(i.pc() + 1, imm));
- for (uint32_t i = 0; i < imm.out_arity(); i++) {
- os << " " << imm.out_type(i).name();
+ for (uint32_t j = 0; j < imm.out_arity(); j++) {
+ os << " " << imm.out_type(j).name();
}
control_depth++;
break;
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index cd9d941a002..e520a7d6806 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -134,7 +134,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
- env, func_body, func_index_, counters, detected);
+ env, wire_bytes_storage, func_body, func_index_, counters, detected);
result.for_debugging = for_debugging_;
break;
}
@@ -142,30 +142,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
return result;
}
-namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(3, 4)
-void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- base::ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = base::VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
- Handle<AbstractCode>::cast(code), name_str));
-}
-} // namespace
-
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
@@ -243,17 +219,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
- Handle<Code> code;
if (use_generic_wrapper_) {
- code = isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
- } else {
- CompilationJob::Status status = job_->FinalizeJob(isolate_);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- code = job_->compilation_info()->code();
+ return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
- RecordWasmHeapStubCompilation(
- isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
+
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (isolate_->logger()->is_listening_to_code_events() ||
+ isolate_->is_profiling()) {
+ Handle<String> name = isolate_->factory()->NewStringFromAsciiChecked(
+ job_->compilation_info()->GetDebugName().get());
+ PROFILE(isolate_, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code), name));
}
return code;
}
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index 84f34cc0ed8..30775b66ac6 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -109,9 +109,11 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index)
- : builder_(builder), func_index_(func_index) {}
+ WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index, InlinedStatus inlined_status)
+ : builder_(builder),
+ func_index_(func_index),
+ inlined_status_(inlined_status) {}
void StartFunction(FullDecoder* decoder) {
// Get the branch hints map for this function (if available)
@@ -138,7 +140,9 @@ class WasmGraphBuildingInterface {
while (index < num_locals) {
ValueType type = decoder->local_type(index);
TFNode* node;
- if (decoder->enabled_.has_nn_locals() && !type.is_defaultable()) {
+ if ((decoder->enabled_.has_nn_locals() ||
+ decoder->enabled_.has_unsafe_nn_locals()) &&
+ !type.is_defaultable()) {
DCHECK(type.is_reference());
// TODO(jkummerow): Consider using "the hole" instead, to make any
// illegal uses more obvious.
@@ -153,7 +157,9 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm) builder_->TraceFunctionEntry(decoder->position());
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
+ builder_->TraceFunctionEntry(decoder->position());
+ }
}
// Reload the instance cache entries into the Ssa Environment.
@@ -163,7 +169,11 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
+ void FinishFunction(FullDecoder*) {
+ if (inlined_status_ == kRegularFunction) {
+ builder_->PatchInStackCheckIfNeeded();
+ }
+ }
void OnFirstError(FullDecoder*) {}
@@ -185,7 +195,7 @@ class WasmGraphBuildingInterface {
TFNode* loop_node = builder_->Loop(control());
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
uint32_t nesting_depth = 0;
for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
if (decoder->control_at(depth)->is_loop()) {
@@ -295,7 +305,7 @@ class WasmGraphBuildingInterface {
// However, if loop unrolling is enabled, we must create a loop exit and
// wrap the fallthru values on the stack.
if (block->is_loop()) {
- if (FLAG_wasm_loop_unrolling && block->reachable()) {
+ if (emit_loop_exits() && block->reachable()) {
BuildLoopExits(decoder, block);
WrapLocalsAtLoopExit(decoder, block);
uint32_t arity = block->end_merge.arity;
@@ -423,7 +433,7 @@ class WasmGraphBuildingInterface {
void Trap(FullDecoder* decoder, TrapReason reason) {
ValueVector values;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
values);
}
@@ -462,7 +472,7 @@ class WasmGraphBuildingInterface {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
NodeVector values(ret_count);
SsaEnv* internal_env = ssa_env_;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
@@ -475,7 +485,7 @@ class WasmGraphBuildingInterface {
: decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
- if (FLAG_trace_wasm) {
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
@@ -487,7 +497,7 @@ class WasmGraphBuildingInterface {
DoReturn(decoder, drop_values);
} else {
Control* target = decoder->control_at(depth);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -614,55 +624,127 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- enum CallMode { kCallDirect, kCallIndirect, kCallRef };
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
- imm.sig, imm.index, args, returns);
+ DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck,
- Value{nullptr, kWasmBottom}, imm.sig, imm.index, args);
+ DoReturnCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index.node, imm.sig,
- imm.sig_imm.index, args, returns);
+ DoCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args, returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index, imm.sig,
- imm.sig_imm.index, args);
+ DoReturnCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoCall(decoder, kCallRef, 0, null_check, func_ref.node, sig, sig_index,
- args, returns);
+ if (!FLAG_wasm_inlining) {
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ Value* returns_direct =
+ decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallDirect(expected_function_index),
+ decoder->module_->signature(sig_index), args, returns_direct);
+ TFNode* control_direct = control();
+ TFNode* effect_direct = effect();
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ Value* returns_ref = decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns_ref);
+
+ TFNode* control_ref = control();
+ TFNode* effect_ref = effect();
+
+ TFNode* control_args[] = {control_direct, control_ref};
+ TFNode* control = builder_->Merge(2, control_args);
+
+ TFNode* effect_args[] = {effect_direct, effect_ref, control};
+ TFNode* effect = builder_->EffectPhi(2, effect_args);
+
+ ssa_env_->control = control;
+ ssa_env_->effect = effect;
+ builder_->SetEffectControl(effect, control);
+
+ for (uint32_t i = 0; i < sig->return_count(); i++) {
+ TFNode* phi_args[] = {returns_direct[i].node, returns_ref[i].node,
+ control};
+ returns[i].node = builder_->Phi(sig->GetReturn(i), 2, phi_args);
+ }
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kCallRef, 0, null_check, func_ref, sig, sig_index,
+ if (!FLAG_wasm_inlining) {
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ DoReturnCall(decoder, CallInfo::CallDirect(expected_function_index), sig,
+ args);
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)), sig,
args);
}
@@ -795,7 +877,7 @@ class WasmGraphBuildingInterface {
}
DCHECK(decoder->control_at(depth)->is_try());
TryInfo* target_try = decoder->control_at(depth)->try_info;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector stack_values;
BuildNestedLoopExits(decoder, depth, true, stack_values,
&block->try_info->exception);
@@ -922,23 +1004,17 @@ class WasmGraphBuildingInterface {
void StructGet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
result->node = builder_->StructGet(
struct_object.node, field.struct_imm.struct_type, field.field_imm.index,
- null_check, is_signed, decoder->position());
+ NullCheckFor(struct_object.type), is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field,
const Value& field_value) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
- field.field_imm.index, field_value.node, null_check,
- decoder->position());
+ field.field_imm.index, field_value.node,
+ NullCheckFor(struct_object.type), decoder->position());
}
void ArrayNewWithRtt(FullDecoder* decoder,
@@ -967,43 +1043,40 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
bool is_signed, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
- null_check, is_signed, decoder->position());
+ result->node = builder_->ArrayGet(array_obj.node, imm.array_type,
+ index.node, NullCheckFor(array_obj.type),
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
const Value& value) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
- null_check, decoder->position());
+ NullCheckFor(array_obj.type), decoder->position());
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayLen(array_obj.node, null_check, decoder->position());
+ result->node = builder_->ArrayLen(
+ array_obj.node, NullCheckFor(array_obj.type), decoder->position());
}
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
- builder_->ArrayCopy(dst.node, dst_index.node, src.node, src_index.node,
+ builder_->ArrayCopy(dst.node, dst_index.node, NullCheckFor(dst.type),
+ src.node, src_index.node, NullCheckFor(src.type),
length.node, decoder->position());
}
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ NodeVector element_nodes(elements.size());
+ for (uint32_t i = 0; i < elements.size(); i++) {
+ element_nodes[i] = elements[i].node;
+ }
+ result->node = builder_->ArrayInit(imm.index, imm.array_type, rtt.node,
+ VectorOf(element_nodes));
}
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
@@ -1177,6 +1250,7 @@ class WasmGraphBuildingInterface {
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
+ InlinedStatus inlined_status_;
TFNode* effect() { return builder_->effect(); }
@@ -1188,6 +1262,14 @@ class WasmGraphBuildingInterface {
->try_info;
}
+ // Loop exits are only used during loop unrolling and are then removed, as
+ // they cannot be handled by later optimization stages. Since unrolling comes
+ // before inlining in the compilation pipeline, we should not emit loop exits
+ // in inlined functions. Also, we should not do so when unrolling is disabled.
+ bool emit_loop_exits() {
+ return FLAG_wasm_loop_unrolling && inlined_status_ == kRegularFunction;
+ }
+
void GetNodes(TFNode** nodes, Value* values, size_t count) {
for (size_t i = 0; i < count; ++i) {
nodes[i] = values[i].node;
@@ -1255,7 +1337,7 @@ class WasmGraphBuildingInterface {
exception_env->effect = if_exception;
SetEnv(exception_env);
TryInfo* try_info = current_try_info(decoder);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector values;
BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
true, values, &if_exception);
@@ -1267,7 +1349,7 @@ class WasmGraphBuildingInterface {
} else {
DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
try_info->exception = builder_->CreateOrMergeIntoPhi(
- MachineRepresentation::kWord32, try_info->catch_env->control,
+ MachineRepresentation::kTaggedPointer, try_info->catch_env->control,
try_info->exception, if_exception);
}
@@ -1445,36 +1527,102 @@ class WasmGraphBuildingInterface {
return result;
}
- void DoCall(FullDecoder* decoder, CallMode call_mode, uint32_t table_index,
- CheckForNull null_check, TFNode* caller_node,
- const FunctionSig* sig, uint32_t sig_index, const Value args[],
- Value returns[]) {
+ class CallInfo {
+ public:
+ enum CallMode { kCallDirect, kCallIndirect, kCallRef };
+
+ static CallInfo CallDirect(uint32_t callee_index) {
+ return {kCallDirect, callee_index, nullptr, 0,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallIndirect(const Value& index_value, uint32_t table_index,
+ uint32_t sig_index) {
+ return {kCallIndirect, sig_index, &index_value, table_index,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallRef(const Value& funcref_value,
+ CheckForNull null_check) {
+ return {kCallRef, 0, &funcref_value, 0, null_check};
+ }
+
+ CallMode call_mode() { return call_mode_; }
+
+ uint32_t sig_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return callee_or_sig_index_;
+ }
+
+ uint32_t callee_index() {
+ DCHECK_EQ(call_mode_, kCallDirect);
+ return callee_or_sig_index_;
+ }
+
+ CheckForNull null_check() {
+ DCHECK_EQ(call_mode_, kCallRef);
+ return null_check_;
+ }
+
+ const Value* index_or_callee_value() {
+ DCHECK_NE(call_mode_, kCallDirect);
+ return index_or_callee_value_;
+ }
+
+ uint32_t table_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return table_index_;
+ }
+
+ private:
+ CallInfo(CallMode call_mode, uint32_t callee_or_sig_index,
+ const Value* index_or_callee_value, uint32_t table_index,
+ CheckForNull null_check)
+ : call_mode_(call_mode),
+ callee_or_sig_index_(callee_or_sig_index),
+ index_or_callee_value_(index_or_callee_value),
+ table_index_(table_index),
+ null_check_(null_check) {}
+ CallMode call_mode_;
+ uint32_t callee_or_sig_index_;
+ const Value* index_or_callee_value_;
+ uint32_t table_index_;
+ CheckForNull null_check_;
+ };
+
+ void DoCall(FullDecoder* decoder, CallInfo call_info, const FunctionSig* sig,
+ const Value args[], Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
NodeVector arg_nodes(param_count + 1);
base::SmallVector<TFNode*, 1> return_nodes(return_count);
- arg_nodes[0] = caller_node;
+ arg_nodes[0] = (call_info.call_mode() == CallInfo::kCallDirect)
+ ? nullptr
+ : call_info.index_or_callee_value()->node;
+
for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- switch (call_mode) {
- case kCallIndirect:
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
CheckForException(
decoder, builder_->CallIndirect(
- table_index, sig_index, base::VectorOf(arg_nodes),
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes),
base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallDirect:
+ case CallInfo::kCallDirect:
CheckForException(
- decoder, builder_->CallDirect(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes),
- decoder->position()));
+ decoder, builder_->CallDirect(
+ call_info.callee_index(), base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallRef:
+ case CallInfo::kCallRef:
CheckForException(
- decoder, builder_->CallRef(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes), null_check,
- decoder->position()));
+ decoder,
+ builder_->CallRef(sig, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes),
+ call_info.null_check(), decoder->position()));
break;
}
for (size_t i = 0; i < return_count; ++i) {
@@ -1485,18 +1633,23 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- void DoReturnCall(FullDecoder* decoder, CallMode call_mode,
- uint32_t table_index, CheckForNull null_check,
- Value index_or_caller_value, const FunctionSig* sig,
- uint32_t sig_index, const Value args[]) {
+ void DoReturnCall(FullDecoder* decoder, CallInfo call_info,
+ const FunctionSig* sig, const Value args[]) {
size_t arg_count = sig->parameter_count();
ValueVector arg_values(arg_count + 1);
- arg_values[0] = index_or_caller_value;
- for (uint32_t i = 0; i < arg_count; i++) {
- arg_values[i + 1] = args[i];
+ if (call_info.call_mode() == CallInfo::kCallDirect) {
+ arg_values[0].node = nullptr;
+ } else {
+ arg_values[0] = *call_info.index_or_callee_value();
+ // This is not done by copy assignment.
+ arg_values[0].node = call_info.index_or_callee_value()->node;
+ }
+ if (arg_count > 0) {
+ std::memcpy(arg_values.data() + 1, args, arg_count * sizeof(Value));
}
- if (FLAG_wasm_loop_unrolling) {
+
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth(), false,
arg_values);
}
@@ -1504,22 +1657,24 @@ class WasmGraphBuildingInterface {
NodeVector arg_nodes(arg_count + 1);
GetNodes(arg_nodes.data(), base::VectorOf(arg_values));
- switch (call_mode) {
- case kCallIndirect:
- CheckForException(
- decoder, builder_->ReturnCallIndirect(table_index, sig_index,
- base::VectorOf(arg_nodes),
- decoder->position()));
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
+ CheckForException(decoder,
+ builder_->ReturnCallIndirect(
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes), decoder->position()));
break;
- case kCallDirect:
- CheckForException(
- decoder, builder_->ReturnCall(sig_index, base::VectorOf(arg_nodes),
- decoder->position()));
+ case CallInfo::kCallDirect:
+ CheckForException(decoder,
+ builder_->ReturnCall(call_info.callee_index(),
+ base::VectorOf(arg_nodes),
+ decoder->position()));
break;
- case kCallRef:
- CheckForException(decoder, builder_->ReturnCallRef(
- sig_index, base::VectorOf(arg_nodes),
- null_check, decoder->position()));
+ case CallInfo::kCallRef:
+ CheckForException(
+ decoder, builder_->ReturnCallRef(sig, base::VectorOf(arg_nodes),
+ call_info.null_check(),
+ decoder->position()));
break;
}
}
@@ -1547,7 +1702,6 @@ class WasmGraphBuildingInterface {
WRAP_CACHE_FIELD(mem_start);
WRAP_CACHE_FIELD(mem_size);
- WRAP_CACHE_FIELD(mem_mask);
#undef WRAP_CACHE_FIELD
}
}
@@ -1555,7 +1709,7 @@ class WasmGraphBuildingInterface {
void BuildNestedLoopExits(FullDecoder* decoder, uint32_t depth_limit,
bool wrap_exit_values, ValueVector& stack_values,
TFNode** exception_value = nullptr) {
- DCHECK(FLAG_wasm_loop_unrolling);
+ DCHECK(emit_loop_exits());
Control* control = nullptr;
// We are only interested in exits from the innermost loop.
for (uint32_t i = 0; i < depth_limit; i++) {
@@ -1584,7 +1738,7 @@ class WasmGraphBuildingInterface {
}
void TerminateThrow(FullDecoder* decoder) {
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -1597,6 +1751,12 @@ class WasmGraphBuildingInterface {
builder_->TerminateThrow(effect(), control());
}
}
+
+ CheckForNull NullCheckFor(ValueType type) {
+ DCHECK(type.is_object_reference());
+ return type.is_nullable() ? CheckForNull::kWithNullCheck
+ : CheckForNull::kWithoutNullCheck;
+ }
};
} // namespace
@@ -1607,10 +1767,11 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins,
- int func_index) {
+ int func_index, InlinedStatus inlined_status) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder, func_index);
+ &zone, module, enabled, detected, body, builder, func_index,
+ inlined_status);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
@@ -1618,7 +1779,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
- if (FLAG_wasm_loop_unrolling) {
+ if (FLAG_wasm_loop_unrolling && inlined_status == kRegularFunction) {
*loop_infos = decoder.interface().loop_infos();
}
return decoder.toResult(nullptr);
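A compact standalone sketch of the NullCheckFor() helper the diff above introduces (simplified stand-in type, not V8's ValueType):

enum class CheckForNull { kWithoutNullCheck, kWithNullCheck };

struct RefTypeSketch {
  bool nullable;
  bool is_nullable() const { return nullable; }
};

// Nullable references get an explicit null check before struct/array accesses
// and calls; non-nullable references can safely skip it.
CheckForNull NullCheckFor(RefTypeSketch type) {
  return type.is_nullable() ? CheckForNull::kWithNullCheck
                            : CheckForNull::kWithoutNullCheck;
}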
diff --git a/chromium/v8/src/wasm/graph-builder-interface.h b/chromium/v8/src/wasm/graph-builder-interface.h
index 6c668e2b0a0..49d9dd353cb 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.h
+++ b/chromium/v8/src/wasm/graph-builder-interface.h
@@ -27,12 +27,15 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
+enum InlinedStatus { kInlinedFunction, kRegularFunction };
+
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins, int func_index);
+ compiler::NodeOriginTable* node_origins, int func_index,
+ InlinedStatus inlined_status);
} // namespace wasm
} // namespace internal
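A hedged sketch of how the new InlinedStatus parameter is meant to gate loop-exit emission (written as a free function purely for illustration; in the diff this logic lives in emit_loop_exits()):

enum InlinedStatus { kInlinedFunction, kRegularFunction };

// Loop exits are removed again right after unrolling, and unrolling runs
// before inlining, so inlined bodies must not emit them; neither should
// functions compiled with unrolling disabled.
bool ShouldEmitLoopExits(bool wasm_loop_unrolling_enabled,
                         InlinedStatus inlined_status) {
  return wasm_loop_unrolling_enabled && inlined_status == kRegularFunction;
}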
diff --git a/chromium/v8/src/wasm/init-expr-interface.cc b/chromium/v8/src/wasm/init-expr-interface.cc
index 52c45bd18b7..818145d0954 100644
--- a/chromium/v8/src/wasm/init-expr-interface.cc
+++ b/chromium/v8/src/wasm/init-expr-interface.cc
@@ -89,6 +89,48 @@ void InitExprInterface::StructNewWithRtt(
ValueType::Ref(HeapType(imm.index), kNonNullable));
}
+namespace {
+WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
+ switch (type.kind()) {
+ case kI32:
+ case kI8:
+ case kI16:
+ return WasmValue(0);
+ case kI64:
+ return WasmValue(int64_t{0});
+ case kF32:
+ return WasmValue(0.0f);
+ case kF64:
+ return WasmValue(0.0);
+ case kS128:
+ return WasmValue(Simd128());
+ case kOptRef:
+ return WasmValue(isolate->factory()->null_value(), type);
+ case kVoid:
+ case kRtt:
+ case kRttWithDepth:
+ case kRef:
+ case kBottom:
+ UNREACHABLE();
+ }
+}
+} // namespace
+
+void InitExprInterface::StructNewDefault(
+ FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
+ const Value& rtt, Value* result) {
+ if (isolate_ == nullptr) return;
+ std::vector<WasmValue> field_values(imm.struct_type->field_count());
+ for (uint32_t i = 0; i < field_values.size(); i++) {
+ field_values[i] = DefaultValueForType(imm.struct_type->field(i), isolate_);
+ }
+ result->runtime_value =
+ WasmValue(isolate_->factory()->NewWasmStruct(
+ imm.struct_type, field_values.data(),
+ Handle<Map>::cast(rtt.runtime_value.to_ref())),
+ ValueType::Ref(HeapType(imm.index), kNonNullable));
+}
+
void InitExprInterface::ArrayInit(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements,
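For the struct.new_default support above, a simplified default-value mapping (a sketch using std::variant as a stand-in for WasmValue; only the numeric cases are shown):

#include <cstdint>
#include <variant>

enum class SimpleKind { kI32, kI64, kF32, kF64 };
using SimpleWasmValue = std::variant<int32_t, int64_t, float, double>;

// Numeric fields default to zero of the matching representation; reference
// fields (handled in the real code) default to null.
SimpleWasmValue DefaultValueFor(SimpleKind kind) {
  switch (kind) {
    case SimpleKind::kI32: return int32_t{0};
    case SimpleKind::kI64: return int64_t{0};
    case SimpleKind::kF32: return 0.0f;
    case SimpleKind::kF64: return 0.0;
  }
  return int32_t{0};  // unreachable for a well-formed kind
}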
diff --git a/chromium/v8/src/wasm/jump-table-assembler.cc b/chromium/v8/src/wasm/jump-table-assembler.cc
index db2514791bc..4dc808fe33e 100644
--- a/chromium/v8/src/wasm/jump-table-assembler.cc
+++ b/chromium/v8/src/wasm/jump-table-assembler.cc
@@ -268,6 +268,36 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+#elif V8_TARGET_ARCH_LOONG64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ DCHECK(is_int32(func_index));
+ int start = pc_offset();
+ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr
+ // Jump produces max 4 instructions.
+ Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
+}
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ PatchAndJump(target);
+ return true;
+}
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
diff --git a/chromium/v8/src/wasm/jump-table-assembler.h b/chromium/v8/src/wasm/jump-table-assembler.h
index 3963de9824a..433608decba 100644
--- a/chromium/v8/src/wasm/jump-table-assembler.h
+++ b/chromium/v8/src/wasm/jump-table-assembler.h
@@ -224,6 +224,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kJumpTableLineSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
#else
#error Unknown architecture.
#endif
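To make the new LOONG64 slot-size constants above concrete, a small sketch of the padding arithmetic EmitLazyCompileJumpSlot relies on (a 4-byte kInstrSize is assumed here for LOONG64):

#include <cassert>

constexpr int kInstrSize = 4;  // assumed 4-byte LOONG64 instructions
constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;

// Each lazy-compile entry is padded with nops so that every slot occupies
// exactly kLazyCompileTableSlotSize bytes, keeping the table uniformly indexed.
int NopBytesNeeded(int slot_start_offset, int current_offset) {
  int nop_bytes = slot_start_offset + kLazyCompileTableSlotSize - current_offset;
  assert(nop_bytes >= 0 && nop_bytes % kInstrSize == 0);
  return nop_bytes;
}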
diff --git a/chromium/v8/src/wasm/memory-protection-key.cc b/chromium/v8/src/wasm/memory-protection-key.cc
index 441826e7075..c3e844ff1c4 100644
--- a/chromium/v8/src/wasm/memory-protection-key.cc
+++ b/chromium/v8/src/wasm/memory-protection-key.cc
@@ -166,7 +166,7 @@ bool SetPermissionsAndMemoryProtectionKey(
DISABLE_CFI_ICALL
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions) {
- CHECK_NE(kNoMemoryProtectionKey, key);
+ DCHECK_NE(kNoMemoryProtectionKey, key);
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
typedef int (*pkey_set_t)(int, unsigned int);
@@ -177,8 +177,27 @@ void SetPermissionsForMemoryProtectionKey(
int ret = pkey_set(key, permissions);
CHECK_EQ(0 /* success */, ret);
#else
- // On platforms without PKU support, we should have failed the CHECK above
- // because the key must be {kNoMemoryProtectionKey}.
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
+ UNREACHABLE();
+#endif
+}
+
+DISABLE_CFI_ICALL
+bool MemoryProtectionKeyWritable(int key) {
+ DCHECK_NE(kNoMemoryProtectionKey, key);
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_get_t)(int);
+ static auto* pkey_get = bit_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
+ // If a valid key was allocated, {pkey_get()} must also be available.
+ DCHECK_NOT_NULL(pkey_get);
+
+ int permissions = pkey_get(key);
+ return permissions == kNoRestrictions;
+#else
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
UNREACHABLE();
#endif
}
diff --git a/chromium/v8/src/wasm/memory-protection-key.h b/chromium/v8/src/wasm/memory-protection-key.h
index c4353575679..7a9ba721941 100644
--- a/chromium/v8/src/wasm/memory-protection-key.h
+++ b/chromium/v8/src/wasm/memory-protection-key.h
@@ -82,6 +82,10 @@ bool SetPermissionsAndMemoryProtectionKey(
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions);
+// Returns {true} if the protection key {key} is write-enabled for the current
+// thread.
+bool MemoryProtectionKeyWritable(int key);
+
} // namespace wasm
} // namespace internal
} // namespace v8
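A standalone Linux-only sketch of the writability probe added above (assumes glibc 2.27+ with pkey_get() and kernel PKU support; not the V8 implementation, which resolves the symbol via dlsym):

#define _GNU_SOURCE
#include <sys/mman.h>

// A protection key is write-enabled for the current thread iff pkey_get()
// reports no restrictions (neither PKEY_DISABLE_ACCESS nor PKEY_DISABLE_WRITE).
bool IsPkeyWritable(int key) {
  return pkey_get(key) == 0;
}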
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 303270059de..2611c2d9e9c 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -16,6 +16,7 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
@@ -528,7 +529,8 @@ bool CompilationUnitQueues::Queue::ShouldPublish(
class CompilationStateImpl {
public:
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters);
+ std::shared_ptr<Counters> async_counters,
+ DynamicTiering dynamic_tiering);
~CompilationStateImpl() {
if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
}
@@ -637,6 +639,8 @@ class CompilationStateImpl {
return outstanding_recompilation_functions_ == 0;
}
+ DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
+
Counters* counters() const { return async_counters_.get(); }
void SetWireBytesStorage(
@@ -662,7 +666,7 @@ class CompilationStateImpl {
private:
uint8_t SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* module,
const WasmFeatures& enabled_features, int func_index);
// Returns the potentially-updated {function_progress}.
@@ -701,6 +705,10 @@ class CompilationStateImpl {
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
+ // Cache the dynamic tiering configuration to be consistent for the whole
+ // compilation.
+ const DynamicTiering dynamic_tiering_;
+
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@@ -745,6 +753,9 @@ class CompilationStateImpl {
int outstanding_baseline_units_ = 0;
int outstanding_export_wrappers_ = 0;
int outstanding_top_tier_functions_ = 0;
+ // The amount of generated top tier code since the last
+ // {kFinishedCompilationChunk} event.
+ size_t bytes_since_last_chunk_ = 0;
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
@@ -860,13 +871,17 @@ void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
+DynamicTiering CompilationState::dynamic_tiering() const {
+ return Impl(this)->dynamic_tiering();
+}
+
// static
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(
- reinterpret_cast<CompilationState*>(new CompilationStateImpl(
- std::move(native_module), std::move(async_counters))));
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering) {
+ return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
+ new CompilationStateImpl(std::move(native_module),
+ std::move(async_counters), dynamic_tiering)));
}
// End of PIMPL implementation of {CompilationState}.
@@ -926,13 +941,18 @@ struct ExecutionTierPair {
};
ExecutionTierPair GetRequestedExecutionTiers(
- const WasmModule* module, const WasmFeatures& enabled_features,
+ NativeModule* native_module, const WasmFeatures& enabled_features,
uint32_t func_index) {
+ const WasmModule* module = native_module->module();
ExecutionTierPair result;
result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module);
- if (module->origin != kWasmOrigin || !FLAG_wasm_tier_up) {
+ bool dynamic_tiering =
+ Impl(native_module->compilation_state())->dynamic_tiering() ==
+ DynamicTiering::kEnabled;
+ bool tier_up_enabled = !dynamic_tiering && FLAG_wasm_tier_up;
+ if (module->origin != kWasmOrigin || !tier_up_enabled) {
result.top_tier = result.baseline_tier;
return result;
}
@@ -975,8 +995,7 @@ class CompilationUnitBuilder {
return;
}
ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module_->module(), native_module_->enabled_features(),
- func_index);
+ native_module_, native_module_->enabled_features(), func_index);
// Compile everything for non-debugging initially. If needed, we will tier
// down when the module is fully compiled. Synchronization would be pretty
// difficult otherwise.
@@ -1141,7 +1160,7 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
ExecutionTierPair tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
@@ -1530,13 +1549,13 @@ class CompilationTimeCallback {
native_module_(std::move(native_module)),
compile_mode_(compile_mode) {}
- void operator()(CompilationEvent event) {
+ void operator()(CompilationEvent compilation_event) {
DCHECK(base::TimeTicks::IsHighResolution());
std::shared_ptr<NativeModule> native_module = native_module_.lock();
if (!native_module) return;
auto now = base::TimeTicks::Now();
auto duration = now - start_time_;
- if (event == CompilationEvent::kFinishedBaselineCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedBaselineCompilation) {
// Reset {start_time_} to measure tier-up time.
start_time_ = now;
if (compile_mode_ != kSynchronous) {
@@ -1561,7 +1580,7 @@ class CompilationTimeCallback {
native_module->baseline_compilation_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFinishedTopTierCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedTopTierCompilation) {
TimedHistogram* histogram = async_counters_->wasm_tier_up_module_time();
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
@@ -1573,7 +1592,7 @@ class CompilationTimeCallback {
native_module->tier_up_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFailedCompilation) {
+ if (compilation_event == CompilationEvent::kFailedCompilation) {
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1646,12 +1665,8 @@ void CompileNativeModule(Isolate* isolate,
return;
}
- if (!FLAG_predictable) {
- // For predictable mode, do not finalize wrappers yet to make sure we catch
- // validation errors first.
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
- }
+ compilation_state->FinalizeJSToWasmWrappers(isolate, native_module->module(),
+ export_wrappers_out);
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -1663,9 +1678,6 @@ void CompileNativeModule(Isolate* isolate,
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
isolate->allocator(), thrower, lazy_module);
CHECK(thrower->error());
- } else if (FLAG_predictable) {
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
}
}
@@ -2101,8 +2113,12 @@ class AsyncCompileJob::CompilationStateCallback {
: nullptr);
}
break;
+ case CompilationEvent::kFinishedCompilationChunk:
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_ ||
+ CompilationEvent::kFinishedCompilationChunk == last_event_);
+ break;
case CompilationEvent::kFinishedTopTierCompilation:
- DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_);
// At this point, the job will already be gone, thus do not access it
// here.
break;
@@ -2828,11 +2844,12 @@ bool AsyncStreamingProcessor::Deserialize(
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering)
: native_module_(native_module.get()),
native_module_weak_(std::move(native_module)),
async_counters_(std::move(async_counters)),
- compilation_unit_queues_(native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()),
+ dynamic_tiering_(dynamic_tiering) {}
void CompilationStateImpl::InitCompileJob() {
DCHECK_NULL(compile_job_);
@@ -2865,12 +2882,12 @@ bool CompilationStateImpl::cancelled() const {
}
uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* native_module,
const WasmFeatures& enabled_features, int func_index) {
ExecutionTierPair requested_tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
- CompileStrategy strategy =
- GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
+ CompileStrategy strategy = GetCompileStrategy(
+ native_module->module(), enabled_features, func_index, lazy_module);
bool required_for_baseline = strategy == CompileStrategy::kEager;
bool required_for_top_tier = strategy != CompileStrategy::kLazy;
@@ -2923,7 +2940,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
continue;
}
uint8_t function_progress = SetupCompilationProgressForFunction(
- lazy_module, module, enabled_features, func_index);
+ lazy_module, native_module_, enabled_features, func_index);
compilation_progress_.push_back(function_progress);
}
DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
@@ -3057,7 +3074,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
native_module_->UseLazyStub(func_index);
}
compilation_progress_[declared_function_index(module, func_index)] =
- SetupCompilationProgressForFunction(lazy_module, module,
+ SetupCompilationProgressForFunction(lazy_module, native_module_,
enabled_features, func_index);
}
}
@@ -3197,6 +3214,10 @@ void CompilationStateImpl::CommitTopTierCompilationUnit(
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
+ {
+ base::MutexGuard guard(&callbacks_mutex_);
+ outstanding_top_tier_functions_++;
+ }
compile_job_->NotifyConcurrencyIncrease();
}
@@ -3309,6 +3330,9 @@ void CompilationStateImpl::OnFinishedUnits(
DCHECK_GT(outstanding_baseline_units_, 0);
outstanding_baseline_units_--;
}
+ if (code->tier() == ExecutionTier::kTurbofan) {
+ bytes_since_last_chunk_ += code->instructions().size();
+ }
if (reached_tier < required_top_tier &&
required_top_tier <= code->tier()) {
DCHECK_GT(outstanding_top_tier_functions_, 0);
@@ -3362,12 +3386,19 @@ void CompilationStateImpl::TriggerCallbacks(
triggered_events.Add(CompilationEvent::kFinishedExportWrappers);
if (outstanding_baseline_units_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
- if (outstanding_top_tier_functions_ == 0) {
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_top_tier_functions_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedTopTierCompilation);
}
}
}
+ if (dynamic_tiering_ == DynamicTiering::kEnabled &&
+ static_cast<size_t>(FLAG_wasm_caching_threshold) <
+ bytes_since_last_chunk_) {
+ triggered_events.Add(CompilationEvent::kFinishedCompilationChunk);
+ bytes_since_last_chunk_ = 0;
+ }
if (compile_failed_.load(std::memory_order_relaxed)) {
// *Only* trigger the "failed" event.
triggered_events =
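
With dynamic tiering enabled, the hunk above fires kFinishedCompilationChunk once the TurboFan code produced since the last chunk exceeds FLAG_wasm_caching_threshold, then resets the byte counter. A small self-contained sketch of that accumulate-and-reset pattern (names and the threshold value are illustrative, not the V8 flag):

  #include <cstddef>
  #include <cstdio>

  // Illustrative threshold; the real value comes from FLAG_wasm_caching_threshold.
  constexpr size_t kCachingThreshold = 1024 * 1024;  // 1 MiB of top-tier code

  class ChunkTracker {
   public:
    // Called for every finished TurboFan unit with the size of its code.
    void OnTopTierCode(size_t code_size) {
      bytes_since_last_chunk_ += code_size;
      if (bytes_since_last_chunk_ > kCachingThreshold) {
        std::printf("chunk finished after %zu bytes\n", bytes_since_last_chunk_);
        bytes_since_last_chunk_ = 0;  // reset, so the event can fire again later
      }
    }

   private:
    size_t bytes_since_last_chunk_ = 0;
  };

  int main() {
    ChunkTracker tracker;
    for (int i = 0; i < 5; i++) tracker.OnTopTierCode(300 * 1024);  // fires once
  }
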
@@ -3378,9 +3409,11 @@ void CompilationStateImpl::TriggerCallbacks(
// Don't trigger past events again.
triggered_events -= finished_events_;
- // Recompilation can happen multiple times, thus do not store this.
- finished_events_ |=
- triggered_events - CompilationEvent::kFinishedRecompilation;
+ // Recompilation can happen multiple times, thus do not store this. There can
+ // also be multiple compilation chunks.
+ finished_events_ |= triggered_events -
+ CompilationEvent::kFinishedRecompilation -
+ CompilationEvent::kFinishedCompilationChunk;
for (auto event :
{std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3391,6 +3424,8 @@ void CompilationStateImpl::TriggerCallbacks(
"wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
"wasm.TopTierFinished"),
+ std::make_pair(CompilationEvent::kFinishedCompilationChunk,
+ "wasm.CompilationChunkFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
"wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
@@ -3401,7 +3436,11 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
- if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
+ // With dynamic tiering, we don't know if we can ever delete the callback.
+ // TODO(https://crbug.com/v8/12289): Release some callbacks also when dynamic
+ // tiering is enabled.
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
outstanding_top_tier_functions_ == 0 &&
outstanding_recompilation_functions_ == 0) {
// Clear the callbacks because no more events will be delivered.
@@ -3665,13 +3704,17 @@ WasmCode* CompileImportWrapper(
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, source_positions, expected_arity);
- std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
- result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone, kNoDebugging);
- WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ WasmCode* published_code;
+ {
+ CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
+ ExecutionTier::kNone, kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
(*cache_scope)[key] = published_code;
published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
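
The hunk above narrows the CodeSpaceWriteScope in CompileImportWrapper to just the AddCode/PublishCode calls (the job-wide scope is dropped in module-instantiate.cc below). A minimal standalone sketch of the RAII idea, with made-up permission helpers instead of the real per-NativeModule machinery:

  #include <cstdio>

  // Hypothetical permission switch; the real scope flips PKU/mprotect state for
  // the module's code space.
  void SetCodeSpaceWritable(bool writable) {
    std::printf("code space writable: %s\n", writable ? "yes" : "no");
  }

  class CodeSpaceWriteScope {
   public:
    CodeSpaceWriteScope() { SetCodeSpaceWritable(true); }
    ~CodeSpaceWriteScope() { SetCodeSpaceWritable(false); }
    CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
    CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
  };

  void AddAndPublishCode() {
    CodeSpaceWriteScope scope;  // writable only while code is copied and patched
    std::printf("  copy code, patch jump table\n");
  }  // destructor restores execute-only permissions

  int main() { AddAndPublishCode(); }
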
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index e8bd2597bc6..16ac753547c 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -44,9 +44,11 @@ class CompilationResultResolver;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
+class StreamingDecoder;
class WasmCode;
struct WasmModule;
+V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index b014f8a8c7f..8129882ce89 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -550,34 +550,40 @@ class ModuleDecoderImpl : public Decoder {
}
void DecodeTypeSection() {
- uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
- module_->types.reserve(signatures_count);
- for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+ uint32_t types_count = consume_count("types count", kV8MaxWasmTypes);
+ module_->types.reserve(types_count);
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
uint8_t kind = consume_u8("type kind");
switch (kind) {
- case kWasmFunctionTypeCode: {
+ case kWasmFunctionTypeCode:
+ case kWasmFunctionSubtypeCode: {
const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- break;
- }
- case kWasmFunctionExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_signature(super_index)) {
- errorf(pc(), "invalid function supertype index: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmFunctionSubtypeCode) {
+ if (!enabled_features_.has_gc()) {
+ errorf(pc(),
+ "invalid function type definition, enable with "
+ "--experimental-wasm-gc");
+ break;
+ }
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kFunc) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_signature(s, super_index);
break;
}
- case kWasmStructTypeCode: {
+ case kWasmStructTypeCode:
+ case kWasmStructSubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid struct type definition, enable with "
@@ -585,27 +591,26 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmStructSubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
+ }
+ module_->add_struct_type(s, super_index);
// TODO(7748): Should we canonicalize struct types, like
// {signature_map} does for function signatures?
break;
}
- case kWasmStructExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_struct(super_index)) {
- errorf(pc(), "invalid struct supertype: %d", super_index);
- break;
- }
- break;
- }
- case kWasmArrayTypeCode: {
+ case kWasmArrayTypeCode:
+ case kWasmArraySubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid array type definition, enable with "
@@ -613,22 +618,20 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- break;
- }
- case kWasmArrayExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
- errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
- break;
- }
- const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_array(super_index)) {
- errorf(pc(), "invalid array supertype: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmArraySubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_array_type(type, super_index);
break;
}
default:
@@ -636,6 +639,46 @@ class ModuleDecoderImpl : public Decoder {
break;
}
}
+ // Check validity of explicitly defined supertypes.
+ const WasmModule* module = module_.get();
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ uint32_t explicit_super = module_->supertype(i);
+ if (explicit_super == kNoSuperType) continue;
+ if (explicit_super == kGenericSuperType) continue;
+ DCHECK_LT(explicit_super, types_count); // {consume_super_type} checks.
+ // Only types that have an explicit supertype themselves can be explicit
+ // supertypes of other types.
+ if (!module->has_supertype(explicit_super)) {
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ continue;
+ }
+ int depth = GetSubtypingDepth(module, i);
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) {
+ errorf("type %d: subtyping depth is greater than allowed", i);
+ continue;
+ }
+ if (depth == -1) {
+ errorf("type %d: cyclic inheritance", i);
+ continue;
+ }
+ switch (module_->type_kinds[i]) {
+ case kWasmStructTypeCode:
+ if (!module->has_struct(explicit_super)) break;
+ if (!StructIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmArrayTypeCode:
+ if (!module->has_array(explicit_super)) break;
+ if (!ArrayIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmFunctionTypeCode:
+ if (!module->has_signature(explicit_super)) break;
+ if (!FunctionIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ default:
+ UNREACHABLE();
+ }
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ }
module_->signature_map.Freeze();
}
@@ -1106,7 +1149,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
- if (name_type == NameSectionKindCode::kModule) {
+ if (name_type == NameSectionKindCode::kModuleCode) {
WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) {
module_->name = name;
@@ -1784,6 +1827,15 @@ class ModuleDecoderImpl : public Decoder {
return result;
}
+ HeapType consume_super_type() {
+ uint32_t type_length;
+ HeapType result = value_type_reader::read_heap_type<kFullValidation>(
+ this, this->pc(), &type_length, module_.get(),
+ origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
+ consume_bytes(type_length, "supertype");
+ return result;
+ }
+
ValueType consume_storage_type() {
uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
@@ -2360,7 +2412,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionKindCode::kFunction) {
+ if (name_type != NameSectionKindCode::kFunctionCode) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
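
The new validation pass in DecodeTypeSection above walks each type's explicit supertype chain: GetSubtypingDepth must stay within kV8MaxRttSubtypingDepth and returns -1 on a cycle. A self-contained sketch of such a chain walk over a plain supertype array (the sentinels and the cycle bound are illustrative, not the V8 constants):

  #include <cstdint>
  #include <vector>

  constexpr uint32_t kNoSuperType = 0xFFFFFFFF;      // illustrative sentinels
  constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;

  // Returns the length of the explicit supertype chain above {index}, or -1 if
  // the chain contains a cycle. The decoder additionally rejects any depth
  // above the configured maximum.
  int GetSubtypingDepth(const std::vector<uint32_t>& supertypes, uint32_t index) {
    int depth = 0;
    const int limit = static_cast<int>(supertypes.size());  // longer => cycle
    uint32_t current = index;
    while (true) {
      uint32_t super = supertypes[current];
      if (super == kNoSuperType || super == kGenericSuperType) return depth;
      if (++depth > limit) return -1;
      current = super;
    }
  }

  int main() {
    // Type 1 extends 0, type 2 extends 1; types 3 and 4 extend each other.
    std::vector<uint32_t> supertypes = {kNoSuperType, 0, 1, 4, 3};
    // Depths: 0, 1, 2, -1, -1.
    return GetSubtypingDepth(supertypes, 2) == 2 ? 0 : 1;
  }
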
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
index f56ab55cd7a..4eb13352d83 100644
--- a/chromium/v8/src/wasm/module-instantiate.cc
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -65,9 +65,10 @@ class CompileImportWrapperJob final : public JobTask {
}
void Run(JobDelegate* delegate) override {
- CodeSpaceWriteScope code_space_write_scope(native_module_);
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
+ // TODO(wasm): Batch code publishing, to avoid repeated locking and
+ // permission switching.
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
key->expected_arity, cache_scope_);
if (delegate->ShouldYield()) return;
@@ -162,6 +163,7 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmStruct::EncodeInstanceSizeInMap(real_instance_size, *map);
return map;
}
@@ -187,9 +189,46 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmArray::EncodeElementSizeInMap(type->element_type().element_size_bytes(),
+ *map);
return map;
}
+void CreateMapForType(Isolate* isolate, const WasmModule* module,
+ int type_index, Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> maps) {
+ // Recursive calls for supertypes may already have created this map.
+ if (maps->get(type_index).IsMap()) return;
+ Handle<Map> rtt_parent;
+ // If the type with {type_index} has an explicit supertype, make sure the
+ // map for that supertype is created first, so that the supertypes list
+ // that's cached on every RTT can be set up correctly.
+ uint32_t supertype = module->supertype(type_index);
+ if (supertype != kNoSuperType && supertype != kGenericSuperType) {
+ // This recursion is safe, because kV8MaxRttSubtypingDepth limits the
+ // number of recursive steps, so we won't overflow the stack.
+ CreateMapForType(isolate, module, supertype, instance, maps);
+ rtt_parent = handle(Map::cast(maps->get(supertype)), isolate);
+ }
+ Handle<Map> map;
+ switch (module->type_kinds[type_index]) {
+ case kWasmStructTypeCode:
+ map = CreateStructMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmArrayTypeCode:
+ map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmFunctionTypeCode:
+ // TODO(7748): Think about canonicalizing rtts to make them work for
+ // identical function types.
+ map = Map::Copy(isolate, isolate->wasm_exported_function_map(),
+ "fresh function map for function type canonical rtt "
+ "initialization");
+ break;
+ }
+ maps->set(type_index, *map);
+}
+
namespace {
// TODO(7748): Consider storing this array in Maps'
@@ -614,9 +653,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
for (int i = module_->num_imported_tables; i < table_count; i++) {
const WasmTable& table = module_->tables[i];
+ // Initialize tables with null for now. We will initialize non-defaultable
+ // tables later, in {InitializeIndirectFunctionTables}.
Handle<WasmTableObject> table_obj = WasmTableObject::New(
isolate_, instance, table.type, table.initial_size,
- table.has_maximum_size, table.maximum_size, nullptr);
+ table.has_maximum_size, table.maximum_size, nullptr,
+ isolate_->factory()->null_value());
tables->set(i, *table_obj);
}
instance->set_tables(*tables);
@@ -657,28 +699,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (enabled_.has_gc()) {
Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->type_kinds.size()));
- for (int map_index = 0;
- map_index < static_cast<int>(module_->type_kinds.size());
- map_index++) {
- Handle<Map> map;
- switch (module_->type_kinds[map_index]) {
- case kWasmStructTypeCode:
- map = CreateStructMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmArrayTypeCode:
- map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmFunctionTypeCode:
- // TODO(7748): Think about canonicalizing rtts to make them work for
- // identical function types.
- map = Map::Copy(isolate_, isolate_->wasm_exported_function_map(),
- "fresh function map for function type canonical rtt "
- "initialization");
- break;
- }
- maps->set(map_index, *map);
+ for (uint32_t index = 0; index < module_->type_kinds.size(); index++) {
+ CreateMapForType(isolate_, module_, index, instance, maps);
}
instance->set_managed_object_maps(*maps);
}
@@ -826,6 +848,39 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
return result;
}
+namespace {
+bool HasDefaultToNumberBehaviour(Isolate* isolate,
+ Handle<JSFunction> function) {
+ // Disallow providing a [Symbol.toPrimitive] member.
+ LookupIterator to_primitive_it{isolate, function,
+ isolate->factory()->to_primitive_symbol()};
+ if (to_primitive_it.state() != LookupIterator::NOT_FOUND) return false;
+
+ // The {valueOf} member must be the default "ObjectPrototypeValueOf".
+ LookupIterator value_of_it{isolate, function,
+ isolate->factory()->valueOf_string()};
+ if (value_of_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> value_of = value_of_it.GetDataValue();
+ if (!value_of->IsJSFunction()) return false;
+ Builtin value_of_builtin_id =
+ Handle<JSFunction>::cast(value_of)->code().builtin_id();
+ if (value_of_builtin_id != Builtin::kObjectPrototypeValueOf) return false;
+
+ // The {toString} member must be the default "FunctionPrototypeToString".
+ LookupIterator to_string_it{isolate, function,
+ isolate->factory()->toString_string()};
+ if (to_string_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> to_string = to_string_it.GetDataValue();
+ if (!to_string->IsJSFunction()) return false;
+ Builtin to_string_builtin_id =
+ Handle<JSFunction>::cast(to_string)->code().builtin_id();
+ if (to_string_builtin_id != Builtin::kFunctionPrototypeToString) return false;
+
+ // Just a default function, which will convert to "NaN". Accept this.
+ return true;
+}
+} // namespace
+
// Look up an import value in the {ffi_} object specifically for linking an
// asm.js module. This only performs non-observable lookups, which allows
// falling back to JavaScript proper (and hence re-executing all lookups) if
@@ -840,7 +895,6 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
// Perform lookup of the given {import_name} without causing any observable
// side-effect. We only accept accesses that resolve to data properties,
// which is indicated by the asm.js spec in section 7 ("Linking") as well.
- Handle<Object> result;
PropertyKey key(isolate_, Handle<Name>::cast(import_name));
LookupIterator it(isolate_, ffi_.ToHandleChecked(), key);
switch (it.state()) {
@@ -854,14 +908,23 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
case LookupIterator::NOT_FOUND:
// Accepting missing properties as undefined does not cause any
// observable difference from JavaScript semantics, we are lenient.
- result = isolate_->factory()->undefined_value();
- break;
- case LookupIterator::DATA:
- result = it.GetDataValue();
- break;
+ return isolate_->factory()->undefined_value();
+ case LookupIterator::DATA: {
+ Handle<Object> value = it.GetDataValue();
+ // For legacy reasons, we accept functions for imported globals (see
+ // {ProcessImportedGlobal}), but only if we can easily determine that
+ // their Number-conversion is side effect free and returns NaN (which is
+ // the case as long as "valueOf" (or others) are not overwritten).
+ if (value->IsJSFunction() &&
+ module_->import_table[index].kind == kExternalGlobal &&
+ !HasDefaultToNumberBehaviour(isolate_,
+ Handle<JSFunction>::cast(value))) {
+ return ReportLinkError("function has special ToNumber behaviour", index,
+ import_name);
+ }
+ return value;
+ }
}
-
- return result;
}
// Load data segments into the memory.
@@ -1035,7 +1098,8 @@ bool InstanceBuilder::ProcessImportedFunction(
if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
NativeModule* native_module = instance->module_object().native_module();
@@ -1336,9 +1400,9 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::zero()) here is what using the observable
- // conversion via {ToPrimitive} would produce as well.
- // TODO(wasm): Still observable if Function.prototype.valueOf or friends
- // are patched, we might need to check for that as well.
+ // conversion via {ToPrimitive} would produce as well. {LookupImportAsm}
+ // checked via {HasDefaultToNumberBehaviour} that "valueOf" or friends have
+ // not been patched.
if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
if (value->IsPrimitive()) {
MaybeHandle<Object> converted = global.type == kWasmI32
@@ -1439,7 +1503,8 @@ void InstanceBuilder::CompileImportWrappers(
compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
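
CreateMapForType above memoizes finished maps in the {maps} FixedArray and recurses on the explicit supertype first, so every parent RTT map exists before its subtypes are built; the recursion is bounded by the subtyping-depth limit checked at decode time. A standalone sketch of that create-parents-first memoization, using plain strings in place of Maps:

  #include <cstdint>
  #include <optional>
  #include <string>
  #include <vector>

  constexpr uint32_t kNoSuperType = 0xFFFFFFFF;  // illustrative sentinel

  struct TypeInfo {
    uint32_t supertype;  // kNoSuperType if the type has no explicit supertype
    std::string name;
  };

  // maps[i] stays empty until the "map" for type i has been created.
  void CreateMapForType(const std::vector<TypeInfo>& types, uint32_t index,
                        std::vector<std::optional<std::string>>& maps) {
    if (maps[index].has_value()) return;  // already created, maybe recursively
    std::string parent = "<none>";
    uint32_t super = types[index].supertype;
    if (super != kNoSuperType) {
      CreateMapForType(types, super, maps);  // parent map must exist first
      parent = *maps[super];
    }
    maps[index] = types[index].name + " : " + parent;
  }

  int main() {
    std::vector<TypeInfo> types = {{kNoSuperType, "A"}, {0, "B"}, {1, "C"}};
    std::vector<std::optional<std::string>> maps(types.size());
    for (uint32_t i = 0; i < types.size(); i++) CreateMapForType(types, i, maps);
  }
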
diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc
index 22bc7d259a5..c332f3f94af 100644
--- a/chromium/v8/src/wasm/streaming-decoder.cc
+++ b/chromium/v8/src/wasm/streaming-decoder.cc
@@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(base::Vector<const uint8_t> bytes) override;
- void Finish() override;
+ void Finish(bool can_use_compiled_module) override;
void Abort() override;
@@ -258,7 +258,7 @@ size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
return num_bytes;
}
-void AsyncStreamingDecoder::Finish() {
+void AsyncStreamingDecoder::Finish(bool can_use_compiled_module) {
TRACE_STREAMING("Finish\n");
DCHECK(!stream_finished_);
stream_finished_ = true;
@@ -268,9 +268,12 @@ void AsyncStreamingDecoder::Finish() {
base::Vector<const uint8_t> wire_bytes =
base::VectorOf(wire_bytes_for_deserializing_);
// Try to deserialize the module from wire bytes and module bytes.
- if (processor_->Deserialize(compiled_module_bytes_, wire_bytes)) return;
+ if (can_use_compiled_module &&
+ processor_->Deserialize(compiled_module_bytes_, wire_bytes))
+ return;
- // Deserialization failed. Restart decoding using |wire_bytes|.
+ // Either the compiled module bytes were invalidated (can_use_compiled_module
+ // is false), or deserialization failed. Restart decoding using |wire_bytes|.
compiled_module_bytes_ = {};
DCHECK(!deserializing());
OnBytesReceived(wire_bytes);
@@ -312,33 +315,29 @@ void AsyncStreamingDecoder::Abort() {
namespace {
-class TopTierCompiledCallback {
+class CompilationChunkFinishedCallback {
public:
- TopTierCompiledCallback(
+ CompilationChunkFinishedCallback(
std::weak_ptr<NativeModule> native_module,
AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
void operator()(CompilationEvent event) const {
- if (event != CompilationEvent::kFinishedTopTierCompilation) return;
+ if (event != CompilationEvent::kFinishedCompilationChunk &&
+ event != CompilationEvent::kFinishedTopTierCompilation) {
+ return;
+ }
// If the native module is still alive, get back a shared ptr and call the
// callback.
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
callback_(native_module);
}
-#ifdef DEBUG
- DCHECK(!called_);
- called_ = true;
-#endif
}
private:
const std::weak_ptr<NativeModule> native_module_;
const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
-#ifdef DEBUG
- mutable bool called_ = false;
-#endif
};
} // namespace
@@ -347,7 +346,7 @@ void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
- comp_state->AddCallback(TopTierCompiledCallback{
+ comp_state->AddCallback(CompilationChunkFinishedCallback{
std::move(native_module), std::move(module_compiled_callback_)});
module_compiled_callback_ = {};
}
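
The renamed CompilationChunkFinishedCallback above now reacts to both kFinishedCompilationChunk and kFinishedTopTierCompilation, and the old one-shot DCHECK is gone because chunk events can fire repeatedly; the weak_ptr keeps the callback safe if the NativeModule has already died. A minimal sketch of that weak-reference callback pattern with illustrative types:

  #include <functional>
  #include <iostream>
  #include <memory>

  enum class Event { kChunkFinished, kTopTierFinished, kOther };

  struct Module { int id = 42; };

  class ChunkFinishedCallback {
   public:
    ChunkFinishedCallback(std::weak_ptr<Module> module,
                          std::function<void(const std::shared_ptr<Module>&)> cb)
        : module_(std::move(module)), callback_(std::move(cb)) {}

    void operator()(Event event) const {
      if (event != Event::kChunkFinished && event != Event::kTopTierFinished) {
        return;
      }
      // Only call back if the module is still alive; may fire more than once.
      if (std::shared_ptr<Module> module = module_.lock()) callback_(module);
    }

   private:
    const std::weak_ptr<Module> module_;
    const std::function<void(const std::shared_ptr<Module>&)> callback_;
  };

  int main() {
    auto module = std::make_shared<Module>();
    ChunkFinishedCallback callback(module, [](const std::shared_ptr<Module>& m) {
      std::cout << "cache module " << m->id << "\n";
    });
    callback(Event::kChunkFinished);    // fires
    callback(Event::kChunkFinished);    // fires again: chunks are not one-shot
    module.reset();
    callback(Event::kTopTierFinished);  // module already gone, silently ignored
  }
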
diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h
index 2c5e1eae3c0..6f4601b9f47 100644
--- a/chromium/v8/src/wasm/streaming-decoder.h
+++ b/chromium/v8/src/wasm/streaming-decoder.h
@@ -78,7 +78,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// The buffer passed into OnBytesReceived is owned by the caller.
virtual void OnBytesReceived(base::Vector<const uint8_t> bytes) = 0;
- virtual void Finish() = 0;
+ virtual void Finish(bool can_use_compiled_module = true) = 0;
virtual void Abort() = 0;
@@ -96,6 +96,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
}
// Passes previously compiled module bytes from the embedder's cache.
+ // The content shouldn't be used until Finish(true) is called.
bool SetCompiledModuleBytes(
base::Vector<const uint8_t> compiled_module_bytes) {
compiled_module_bytes_ = compiled_module_bytes;
@@ -124,6 +125,8 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
std::string url_;
ModuleCompiledCallback module_compiled_callback_;
+ // The content of `compiled_module_bytes_` shouldn't be used until
+ // Finish(true) is called.
base::Vector<const uint8_t> compiled_module_bytes_;
};
diff --git a/chromium/v8/src/wasm/sync-streaming-decoder.cc b/chromium/v8/src/wasm/sync-streaming-decoder.cc
index 73c22cb5a32..ebe1ead525e 100644
--- a/chromium/v8/src/wasm/sync-streaming-decoder.cc
+++ b/chromium/v8/src/wasm/sync-streaming-decoder.cc
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
buffer_size_ += bytes.size();
}
- void Finish() override {
+ void Finish(bool can_use_compiled_module) override {
// We copy all received chunks into one byte buffer.
auto bytes = std::make_unique<uint8_t[]>(buffer_size_);
uint8_t* destination = bytes.get();
@@ -43,7 +43,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
CHECK_EQ(destination - bytes.get(), buffer_size_);
// Check if we can deserialize the module from cache.
- if (deserializing()) {
+ if (can_use_compiled_module && deserializing()) {
HandleScope scope(isolate_);
SaveAndSwitchContext saved_context(isolate_, *context_);
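
Both streaming decoders now take can_use_compiled_module in Finish(); when the embedder invalidated its cached bytes after SetCompiledModuleBytes(), deserialization is skipped and the module is recompiled from the wire bytes. A rough sketch of the intended call pattern (the embedder-side names here are hypothetical):

  // Hypothetical embedder-side flow; Decoder stands in for StreamingDecoder.
  struct Decoder {
    bool has_cached_bytes = false;  // set by SetCompiledModuleBytes()
    void Finish(bool can_use_compiled_module) {
      if (can_use_compiled_module && has_cached_bytes) {
        // Try deserializing the cached machine code first.
      } else {
        // Recompile from the received wire bytes.
      }
    }
  };

  int main() {
    Decoder decoder;
    decoder.has_cached_bytes = true;    // cached bytes were handed over earlier
    bool cache_still_valid = false;     // ...but the cache entry was invalidated
    decoder.Finish(cache_still_valid);  // forces recompilation from wire bytes
  }
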
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index c12496759fc..29482d007b5 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -284,8 +284,8 @@ constexpr bool is_defaultable(ValueKind kind) {
// representation (for reference types), and an inheritance depth (for rtts
// only). Those are encoded into 32 bits using base::BitField. The underlying
// ValueKind enumeration includes four elements which do not strictly correspond
-// to value types: the two packed types i8 and i16, the type of void blocks
-// (stmt), and a bottom value (for internal use).
+// to value types: the two packed types i8 and i16, the void type (for control
+// structures), and a bottom value (for internal use).
class ValueType {
public:
/******************************* Constructors *******************************/
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index d080d1285ed..27687f6e1de 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -191,7 +191,7 @@ std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!has_trap_handler_index());
- if (kind() != WasmCode::kFunction) return;
+ if (kind() != WasmCode::kWasmFunction) return;
if (protected_instructions_size_ == 0) return;
Address base = instruction_start();
@@ -217,6 +217,42 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
isolate->is_profiling();
}
+std::string WasmCode::DebugName() const {
+ if (IsAnonymous()) {
+ return "anonymous function";
+ }
+
+ ModuleWireBytes wire_bytes(native_module()->wire_bytes());
+ const WasmModule* module = native_module()->module();
+ WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
+ WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ std::string name_buffer;
+ if (kind() == kWasmToJsWrapper) {
+ name_buffer = "wasm-to-js:";
+ size_t prefix_len = name_buffer.size();
+ constexpr size_t kMaxSigLength = 128;
+ name_buffer.resize(prefix_len + kMaxSigLength);
+ const FunctionSig* sig = module->functions[index()].sig;
+ size_t sig_length = PrintSignature(
+ base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
+ name_buffer.resize(prefix_len + sig_length);
+ // If the import has a name, also append that (separated by "-").
+ if (!name.empty()) {
+ name_buffer += '-';
+ name_buffer.append(name.begin(), name.size());
+ }
+ } else if (name.empty()) {
+ name_buffer.resize(32);
+ name_buffer.resize(
+ SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
+ "wasm-function[%d]", index()));
+ } else {
+ name_buffer.append(name.begin(), name.end());
+ }
+ return name_buffer;
+}
+
void WasmCode::LogCode(Isolate* isolate, const char* source_url,
int script_id) const {
DCHECK(ShouldBeLogged(isolate));
@@ -224,9 +260,8 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
ModuleWireBytes wire_bytes(native_module_->wire_bytes());
const WasmModule* module = native_module_->module();
- WireBytesRef name_ref =
- module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
- WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ std::string fn_name = DebugName();
+ WasmName name = base::VectorOf(fn_name);
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
@@ -244,37 +279,16 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
- std::string name_buffer;
- if (kind() == kWasmToJsWrapper) {
- name_buffer = "wasm-to-js:";
- size_t prefix_len = name_buffer.size();
- constexpr size_t kMaxSigLength = 128;
- name_buffer.resize(prefix_len + kMaxSigLength);
- const FunctionSig* sig = module->functions[index_].sig;
- size_t sig_length = PrintSignature(
- base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
- name_buffer.resize(prefix_len + sig_length);
- // If the import has a name, also append that (separated by "-").
- if (!name.empty()) {
- name_buffer += '-';
- name_buffer.append(name.begin(), name.size());
- }
- name = base::VectorOf(name_buffer);
- } else if (name.empty()) {
- name_buffer.resize(32);
- name_buffer.resize(
- SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
- "wasm-function[%d]", index()));
- name = base::VectorOf(name_buffer);
+ // Record source positions before adding code, otherwise when code is added,
+ // there are no source positions to associate with the added code.
+ if (!source_positions().empty()) {
+ LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
+ source_positions()));
}
+
int code_offset = module->functions[index_].code.offset();
PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
source_url, code_offset, script_id));
-
- if (!source_positions().empty()) {
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
- source_positions()));
- }
}
void WasmCode::Validate() const {
@@ -331,15 +345,16 @@ void WasmCode::Validate() const {
#endif
}
-void WasmCode::MaybePrint(const char* name) const {
+void WasmCode::MaybePrint() const {
// Determines whether flags want this code to be printed.
bool function_index_matches =
(!IsAnonymous() &&
FLAG_print_wasm_code_function_index == static_cast<int>(index()));
- if (FLAG_print_code ||
- (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
- : FLAG_print_wasm_stub_code)) {
- Print(name);
+ if (FLAG_print_code || (kind() == kWasmFunction
+ ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
+ std::string name = DebugName();
+ Print(name.c_str());
}
}
@@ -361,7 +376,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (name) os << "name: " << name << "\n";
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
- if (kind() == kFunction) {
+ if (kind() == kWasmFunction) {
DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
const char* compiler =
is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
@@ -435,8 +450,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << " registers: ";
uint32_t register_bits = entry.register_bits();
int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
- for (int i = bits - 1; i >= 0; --i) {
- os << ((register_bits >> i) & 1);
+ for (int j = bits - 1; j >= 0; --j) {
+ os << ((register_bits >> j) & 1);
}
}
os << "\n";
@@ -455,7 +470,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
switch (kind) {
- case WasmCode::kFunction:
+ case WasmCode::kWasmFunction:
return "wasm function";
case WasmCode::kWasmToCapiWrapper:
return "wasm-to-capi";
@@ -664,12 +679,13 @@ class CheckWritableMemoryRegions {
DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
[](auto region) { return region.is_empty(); }));
- // Regions are sorted and disjoint.
- std::accumulate(writable_memory_.begin(), writable_memory_.end(),
- Address{0}, [](Address previous_end, auto region) {
- DCHECK_LT(previous_end, region.begin());
- return region.end();
- });
+ // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
+ // so USE is required to prevent build failures in debug builds).
+ USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
+ Address{0}, [](Address previous_end, auto region) {
+ DCHECK_LT(previous_end, region.begin());
+ return region.end();
+ }));
}
private:
@@ -954,6 +970,7 @@ BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
} // namespace
NativeModule::NativeModule(const WasmFeatures& enabled,
+ DynamicTiering dynamic_tiering,
VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
@@ -972,8 +989,8 @@ NativeModule::NativeModule(const WasmFeatures& enabled,
DCHECK_NOT_NULL(shared_this);
DCHECK_NULL(*shared_this);
shared_this->reset(this);
- compilation_state_ =
- CompilationState::New(*shared_this, std::move(async_counters));
+ compilation_state_ = CompilationState::New(
+ *shared_this, std::move(async_counters), dynamic_tiering);
compilation_state_->InitCompileJob();
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
@@ -1032,18 +1049,15 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- for (auto& owned_entry : owned_code_) {
- owned_entry.second->LogCode(isolate, source_url.get(), script.id());
- }
- for (auto& owned_entry : new_owned_code_) {
- owned_entry->LogCode(isolate, source_url.get(), script.id());
+ WasmCodeRefScope code_ref_scope;
+ for (auto& code : SnapshotAllOwnedCode()) {
+ code->LogCode(isolate, source_url.get(), script.id());
}
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), bounds_checks_, kRuntimeExceptionSupport,
- enabled_features_};
+ return {module(), bounds_checks_, kRuntimeExceptionSupport, enabled_features_,
+ compilation_state()->dynamic_tiering()};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
@@ -1116,22 +1130,22 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
std::unique_ptr<WasmCode> new_code{
- new WasmCode{this, // native_module
- kAnonymousFuncIndex, // index
- dst_code_bytes, // instructions
- stack_slots, // stack_slots
- 0, // tagged_parameter_slots
- safepoint_table_offset, // safepoint_table_offset
- handler_table_offset, // handler_table_offset
- constant_pool_offset, // constant_pool_offset
- code_comments_offset, // code_comments_offset
- instructions.length(), // unpadded_binary_size
- {}, // protected_instructions
- reloc_info.as_vector(), // reloc_info
- source_pos.as_vector(), // source positions
- WasmCode::kFunction, // kind
- ExecutionTier::kNone, // tier
- kNoDebugging}}; // for_debugging
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ dst_code_bytes, // instructions
+ stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
+ safepoint_table_offset, // safepoint_table_offset
+ handler_table_offset, // handler_table_offset
+ constant_pool_offset, // constant_pool_offset
+ code_comments_offset, // code_comments_offset
+ instructions.length(), // unpadded_binary_size
+ {}, // protected_instructions
+ reloc_info.as_vector(), // reloc_info
+ source_pos.as_vector(), // source positions
+ WasmCode::kWasmFunction, // kind
+ ExecutionTier::kNone, // tier
+ kNoDebugging}}; // for_debugging
new_code->MaybePrint();
new_code->Validate();
@@ -1179,7 +1193,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
- CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
@@ -1255,6 +1268,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
source_position_table, kind, tier, for_debugging}};
+
code->MaybePrint();
code->Validate();
@@ -1291,7 +1305,7 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
case WasmCompilationResult::kWasmToJsWrapper:
return WasmCode::Kind::kWasmToJsWrapper;
case WasmCompilationResult::kFunction:
- return WasmCode::Kind::kFunction;
+ return WasmCode::Kind::kWasmFunction;
default:
UNREACHABLE();
}
@@ -1429,6 +1443,17 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
return std::vector<WasmCode*>{start, end};
}
+std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+
+ std::vector<WasmCode*> all_code(owned_code_.size());
+ std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
+ [](auto& entry) { return entry.second.get(); });
+ std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
+ return all_code;
+}
+
WasmCode* NativeModule::GetCode(uint32_t index) const {
base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
@@ -1960,7 +1985,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
- if (!BackingStore::ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
// When we start exposing Wasm in jitless mode, then the jitless flag
@@ -1968,10 +1992,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK(!FLAG_jitless);
VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
VirtualMemory::kMapAsJittable);
- if (!mem.IsReserved()) {
- BackingStore::ReleaseReservation(size);
- return {};
- }
+ if (!mem.IsReserved()) return {};
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
mem.end(), mem.size());
@@ -2104,6 +2125,11 @@ void WasmCodeManager::SetThreadWritable(bool writable) {
MemoryProtectionKeyPermission permissions =
writable ? kNoRestrictions : kDisableWrite;
+ // When switching to writable we should not already be writable. Otherwise
+ // this points at a problem with counting writers, or with wrong
+ // initialization (globally or per thread).
+ DCHECK_IMPLIES(writable, !MemoryProtectionKeyWritable());
+
TRACE_HEAP("Setting memory protection key %d to writable: %d.\n",
memory_protection_key_, writable);
SetPermissionsForMemoryProtectionKey(memory_protection_key_, permissions);
@@ -2113,6 +2139,16 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+bool WasmCodeManager::MemoryProtectionKeyWritable() const {
+ return wasm::MemoryProtectionKeyWritable(memory_protection_key_);
+}
+
+void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
+ if (memory_protection_key_ == kNoMemoryProtectionKey) {
+ memory_protection_key_ = AllocateMemoryProtectionKey();
+ }
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
std::shared_ptr<const WasmModule> module) {
@@ -2166,8 +2202,11 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size = code_space.size();
Address end = code_space.end();
std::shared_ptr<NativeModule> ret;
- new NativeModule(enabled, std::move(code_space), std::move(module),
- isolate->async_counters(), &ret);
+ DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
+ new NativeModule(enabled, dynamic_tiering, std::move(code_space),
+ std::move(module), isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
@@ -2397,7 +2436,6 @@ void WasmCodeManager::FreeNativeModule(
#endif // V8_OS_WIN64
lookup_map_.erase(code_space.address());
- BackingStore::ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}
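
WasmCode::DebugName() above centralizes the naming logic that LogCode() used to inline: wasm-to-JS wrappers get a printed signature (plus the import name, if any), and unnamed functions fall back to "wasm-function[index]". A simplified standalone sketch of that fallback chain, without the name-section lookup:

  #include <cstdio>
  #include <string>

  // Simplified stand-in for WasmCode::DebugName(); the name-section lookup and
  // signature printing are replaced by plain parameters.
  std::string DebugName(bool is_wasm_to_js_wrapper, const std::string& name,
                        const std::string& signature, int func_index) {
    if (is_wasm_to_js_wrapper) {
      std::string result = "wasm-to-js:" + signature;
      if (!name.empty()) result += "-" + name;  // append the import name, if any
      return result;
    }
    if (name.empty()) {
      char buffer[32];
      std::snprintf(buffer, sizeof(buffer), "wasm-function[%d]", func_index);
      return buffer;
    }
    return name;
  }

  int main() {
    std::printf("%s\n", DebugName(false, "", "", 7).c_str());       // wasm-function[7]
    std::printf("%s\n", DebugName(true, "imp", "i_i", 0).c_str());  // wasm-to-js:i_i-imp
    std::printf("%s\n", DebugName(false, "add", "", 3).c_str());    // add
  }
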
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 2baf46e8886..ad7e4ab26bc 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore8SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore16SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore32SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
@@ -109,7 +117,6 @@ struct WasmModule;
V(WasmAllocateArray_Uninitialized) \
V(WasmAllocateArray_InitNull) \
V(WasmAllocateArray_InitZero) \
- V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
V(WasmAllocateRtt) \
V(WasmAllocateFreshRtt) \
@@ -149,12 +156,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
class V8_EXPORT_PRIVATE WasmCode final {
public:
- enum Kind {
- kFunction,
- kWasmToCapiWrapper,
- kWasmToJsWrapper,
- kJumpTable
- };
+ enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };
// Each runtime stub is identified by an id. This id is used to reference the
// stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
@@ -188,25 +190,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
- static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
- int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+ }
}
}
@@ -289,7 +313,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
- void MaybePrint(const char* name = nullptr) const;
+ void MaybePrint() const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -391,6 +415,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]> ConcatenateBytes(
std::initializer_list<base::Vector<const byte>>);
+ // Tries to get a reasonable name. Lazily looks up the name section, and falls
+ // back to the function index. Return value is guaranteed to not be empty.
+ std::string DebugName() const;
+
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
int trap_handler_index() const {
@@ -520,7 +548,7 @@ class WasmCodeAllocator {
// Make a code region writable. Only allowed if there is at least one writer
// (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- void MakeWritable(base::AddressRegion);
+ V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -637,6 +665,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
+ // Creates a snapshot of all {owned_code_}, will transfer new code (if any) to
+ // {owned_code_}.
+ std::vector<WasmCode*> SnapshotAllOwnedCode() const;
WasmCode* GetCode(uint32_t index) const;
bool HasCode(uint32_t index) const;
@@ -697,7 +728,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LogWasmCodes(Isolate*, Script);
- CompilationState* compilation_state() { return compilation_state_.get(); }
+ CompilationState* compilation_state() const {
+ return compilation_state_.get();
+ }
// Create a {CompilationEnv} object for compilation. The caller has to ensure
// that the {WasmModule} pointer stays valid while the {CompilationEnv} is
@@ -817,7 +850,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
- NativeModule(const WasmFeatures& enabled_features, VirtualMemory code_space,
+ NativeModule(const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this);
@@ -1006,6 +1040,15 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Returns true if there is PKU support, false otherwise.
bool HasMemoryProtectionKeySupport() const;
+ // Returns {true} if the memory protection key is write-enabled for the
+ // current thread.
+ // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
+ bool MemoryProtectionKeyWritable() const;
+
+ // This allocates a memory protection key (if none was allocated before),
+ // independent of the --wasm-memory-protection-keys flag.
+ void InitializeMemoryProtectionKeyForTesting();
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
@@ -1033,7 +1076,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
- const int memory_protection_key_;
+ int memory_protection_key_;
mutable base::Mutex native_modules_mutex_;
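
GetTSANStoreStub above dispatches over memory order, store size, and FP-save mode to one of sixteen stubs via explicit branches. Purely as an illustration of the same mapping, here is an equivalent index computation over the (order, size, fp_mode) triple; the returned ids are made up, and the real helper returns named RuntimeStubId constants:

  #include <atomic>
  #include <cassert>
  #include <cstdio>

  enum class FpMode { kIgnore, kSave };

  // 2 memory orders x 4 store sizes x 2 FP modes = 16 stubs. The ids only
  // demonstrate that the triple selects a unique stub.
  int GetTSANStoreStubIndex(FpMode fp_mode, int size, std::memory_order order) {
    assert(size == 1 || size == 2 || size == 4 || size == 8);
    assert(order == std::memory_order_relaxed || order == std::memory_order_seq_cst);
    int order_index = order == std::memory_order_seq_cst ? 1 : 0;
    int size_index = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
    int fp_index = fp_mode == FpMode::kSave ? 1 : 0;
    return order_index * 8 + size_index * 2 + fp_index;  // 0..15
  }

  int main() {
    std::printf("%d\n", GetTSANStoreStubIndex(FpMode::kIgnore, 4,
                                              std::memory_order_relaxed));  // 4
  }
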
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index 726ceaa0185..5bb12bc863e 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -50,9 +50,9 @@ enum ValueTypeCode : uint8_t {
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
-constexpr uint8_t kWasmFunctionExtendingTypeCode = 0x5d;
-constexpr uint8_t kWasmStructExtendingTypeCode = 0x5c;
-constexpr uint8_t kWasmArrayExtendingTypeCode = 0x5b;
+constexpr uint8_t kWasmFunctionSubtypeCode = 0x5d;
+constexpr uint8_t kWasmStructSubtypeCode = 0x5c;
+constexpr uint8_t kWasmArraySubtypeCode = 0x5b;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -118,19 +118,19 @@ constexpr uint8_t kNoCompilationHint = kMaxUInt8;
// Binary encoding of name section kinds.
enum NameSectionKindCode : uint8_t {
- kModule = 0,
- kFunction = 1,
- kLocal = 2,
+ kModuleCode = 0,
+ kFunctionCode = 1,
+ kLocalCode = 2,
// https://github.com/WebAssembly/extended-name-section/
- kLabel = 3,
- kType = 4,
- kTable = 5,
- kMemory = 6,
- kGlobal = 7,
- kElementSegment = 8,
- kDataSegment = 9,
+ kLabelCode = 3,
+ kTypeCode = 4,
+ kTableCode = 5,
+ kMemoryCode = 6,
+ kGlobalCode = 7,
+ kElementSegmentCode = 8,
+ kDataSegmentCode = 9,
// https://github.com/WebAssembly/gc/issues/193
- kField = 10
+ kFieldCode = 10
};
constexpr size_t kWasmPageSize = 0x10000;
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index 65f05ad507d..a0ecab95964 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -194,7 +194,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!type_names_) {
type_names_ = std::make_unique<NameMap>(DecodeNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kType));
+ native_module_->wire_bytes(), NameSectionKindCode::kTypeCode));
}
return type_names_->GetName(type_index);
}
@@ -203,7 +203,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
local_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kLocal));
+ native_module_->wire_bytes(), NameSectionKindCode::kLocalCode));
}
return local_names_->GetName(func_index, local_index);
}
@@ -212,7 +212,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!field_names_) {
field_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kField));
+ native_module_->wire_bytes(), NameSectionKindCode::kFieldCode));
}
return field_names_->GetName(struct_index, field_index);
}
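
The DebugInfoImpl getters above build each name map lazily under {mutex_} on first access, now keyed by the *Code-suffixed NameSectionKindCode values. A compact sketch of that guarded lazy-initialization pattern (the container types and the decode step are stand-ins):

  #include <map>
  #include <memory>
  #include <mutex>
  #include <string>

  class DebugNames {
   public:
    std::string GetTypeName(int type_index) {
      std::lock_guard<std::mutex> guard(mutex_);
      if (!type_names_) {
        // Decode the (potentially large) name section only on first request.
        type_names_ =
            std::make_unique<std::map<int, std::string>>(DecodeTypeNames());
      }
      auto it = type_names_->find(type_index);
      return it == type_names_->end() ? std::string() : it->second;
    }

   private:
    // Stand-in for DecodeNameMap(wire_bytes, NameSectionKindCode::kTypeCode).
    static std::map<int, std::string> DecodeTypeNames() { return {{0, "point"}}; }

    std::mutex mutex_;
    std::unique_ptr<std::map<int, std::string>> type_names_;
  };

  int main() {
    DebugNames names;
    return names.GetTypeName(0) == "point" ? 0 : 1;  // decoded once, then cached
  }
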
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 6da33f1ab29..f21e2b76877 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -11,9 +11,11 @@
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
@@ -1034,10 +1036,10 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
for (auto* native_module : info->native_modules) {
DCHECK_EQ(1, native_modules_.count(native_module));
DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
- auto* info = native_modules_[native_module].get();
- info->isolates.erase(isolate);
+ auto* module = native_modules_[native_module].get();
+ module->isolates.erase(isolate);
if (current_gc_info_) {
- for (WasmCode* code : info->potentially_dead_code) {
+ for (WasmCode* code : module->potentially_dead_code) {
current_gc_info_->dead_code.erase(code);
}
}
@@ -1228,9 +1230,9 @@ void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
base::MutexGuard guard(&mutex_);
- auto it = native_modules_.find(native_module);
- DCHECK_NE(native_modules_.end(), it);
- for (Isolate* isolate : it->second->isolates) {
+ auto module = native_modules_.find(native_module);
+ DCHECK_NE(native_modules_.end(), module);
+ for (Isolate* isolate : module->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
@@ -1274,7 +1276,7 @@ void WasmEngine::FreeNativeModule(NativeModule* native_module) {
native_module, current_gc_info_->dead_code.size());
}
native_module_cache_.Erase(native_module);
- native_modules_.erase(it);
+ native_modules_.erase(module);
}
namespace {
@@ -1617,6 +1619,9 @@ WasmCodeManager* GetWasmCodeManager() {
// {max_mem_pages} is declared in wasm-limits.h.
uint32_t max_mem_pages() {
+ static_assert(
+ kV8MaxWasmMemoryPages * kWasmPageSize <= JSArrayBuffer::kMaxByteLength,
+ "Wasm memories must not be bigger than JSArrayBuffers");
STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h
index 72090969111..5cf61ef543d 100644
--- a/chromium/v8/src/wasm/wasm-engine.h
+++ b/chromium/v8/src/wasm/wasm-engine.h
@@ -45,6 +45,7 @@ class GdbServer;
class AsyncCompileJob;
class ErrorThrower;
struct ModuleWireBytes;
+class StreamingDecoder;
class WasmFeatures;
class V8_EXPORT_PRIVATE CompilationResultResolver {
diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc
index 101d5638765..0d8c14a6412 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.cc
+++ b/chromium/v8/src/wasm/wasm-external-refs.cc
@@ -451,7 +451,6 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
return instance.memory_start() + index;
}
@@ -460,19 +459,6 @@ inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
return base + index;
}
-#else
-inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return instance.memory_start() + (index & instance.memory_mask());
-}
-
-inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
- size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1;
- return base + (index & mem_mask);
-}
-#endif
-
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
@@ -551,6 +537,54 @@ int32_t memory_fill_wrapper(Address data) {
return kSuccess;
}
+namespace {
+inline void* ArrayElementAddress(WasmArray array, uint32_t index,
+ int element_size_bytes) {
+ return reinterpret_cast<void*>(array.ptr() + WasmArray::kHeaderSize -
+ kHeapObjectTag + index * element_size_bytes);
+}
+} // namespace
+
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length) {
+ DCHECK_GT(length, 0);
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowGarbageCollection no_gc;
+ WasmArray dst_array = WasmArray::cast(Object(raw_dst_array));
+ WasmArray src_array = WasmArray::cast(Object(raw_src_array));
+
+ bool overlapping_ranges =
+ dst_array.ptr() == src_array.ptr() &&
+ (dst_index < src_index ? dst_index + length > src_index
+ : src_index + length > dst_index);
+ wasm::ValueType element_type = src_array.type()->element_type();
+ if (element_type.is_reference()) {
+ WasmInstanceObject instance =
+ WasmInstanceObject::cast(Object(raw_instance));
+ Isolate* isolate = Isolate::FromRootAddress(instance.isolate_root());
+ ObjectSlot dst_slot = dst_array.ElementSlot(dst_index);
+ ObjectSlot src_slot = src_array.ElementSlot(src_index);
+ if (overlapping_ranges) {
+ isolate->heap()->MoveRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ isolate->heap()->CopyRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ }
+ } else {
+ int element_size_bytes = element_type.element_size_bytes();
+ void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
+ void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
+ size_t copy_size = length * element_size_bytes;
+ if (overlapping_ranges) {
+ MemMove(dst, src, copy_size);
+ } else {
+ MemCopy(dst, src, copy_size);
+ }
+ }
+}
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
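The overlap test in array_copy_wrapper above generalizes to any flat element copy. A minimal standalone sketch (plain C++ byte buffers standing in for WasmArray storage; not V8 code) of the same decision between MemMove and MemCopy:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copies `length` elements of `elem_size` bytes between two buffers that
    // may alias (e.g. dst and src are the same array). Ranges only overlap if
    // both views share storage and the index windows intersect.
    void CopyElements(uint8_t* dst_base, uint32_t dst_index,
                      const uint8_t* src_base, uint32_t src_index,
                      uint32_t length, size_t elem_size) {
      bool overlapping_ranges =
          dst_base == src_base &&
          (dst_index < src_index ? dst_index + length > src_index
                                 : src_index + length > dst_index);
      void* dst = dst_base + dst_index * elem_size;
      const void* src = src_base + src_index * elem_size;
      size_t copy_size = length * elem_size;
      if (overlapping_ranges) {
        std::memmove(dst, src, copy_size);  // safe for aliasing ranges
      } else {
        std::memcpy(dst, src, copy_size);   // cheaper for disjoint ranges
      }
    }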
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index e8363d59367..24d4d35bece 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -111,6 +111,11 @@ int32_t memory_copy_wrapper(Address data);
// zero-extend the result in the return register.
int32_t memory_fill_wrapper(Address data);
+// Assumes copy ranges are in-bounds and length > 0.
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length);
+
using WasmTrapCallbackForTesting = void (*)();
V8_EXPORT_PRIVATE void set_trap_callback_for_testing(
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index 1c4c2acaec3..cf9ef00bf82 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -26,8 +26,12 @@
\
/* Non-specified, V8-only experimental additions to the GC proposal */ \
/* V8 side owner: jkummerow */ \
- V(gc_experiments, "garbage collection V8-only experimental features", false) \
- V(nn_locals, "allow non-defaultable/non-nullable locals", false) \
+ V(nn_locals, \
+ "allow non-defaultable/non-nullable locals, validated with 'until end of " \
+ "block' semantics", \
+ false) \
+ V(unsafe_nn_locals, \
+ "allow non-defaultable/non-nullable locals, no validation", false) \
\
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
@@ -47,7 +51,12 @@
/* Branch Hinting proposal. */ \
/* https://github.com/WebAssembly/branch-hinting */ \
/* V8 side owner: jkummerow */ \
- V(branch_hinting, "branch hinting", false)
+ V(branch_hinting, "branch hinting", false) \
+ \
+ /* Stack Switching proposal. */ \
+ /* https://github.com/WebAssembly/stack-switching */ \
+ /* V8 side owner: thibaudm, fgm */ \
+ V(stack_switching, "stack switching", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -58,18 +67,6 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Exception handling proposal. */ \
- /* https://github.com/WebAssembly/exception-handling */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.9 */ \
- V(eh, "exception handling opcodes", false) \
- \
- /* Reference Types, a.k.a. reftypes proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- V(reftypes, "reference type opcodes", false) \
- \
/* Tail call / return call proposal. */ \
/* https://github.com/webassembly/tail-call */ \
/* V8 side owner: thibaudm */ \
@@ -93,6 +90,13 @@
/* Shipped in v9.1 * */ \
V(simd, "SIMD opcodes", true) \
\
+ /* Reference Types, a.k.a. reftypes proposal. */ \
+ /* https://github.com/WebAssembly/reference-types */ \
+ /* V8 side owner: ahaas */ \
+ /* Staged in v7.8. */ \
+ /* Shipped in v9.6 * */ \
+ V(reftypes, "reference type opcodes", true) \
+ \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
@@ -104,6 +108,13 @@
/* V8 side owner: gdeepti */ \
V(threads, "thread opcodes", true) \
\
+ /* Exception handling proposal. */ \
+ /* https://github.com/WebAssembly/exception-handling */ \
+ /* V8 side owner: thibaudm */ \
+ /* Staged in v8.9 */ \
+ /* Shipped in v9.5 */ \
+ V(eh, "exception handling opcodes", true) \
+ \
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
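The V(...) lists above are X-macros that get expanded several times elsewhere (flag definitions, feature sets, help text). A standalone sketch of the pattern with a hypothetical feature list, showing how one list can drive both a flags struct and a help printer; names here are illustrative, not the real V8 expansion sites.

    #include <cstdio>

    // Hypothetical feature list in the same X-macro style as above.
    #define FOREACH_DEMO_FEATURE(V)               \
      V(reftypes, "reference type opcodes", true) \
      V(eh, "exception handling opcodes", true)   \
      V(stack_switching, "stack switching", false)

    // One expansion produces a struct of booleans...
    struct DemoFeatures {
    #define DECL_FIELD(name, desc, enabled) bool name##_ = enabled;
      FOREACH_DEMO_FEATURE(DECL_FIELD)
    #undef DECL_FIELD
    };

    // ...another produces help output from the very same list.
    void PrintDemoFeatureHelp() {
    #define PRINT_HELP(name, desc, enabled) \
      std::printf("--experimental-wasm-%s: %s (default %d)\n", #name, desc, enabled);
      FOREACH_DEMO_FEATURE(PRINT_HELP)
    #undef PRINT_HELP
    }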
diff --git a/chromium/v8/src/wasm/wasm-init-expr.cc b/chromium/v8/src/wasm/wasm-init-expr.cc
index 14a7e3b6a6f..c6641034ba8 100644
--- a/chromium/v8/src/wasm/wasm-init-expr.cc
+++ b/chromium/v8/src/wasm/wasm-init-expr.cc
@@ -39,7 +39,11 @@ ValueType WasmInitExpr::type(const WasmModule* module,
case kRefNullConst:
return ValueType::Ref(immediate().heap_type, kNullable);
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
case kArrayInit:
+ case kArrayInitStatic:
return ValueType::Ref(immediate().index, kNonNullable);
case kRttCanon:
return ValueType::Rtt(immediate().heap_type, 0);
diff --git a/chromium/v8/src/wasm/wasm-init-expr.h b/chromium/v8/src/wasm/wasm-init-expr.h
index bf68265b2a1..551fce29915 100644
--- a/chromium/v8/src/wasm/wasm-init-expr.h
+++ b/chromium/v8/src/wasm/wasm-init-expr.h
@@ -34,7 +34,11 @@ class WasmInitExpr {
kRefNullConst,
kRefFuncConst,
kStructNewWithRtt,
+ kStructNew,
+ kStructNewDefaultWithRtt,
+ kStructNewDefault,
kArrayInit,
+ kArrayInitStatic,
kRttCanon,
kRttSub,
kRttFreshSub,
@@ -99,6 +103,31 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr StructNew(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNew;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefaultWithRtt(uint32_t index,
+ WasmInitExpr rtt) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefaultWithRtt;
+ expr.immediate_.index = index;
+ expr.operands_.push_back(std::move(rtt));
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefault(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefault;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
static WasmInitExpr ArrayInit(uint32_t index,
std::vector<WasmInitExpr> elements) {
WasmInitExpr expr;
@@ -108,6 +137,15 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr ArrayInitStatic(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kArrayInitStatic;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
static WasmInitExpr RttCanon(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kRttCanon;
@@ -157,6 +195,9 @@ class WasmInitExpr {
case kRefNullConst:
return immediate().heap_type == other.immediate().heap_type;
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
if (immediate().index != other.immediate().index) return false;
DCHECK_EQ(operands().size(), other.operands().size());
for (uint32_t i = 0; i < operands().size(); i++) {
@@ -164,6 +205,7 @@ class WasmInitExpr {
}
return true;
case kArrayInit:
+ case kArrayInitStatic:
if (immediate().index != other.immediate().index) return false;
if (operands().size() != other.operands().size()) return false;
for (uint32_t i = 0; i < operands().size(); i++) {
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index b65db601545..fab66c598dd 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -7,6 +7,8 @@
#include <cinttypes>
#include <cstring>
+#include "include/v8-function.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
@@ -17,12 +19,14 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
#include "src/objects/fixed-array.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
@@ -59,7 +63,9 @@ class WasmStreaming::WasmStreamingImpl {
void OnBytesReceived(const uint8_t* bytes, size_t size) {
streaming_decoder_->OnBytesReceived(base::VectorOf(bytes, size));
}
- void Finish() { streaming_decoder_->Finish(); }
+ void Finish(bool can_use_compiled_module) {
+ streaming_decoder_->Finish(can_use_compiled_module);
+ }
void Abort(MaybeLocal<Value> exception) {
i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate_));
@@ -112,9 +118,9 @@ void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
impl_->OnBytesReceived(bytes, size);
}
-void WasmStreaming::Finish() {
+void WasmStreaming::Finish(bool can_use_compiled_module) {
TRACE_EVENT0("v8.wasm", "wasm.FinishStreaming");
- impl_->Finish();
+ impl_->Finish(can_use_compiled_module);
}
void WasmStreaming::Abort(MaybeLocal<Value> exception) {
@@ -182,9 +188,6 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
}
GET_FIRST_ARGUMENT_AS(Module)
-GET_FIRST_ARGUMENT_AS(Memory)
-GET_FIRST_ARGUMENT_AS(Table)
-GET_FIRST_ARGUMENT_AS(Global)
GET_FIRST_ARGUMENT_AS(Tag)
#undef GET_FIRST_ARGUMENT_AS
@@ -652,6 +655,25 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Boolean::New(isolate, validated));
}
+namespace {
+bool TransferPrototype(i::Isolate* isolate, i::Handle<i::JSObject> destination,
+ i::Handle<i::JSReceiver> source) {
+ i::MaybeHandle<i::HeapObject> maybe_prototype =
+ i::JSObject::GetPrototype(isolate, source);
+ i::Handle<i::HeapObject> prototype;
+ if (maybe_prototype.ToHandle(&prototype)) {
+ Maybe<bool> result = i::JSObject::SetPrototype(destination, prototype,
+ /*from_javascript=*/false,
+ internal::kThrowOnError);
+ if (!result.FromJust()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
+
// new WebAssembly.Module(bytes) -> WebAssembly.Module
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -677,25 +699,38 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- i::MaybeHandle<i::Object> module_obj;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module_obj;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes);
}
- if (module_obj.is_null()) return;
+ i::Handle<i::WasmModuleObject> module_obj;
+ if (!maybe_module_obj.ToHandle(&module_obj)) return;
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {module_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Module} directly, but some
+ // subclass: {module_obj} has {WebAssembly.Module}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, module_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(module_obj)));
}
// WebAssembly.Module.imports(module) -> Array<Import>
@@ -752,37 +787,6 @@ void WebAssemblyModuleCustomSections(
args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
-MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
- Local<Value> module,
- Local<Value> ffi) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- i::MaybeHandle<i::Object> instance_object;
- {
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
-
- // TODO(ahaas): These checks on the module should not be necessary here They
- // are just a workaround for https://crbug.com/837417.
- i::Handle<i::Object> module_obj = Utils::OpenHandle(*module);
- if (!module_obj->IsWasmModuleObject()) {
- thrower.TypeError("Argument 0 must be a WebAssembly.Module object");
- return {};
- }
-
- i::MaybeHandle<i::JSReceiver> maybe_imports =
- GetValueAsImports(ffi, &thrower);
- if (thrower.error()) return {};
-
- instance_object = i::wasm::GetWasmEngine()->SyncInstantiate(
- i_isolate, &thrower, i::Handle<i::WasmModuleObject>::cast(module_obj),
- maybe_imports, i::MaybeHandle<i::JSArrayBuffer>());
- }
-
- DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
- if (instance_object.is_null()) return {};
- return Utils::ToLocal(instance_object.ToHandleChecked());
-}
-
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -793,23 +797,48 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
- if (!args.IsConstructCall()) {
- thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
- return;
- }
+ i::MaybeHandle<i::JSObject> maybe_instance_obj;
+ {
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
+ return;
+ }
- GetFirstArgumentAsModule(args, &thrower);
- if (thrower.error()) return;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module =
+ GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- // We'll check for that in WebAssemblyInstantiateImpl.
- Local<Value> data = args[1];
+ i::Handle<i::WasmModuleObject> module_obj = maybe_module.ToHandleChecked();
+
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(args[1], &thrower);
+ if (thrower.error()) return;
+
+ maybe_instance_obj = i::wasm::GetWasmEngine()->SyncInstantiate(
+ i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
+ }
+
+ i::Handle<i::JSObject> instance_obj;
+ if (!maybe_instance_obj.ToHandle(&instance_obj)) {
+ DCHECK(i_isolate->has_scheduled_exception());
+ return;
+ }
- Local<Value> instance;
- if (WebAssemblyInstantiateImpl(isolate, args[0], data).ToLocal(&instance)) {
- args.GetReturnValue().Set(instance);
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {instance_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Instance} directly, but some
+ // subclass: {instance_obj} has {WebAssembly.Instance}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, instance_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
}
+
+ args.GetReturnValue().Set(Utils::ToLocal(instance_obj));
}
// WebAssembly.instantiateStreaming(Response | Promise<Response> [, imports])
@@ -1030,7 +1059,7 @@ bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
}
// Fetch 'initial' or 'minimum' property from object. If both are provided,
-// 'initial' is used.
+// a TypeError is thrown.
// TODO(aseemgarg): change behavior when the following bug is resolved:
// https://github.com/WebAssembly/js-types/issues/6
bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -1043,13 +1072,27 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
result, lower_bound, upper_bound)) {
return false;
}
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
- if (!has_initial && enabled_features.has_type_reflection()) {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(
+ reinterpret_cast<i::Isolate*>(isolate));
+ if (enabled_features.has_type_reflection()) {
+ bool has_minimum = false;
+ int64_t minimum = 0;
if (!GetOptionalIntegerProperty(isolate, thrower, context, object,
- v8_str(isolate, "minimum"), &has_initial,
- result, lower_bound, upper_bound)) {
+ v8_str(isolate, "minimum"), &has_minimum,
+ &minimum, lower_bound, upper_bound)) {
return false;
}
+ if (has_initial && has_minimum) {
+ thrower->TypeError(
+ "The properties 'initial' and 'minimum' are not allowed at the same "
+ "time");
+ return false;
+ }
+ if (has_minimum) {
+ // Only {minimum} exists, so we use {minimum} as {initial}.
+ has_initial = true;
+ *result = minimum;
+ }
}
if (!has_initial) {
// TODO(aseemgarg): update error message when the spec issue is resolved.
@@ -1059,6 +1102,19 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return true;
}
+namespace {
+i::Handle<i::Object> DefaultReferenceValue(i::Isolate* isolate,
+ i::wasm::ValueType type) {
+ if (type == i::wasm::kWasmFuncRef) {
+ return isolate->factory()->null_value();
+ }
+ if (type.is_reference()) {
+ return isolate->factory()->undefined_value();
+ }
+ UNREACHABLE();
+}
+} // namespace
+
// new WebAssembly.Table(args) -> WebAssembly.Table
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1084,7 +1140,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
// The JS api uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
@@ -1115,12 +1171,38 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
+ i::Handle<i::WasmTableObject> table_obj =
i::WasmTableObject::New(i_isolate, i::Handle<i::WasmInstanceObject>(),
type, static_cast<uint32_t>(initial), has_maximum,
- static_cast<uint32_t>(maximum), &fixed_array);
+ static_cast<uint32_t>(maximum), &fixed_array,
+ DefaultReferenceValue(i_isolate, type));
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {table_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Table} directly, but some
+ // subclass: {table_obj} has {WebAssembly.Table}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, table_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
+ if (initial > 0 && args.Length() >= 2 && !args[1]->IsUndefined()) {
+ i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ if (!i::WasmTableObject::IsValidElement(i_isolate, table_obj, element)) {
+ thrower.TypeError(
+ "Argument 2 must be undefined, null, or a value of type compatible "
+ "with the type of the new table.");
+ return;
+ }
+ for (uint32_t index = 0; index < static_cast<uint32_t>(initial); ++index) {
+ i::WasmTableObject::Set(i_isolate, table_obj, index, element);
+ }
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(table_obj));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(table_obj)));
}
void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1183,6 +1265,19 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("could not allocate memory");
return;
}
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {memory_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Memory} directly, but some
+ // subclass: {memory_obj} has {WebAssembly.Memory}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, memory_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
if (shared == i::SharedFlag::kShared) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
@@ -1336,6 +1431,18 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {global_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Global} directly, but some
+ // subclass: {global_obj} has {WebAssembly.Global}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, global_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
// Convert value to a WebAssembly value, the default value is 0.
Local<v8::Value> value = Local<Value>::Cast(args[1]);
switch (type.kind()) {
@@ -1578,7 +1685,6 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::kBottom:
case i::wasm::kS128:
UNREACHABLE();
- break;
}
}
}
@@ -1821,16 +1927,16 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> init_value = i_isolate->factory()->null_value();
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- if (enabled_features.has_typed_funcref()) {
- if (args.Length() >= 2 && !args[1]->IsUndefined()) {
- init_value = Utils::OpenHandle(*args[1]);
- }
+ i::Handle<i::Object> init_value;
+
+ if (args.Length() >= 2 && !args[1]->IsUndefined()) {
+ init_value = Utils::OpenHandle(*args[1]);
if (!i::WasmTableObject::IsValidElement(i_isolate, receiver, init_value)) {
thrower.TypeError("Argument 1 must be a valid type for the table");
return;
}
+ } else {
+ init_value = DefaultReferenceValue(i_isolate, receiver->type());
}
int old_size =
@@ -1888,7 +1994,12 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ i::Handle<i::Object> element;
+ if (args.Length() >= 2) {
+ element = Utils::OpenHandle(*args[1]);
+ } else {
+ element = DefaultReferenceValue(i_isolate, table_object->type());
+ }
if (!i::WasmTableObject::IsValidElement(i_isolate, table_object, element)) {
thrower.TypeError(
"Argument 1 must be null or a WebAssembly function of type compatible "
@@ -1898,16 +2009,14 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::WasmTableObject::Set(i_isolate, table_object, index, element);
}
-// WebAssembly.Table.type(WebAssembly.Table) -> TableType
+// WebAssembly.Table.type() -> TableType
void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.type()");
- auto maybe_table = GetFirstArgumentAsTable(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmTableObject> table = maybe_table.ToHandleChecked();
+ EXTRACT_THIS(table, WasmTableObject);
base::Optional<uint32_t> max_size;
if (!table->maximum_length().IsUndefined()) {
uint64_t max_size64 = table->maximum_length().Number();
@@ -1980,16 +2089,14 @@ void WebAssemblyMemoryGetBuffer(
return_value.Set(Utils::ToLocal(buffer));
}
-// WebAssembly.Memory.type(WebAssembly.Memory) -> MemoryType
+// WebAssembly.Memory.type() -> MemoryType
void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.type()");
- auto maybe_memory = GetFirstArgumentAsMemory(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
+ EXTRACT_THIS(memory, WasmMemoryObject);
i::Handle<i::JSArrayBuffer> buffer(memory->array_buffer(), i_isolate);
size_t curr_size = buffer->byte_length() / i::wasm::kWasmPageSize;
DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
@@ -2000,7 +2107,8 @@ void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
max_size.emplace(static_cast<uint32_t>(max_size64));
}
- auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size);
+ bool shared = buffer->is_shared();
+ auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size, shared);
args.GetReturnValue().Set(Utils::ToLocal(type));
}
@@ -2346,16 +2454,14 @@ void WebAssemblyGlobalSetValue(
}
}
-// WebAssembly.Global.type(WebAssembly.Global) -> GlobalType
+// WebAssembly.Global.type() -> GlobalType
void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Global.type()");
- auto maybe_global = GetFirstArgumentAsGlobal(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmGlobalObject> global = maybe_global.ToHandleChecked();
+ EXTRACT_THIS(global, WasmGlobalObject);
auto type = i::wasm::GetTypeForGlobal(i_isolate, global->is_mutable(),
global->type());
args.GetReturnValue().Set(Utils::ToLocal(type));
@@ -2580,7 +2686,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
+ InstallFunc(isolate, table_proto, "type", WebAssemblyTableType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
@@ -2600,7 +2707,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryType, 1);
+ InstallFunc(isolate, memory_proto, "type", WebAssemblyMemoryType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
@@ -2622,7 +2730,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalType, 1);
+ InstallFunc(isolate, global_proto, "type", WebAssemblyGlobalType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
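The 'initial'/'minimum' handling changed above so that supplying both now raises a TypeError instead of silently preferring 'initial'. A standalone sketch of the resolution logic (plain C++; names and error strings are illustrative only, not the real V8 plumbing):

    #include <cstdint>
    #include <optional>

    struct InitialResult {
      bool ok;
      int64_t value;
      const char* error;  // set when ok == false
    };

    // Resolves the JS-API 'initial'/'minimum' pair the way the hunk above now
    // does: 'minimum' only counts when type reflection is enabled, both at
    // once is an error, and at least one of them must be present.
    InitialResult ResolveInitial(std::optional<int64_t> initial,
                                 std::optional<int64_t> minimum,
                                 bool type_reflection_enabled) {
      if (type_reflection_enabled && initial && minimum) {
        return {false, 0,
                "The properties 'initial' and 'minimum' are not allowed at "
                "the same time"};
      }
      if (initial) return {true, *initial, nullptr};
      if (type_reflection_enabled && minimum) return {true, *minimum, nullptr};
      return {false, 0, "Property 'initial' is required"};
    }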
diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h
index b7806af797f..fa7784e724c 100644
--- a/chromium/v8/src/wasm/wasm-limits.h
+++ b/chromium/v8/src/wasm/wasm-limits.h
@@ -40,7 +40,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
// Also, do not use this limit to validate declared memory, use
// kSpecMaxMemoryPages for that.
constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
- ? 32768 // = 2 GiB
+ ? 32767 // = 2 GiB
: 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
@@ -58,9 +58,6 @@ constexpr size_t kV8MaxWasmMemories = 1;
// GC proposal. These limits are not standardized yet.
constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
-// Maximum supported by implementation: ((1<<27)-3).
-// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
constexpr size_t kV8MaxWasmArrayInitLength = 999;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
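Together with the new static_assert in max_mem_pages(), the lowered 32-bit page limit keeps the largest possible memory strictly below the ArrayBuffer byte-length cap. An illustrative compile-time check; the kDemo* constants are stand-ins (kDemoMaxByteLength32 approximates a 32-bit JSArrayBuffer limit and is an assumption, not the real V8 constant):

    #include <cstddef>

    constexpr size_t kDemoWasmPageSize = 0x10000;        // 64 KiB
    constexpr size_t kDemoMaxWasmMemoryPages32 = 32767;  // new 32-bit limit
    constexpr size_t kDemoMaxByteLength32 = 0x7FFFFFFF;  // assumed cap

    static_assert(
        kDemoMaxWasmMemoryPages32 * kDemoWasmPageSize <= kDemoMaxByteLength32,
        "Wasm memories must not be bigger than the ArrayBuffer byte-length cap");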
diff --git a/chromium/v8/src/wasm/wasm-linkage.h b/chromium/v8/src/wasm/wasm-linkage.h
index 2d980555192..ecf59f9ed56 100644
--- a/chromium/v8/src/wasm/wasm-linkage.h
+++ b/chromium/v8/src/wasm/wasm-linkage.h
@@ -80,6 +80,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == LOONG64 ================================================================
+// ===========================================================================
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
+constexpr Register kGpReturnRegisters[] = {a0, a1};
+constexpr DoubleRegister kFpParamRegisters[] = {f0, f1, f2, f3, f4, f5, f6, f7};
+constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1};
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc
index 2bf20ea3ec3..9bb34721388 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.cc
+++ b/chromium/v8/src/wasm/wasm-module-builder.cc
@@ -264,7 +264,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
functions_(zone),
tables_(zone),
data_segments_(zone),
- indirect_functions_(zone),
+ element_segments_(zone),
globals_(zone),
exceptions_(zone),
signature_map_(zone),
@@ -290,15 +290,20 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
}
-uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
- auto sig_entry = signature_map_.find(*sig);
- if (sig_entry != signature_map_.end()) return sig_entry->second;
+uint32_t WasmModuleBuilder::ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- types_.push_back(Type(sig));
+ types_.push_back(Type(sig, supertype));
return index;
}
+uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig, uint32_t supertype) {
+ auto sig_entry = signature_map_.find(*sig);
+ if (sig_entry != signature_map_.end()) return sig_entry->second;
+ return ForceAddSignature(sig, supertype);
+}
+
uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
DCHECK_EQ(0, type->return_count());
int type_index = AddSignature(type);
@@ -307,15 +312,16 @@ uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
return except_index;
}
-uint32_t WasmModuleBuilder::AddStructType(StructType* type) {
+uint32_t WasmModuleBuilder::AddStructType(StructType* type,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
-uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
+uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
@@ -323,75 +329,52 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
const uint32_t WasmModuleBuilder::kNullIndex =
std::numeric_limits<uint32_t>::max();
-// TODO(9495): Add support for typed function tables and more init. expressions.
-uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
- DCHECK(allocating_indirect_functions_allowed_);
- uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
- DCHECK_GE(FLAG_wasm_max_table_size, index);
- if (count > FLAG_wasm_max_table_size - index) {
+uint32_t WasmModuleBuilder::IncreaseTableMinSize(uint32_t table_index,
+ uint32_t count) {
+ DCHECK_LT(table_index, tables_.size());
+ uint32_t old_min_size = tables_[table_index].min_size;
+ if (count > FLAG_wasm_max_table_size - old_min_size) {
return std::numeric_limits<uint32_t>::max();
}
- uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
- DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, kNullIndex);
- uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
- if (tables_.empty()) {
- // This cannot use {AddTable} because that would flip the
- // {allocating_indirect_functions_allowed_} flag.
- tables_.push_back({kWasmFuncRef, new_size, max, true, {}});
- } else {
- // There can only be the indirect function table so far, otherwise the
- // {allocating_indirect_functions_allowed_} flag would have been false.
- DCHECK_EQ(1u, tables_.size());
- DCHECK_EQ(kWasmFuncRef, tables_[0].type);
- DCHECK(tables_[0].has_maximum);
- tables_[0].min_size = new_size;
- tables_[0].max_size = max;
- }
- return index;
-}
-
-void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
- uint32_t direct) {
- indirect_functions_[indirect] = direct;
-}
-
-void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
- DCHECK_GE(FLAG_wasm_max_table_size, max);
- DCHECK_GE(max, indirect_functions_.size());
- max_table_size_ = max;
- DCHECK(allocating_indirect_functions_allowed_);
- if (!tables_.empty()) {
- tables_[0].max_size = max;
- }
+ tables_[table_index].min_size = old_min_size + count;
+ tables_[table_index].max_size =
+ std::max(old_min_size + count, tables_[table_index].max_size);
+ return old_min_size;
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, 0, false, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size, WasmInitExpr init) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, std::move(init)});
return static_cast<uint32_t>(tables_.size() - 1);
}
+void WasmModuleBuilder::AddElementSegment(WasmElemSegment segment) {
+ element_segments_.push_back(std::move(segment));
+}
+
+void WasmModuleBuilder::SetIndirectFunction(
+ uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode) {
+ WasmElemSegment segment(zone_, kWasmFuncRef, table_index,
+ WasmInitExpr(static_cast<int>(index_in_table)));
+ segment.indexing_mode = indexing_mode;
+ segment.entries.emplace_back(WasmElemSegment::Entry::kRefFuncEntry,
+ direct_function_index);
+ AddElementSegment(std::move(segment));
+}
+
uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
FunctionSig* sig,
base::Vector<const char> module) {
@@ -454,8 +437,9 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
-void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
- ValueType type) {
+void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
+ const WasmInitExpr& init,
+ ValueType type) {
switch (init.kind()) {
case WasmInitExpr::kI32Const:
buffer->write_u8(kExprI32Const);
@@ -531,22 +515,49 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
break;
}
+ case WasmInitExpr::kStructNew:
case WasmInitExpr::kStructNewWithRtt:
+ case WasmInitExpr::kStructNewDefault:
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ STATIC_ASSERT((kExprStructNew >> 8) == kGCPrefix);
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefault >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefaultWithRtt >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
+ WasmOpcode opcode;
+ switch (init.kind()) {
+ case WasmInitExpr::kStructNewWithRtt:
+ opcode = kExprStructNewWithRtt;
+ break;
+ case WasmInitExpr::kStructNew:
+ opcode = kExprStructNew;
+ break;
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ opcode = kExprStructNewDefaultWithRtt;
+ break;
+ case WasmInitExpr::kStructNewDefault:
+ opcode = kExprStructNewDefault;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ buffer->write_u8(static_cast<uint8_t>(opcode));
buffer->write_u32v(init.immediate().index);
break;
case WasmInitExpr::kArrayInit:
+ case WasmInitExpr::kArrayInitStatic:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprArrayInitStatic >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
+ buffer->write_u8(static_cast<uint8_t>(
+ init.kind() == WasmInitExpr::kArrayInit ? kExprArrayInit
+ : kExprArrayInitStatic));
buffer->write_u32v(init.immediate().index);
buffer->write_u32v(static_cast<uint32_t>(init.operands().size() - 1));
break;
@@ -559,7 +570,8 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kRttSub:
case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub must be emitted first.
- WriteInitializerExpression(buffer, init.operands()[0], kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, init.operands()[0],
+ kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
@@ -571,6 +583,11 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
}
+void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
+ ValueType type) {
+ WriteInitializerExpressionWithEnd(buffer, init, type);
+ buffer->write_u8(kExprEnd);
+}
} // namespace
void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
@@ -584,10 +601,12 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_size(types_.size());
for (const Type& type : types_) {
+ bool has_super = type.supertype != kNoSuperType;
switch (type.kind) {
case Type::kFunctionSig: {
FunctionSig* sig = type.sig;
- buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_u8(has_super ? kWasmFunctionSubtypeCode
+ : kWasmFunctionTypeCode);
buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
WriteValueType(buffer, param);
@@ -596,23 +615,40 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
for (auto ret : sig->returns()) {
WriteValueType(buffer, ret);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kFuncRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kStructType: {
StructType* struct_type = type.struct_type;
- buffer->write_u8(kWasmStructTypeCode);
+ buffer->write_u8(has_super ? kWasmStructSubtypeCode
+ : kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
for (uint32_t i = 0; i < struct_type->field_count(); i++) {
WriteValueType(buffer, struct_type->field(i));
buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kArrayType: {
ArrayType* array_type = type.array_type;
- buffer->write_u8(kWasmArrayTypeCode);
+ buffer->write_u8(has_super ? kWasmArraySubtypeCode
+ : kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
buffer->write_u8(array_type->mutability() ? 1 : 0);
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
}
@@ -705,7 +741,6 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
WriteInitializerExpression(buffer, global.init, global.type);
- buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
@@ -744,31 +779,67 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
FixupSection(buffer, start);
}
- // == emit function table elements ===========================================
- if (indirect_functions_.size() > 0) {
+ // == emit element segments ==================================================
+ if (element_segments_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer->write_u8(1); // count of entries
- buffer->write_u8(0); // table index
- uint32_t first_element = 0;
- while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == kNullIndex) {
- first_element++;
- }
- uint32_t last_element =
- static_cast<uint32_t>(indirect_functions_.size() - 1);
- while (last_element >= first_element &&
- indirect_functions_[last_element] == kNullIndex) {
- last_element--;
- }
- buffer->write_u8(kExprI32Const); // offset
- buffer->write_u32v(first_element);
- buffer->write_u8(kExprEnd);
- uint32_t element_count = last_element - first_element + 1;
- buffer->write_size(element_count);
- for (uint32_t i = first_element; i <= last_element; i++) {
- buffer->write_size(indirect_functions_[i] + function_imports_.size());
+ buffer->write_size(element_segments_.size());
+ for (const WasmElemSegment& segment : element_segments_) {
+ bool is_active = segment.status == WasmElemSegment::kStatusActive;
+ // If this segment is expressible in the backwards-compatible syntax
+ // (before reftypes proposal), we should emit it in that syntax.
+ // This is the case if the segment is active and all entries are function
+ // references. Note that this is currently the only path that allows
+ // kRelativeToImports function indexing mode.
+ // TODO(manoskouk): Remove this logic once reftypes has shipped.
+ bool backwards_compatible =
+ is_active && segment.table_index == 0 &&
+ std::all_of(
+ segment.entries.begin(), segment.entries.end(), [](auto& entry) {
+ return entry.kind ==
+ WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry;
+ });
+ if (backwards_compatible) {
+ buffer->write_u8(0);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ buffer->write_u32v(
+ segment.indexing_mode == WasmElemSegment::kRelativeToImports
+ ? entry.index
+ : entry.index +
+ static_cast<uint32_t>(function_imports_.size()));
+ }
+ } else {
+ DCHECK_EQ(segment.indexing_mode, WasmElemSegment::kRelativeToImports);
+ // If we pick the general syntax, we always explicitly emit the table
+ // index and the type, and use the expressions-as-elements syntax. I.e.
+ // the initial byte is one of 0x05, 0x06, and 0x07.
+ uint8_t kind_mask =
+ segment.status == WasmElemSegment::kStatusActive
+ ? 0b10
+ : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
+ : 0b01;
+ uint8_t expressions_as_elements_mask = 0b100;
+ buffer->write_u8(kind_mask | expressions_as_elements_mask);
+ if (is_active) {
+ buffer->write_u32v(segment.table_index);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ }
+ WriteValueType(buffer, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ uint8_t opcode =
+ entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
+ ? kExprGlobalGet
+ : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
+ ? kExprRefFunc
+ : kExprRefNull;
+ buffer->write_u8(opcode);
+ buffer->write_u32v(entry.index);
+ buffer->write_u8(kExprEnd);
+ }
+ }
}
-
FixupSection(buffer, start);
}
@@ -833,7 +904,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit the section string.
buffer->write_string(base::CStrVector("name"));
// Emit a subsection for the function names.
- buffer->write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunctionCode);
// Emit a placeholder for the subsection length.
size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
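The element-section emission above picks between the legacy encoding (flag byte 0) and the general expressions-as-elements encoding (flag bytes 0x05-0x07). A standalone sketch of just the flag-byte computation (plain C++, not the builder itself):

    #include <cstdint>

    enum class SegmentStatus { kActive, kPassive, kDeclarative };

    // Flag byte for an element segment, following the logic above: plain
    // active funcref segments keep the legacy 0x00 encoding; everything else
    // sets the expressions-as-elements bit (0b100) plus a status kind in the
    // low bits (passive=0b01, active=0b10, declarative=0b11), i.e. one of
    // 0x05, 0x06, 0x07.
    uint8_t ElementSegmentFlag(SegmentStatus status, bool backwards_compatible) {
      if (backwards_compatible) return 0x00;
      uint8_t kind_mask = status == SegmentStatus::kActive        ? 0b10
                          : status == SegmentStatus::kDeclarative ? 0b11
                                                                  : 0b01;
      uint8_t expressions_as_elements_mask = 0b100;
      return kind_mask | expressions_as_elements_mask;
    }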
diff --git a/chromium/v8/src/wasm/wasm-module-builder.h b/chromium/v8/src/wasm/wasm-module-builder.h
index db2091cdba8..7ba140775d8 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.h
+++ b/chromium/v8/src/wasm/wasm-module-builder.h
@@ -207,6 +207,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
+ uint32_t sig_index() { return signature_index_; }
inline FunctionSig* signature();
private:
@@ -245,6 +246,68 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmModuleBuilder(const WasmModuleBuilder&) = delete;
WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
+ // Static representation of wasm element segment (table initializer). This is
+ // different than the version in wasm-module.h.
+ class WasmElemSegment {
+ public:
+ // asm.js gives function indices starting with the first non-imported
+ // function.
+ enum FunctionIndexingMode {
+ kRelativeToImports,
+ kRelativeToDeclaredFunctions
+ };
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ };
+ struct Entry {
+ enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
+ uint32_t index;
+ Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
+ Entry() : kind(kRefNullEntry), index(0) {}
+ };
+
+ // Construct an active segment.
+ WasmElemSegment(Zone* zone, ValueType type, uint32_t table_index,
+ WasmInitExpr offset)
+ : type(type),
+ table_index(table_index),
+ offset(std::move(offset)),
+ entries(zone),
+ status(kStatusActive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ // Construct a passive or declarative segment, which has no table
+ // index or offset.
+ WasmElemSegment(Zone* zone, ValueType type, bool declarative)
+ : type(type),
+ table_index(0),
+ entries(zone),
+ status(declarative ? kStatusDeclarative : kStatusPassive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
+
+ ValueType type;
+ uint32_t table_index;
+ WasmInitExpr offset;
+ FunctionIndexingMode indexing_mode = kRelativeToImports;
+ ZoneVector<Entry> entries;
+ Status status;
+
+ private:
+ // This ensures no {WasmInitExpr} with subexpressions is used, which would
+ // cause a memory leak because those are stored in an std::vector. Such
+ // offset would also be mistyped.
+ bool IsValidOffsetKind(WasmInitExpr::Operator kind) {
+ return kind == WasmInitExpr::kI32Const ||
+ kind == WasmInitExpr::kGlobalGet;
+ }
+ };
+
// Building methods.
uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
base::Vector<const char> module = {});
@@ -255,16 +318,27 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
bool mutability,
base::Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
- uint32_t AddSignature(FunctionSig* sig);
+  // Add an element segment to this {WasmModuleBuilder}. {segment}'s entries
+ // have to be initialized.
+ void AddElementSegment(WasmElemSegment segment);
+ // Helper method to create an active segment with one function. Assumes that
+ // table segment at {table_index} is typed as funcref.
+ void SetIndirectFunction(uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode);
+ // Increase the starting size of the table at {table_index} by {count}. Also
+ // increases the maximum table size if needed. Returns the former starting
+ // size, or the maximum uint32_t value if the maximum table size has been
+ // exceeded.
+ uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
+ // Adds the signature to the module if it does not already exist.
+ uint32_t AddSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType);
+ // Does not deduplicate function signatures.
+ uint32_t ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype = kNoSuperType);
uint32_t AddException(FunctionSig* type);
- uint32_t AddStructType(StructType* type);
- uint32_t AddArrayType(ArrayType* type);
- // In the current implementation, it's supported to have uninitialized slots
- // at the beginning and/or end of the indirect function table, as long as
- // the filled slots form a contiguous block in the middle.
- uint32_t AllocateIndirectFunctions(uint32_t count);
- void SetIndirectFunction(uint32_t indirect, uint32_t direct);
- void SetMaxTableSize(uint32_t max);
+ uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType);
+ uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
@@ -288,10 +362,17 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
+ ValueType GetTableType(uint32_t index) { return tables_[index].type; }
+
+ bool IsSignature(uint32_t index) {
+ return types_[index].kind == Type::kFunctionSig;
+ }
+
FunctionSig* GetSignature(uint32_t index) {
DCHECK(types_[index].kind == Type::kFunctionSig);
return types_[index].sig;
}
+
bool IsStructType(uint32_t index) {
return types_[index].kind == Type::kStructType;
}
@@ -304,10 +385,15 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
}
ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
+ WasmFunctionBuilder* GetFunction(uint32_t index) { return functions_[index]; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
int NumTypes() { return static_cast<int>(types_.size()); }
+ int NumTables() { return static_cast<int>(tables_.size()); }
+
+ int NumFunctions() { return static_cast<int>(functions_.size()); }
+
FunctionSig* GetExceptionType(int index) {
return types_[exceptions_[index]].sig;
}
@@ -317,13 +403,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
- explicit Type(FunctionSig* signature)
- : kind(kFunctionSig), sig(signature) {}
- explicit Type(StructType* struct_type)
- : kind(kStructType), struct_type(struct_type) {}
- explicit Type(ArrayType* array_type)
- : kind(kArrayType), array_type(array_type) {}
+ explicit Type(FunctionSig* signature, uint32_t supertype)
+ : kind(kFunctionSig), supertype(supertype), sig(signature) {}
+ explicit Type(StructType* struct_type, uint32_t supertype)
+ : kind(kStructType), supertype(supertype), struct_type(struct_type) {}
+ explicit Type(ArrayType* array_type, uint32_t supertype)
+ : kind(kArrayType), supertype(supertype), array_type(array_type) {}
Kind kind;
+ uint32_t supertype;
union {
FunctionSig* sig;
StructType* struct_type;
@@ -380,12 +467,11 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmTable> tables_;
ZoneVector<WasmDataSegment> data_segments_;
- ZoneVector<uint32_t> indirect_functions_;
+ ZoneVector<WasmElemSegment> element_segments_;
ZoneVector<WasmGlobal> globals_;
ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
- uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
@@ -393,8 +479,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#if DEBUG
// Once AddExportedImport is called, no more imports can be added.
bool adding_imports_allowed_ = true;
- // Indirect functions must be allocated before adding extra tables.
- bool allocating_indirect_functions_allowed_ = true;
#endif
};
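
Note: the WasmModuleBuilder changes above record an optional supertype per type-section entry and split signature registration into a deduplicating AddSignature and a non-deduplicating ForceAddSignature. A standalone toy sketch of that bookkeeping follows (not V8's implementation; the vector-of-ints signature, the map-based dedup cache, and all names prefixed with "Toy" are assumptions for illustration, only the kNoSuperType sentinel is taken from the patch):

// Toy model of a supertype-aware type section with deduplicating and
// non-deduplicating signature registration.
#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

constexpr uint32_t kNoSuperType = 0xFFFFFFFF;

struct ToyTypeSection {
  struct Entry {
    std::vector<int> sig;  // stand-in for FunctionSig
    uint32_t supertype;
  };
  std::vector<Entry> types;
  std::map<std::vector<int>, uint32_t> signature_map;  // dedup cache

  // Non-deduplicating: always appends a fresh type entry.
  uint32_t ForceAddSignature(const std::vector<int>& sig,
                             uint32_t supertype = kNoSuperType) {
    types.push_back({sig, supertype});
    return static_cast<uint32_t>(types.size() - 1);
  }

  // Deduplicating: returns an existing index for an identical signature.
  uint32_t AddSignature(const std::vector<int>& sig,
                        uint32_t supertype = kNoSuperType) {
    auto it = signature_map.find(sig);
    if (it != signature_map.end()) return it->second;
    return signature_map[sig] = ForceAddSignature(sig, supertype);
  }
};

int main() {
  ToyTypeSection builder;
  std::vector<int> sig = {1, 2};                 // toy "(i32, i32) -> void"
  uint32_t a = builder.AddSignature(sig);
  uint32_t b = builder.AddSignature(sig);        // deduplicated: same index
  uint32_t c = builder.ForceAddSignature(sig);   // forced: fresh index
  assert(a == b && c != a);
}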
diff --git a/chromium/v8/src/wasm/wasm-module-sourcemap.cc b/chromium/v8/src/wasm/wasm-module-sourcemap.cc
index 85a171e5acb..ea03dae8e2f 100644
--- a/chromium/v8/src/wasm/wasm-module-sourcemap.cc
+++ b/chromium/v8/src/wasm/wasm-module-sourcemap.cc
@@ -6,11 +6,18 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
#include "src/api/api.h"
#include "src/base/vlq-base64.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
diff --git a/chromium/v8/src/wasm/wasm-module-sourcemap.h b/chromium/v8/src/wasm/wasm-module-sourcemap.h
index fd8c1117fa7..38c0358f90c 100644
--- a/chromium/v8/src/wasm/wasm-module-sourcemap.h
+++ b/chromium/v8/src/wasm/wasm-module-sourcemap.h
@@ -12,10 +12,13 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
// The class is for decoding and managing source map generated by a WebAssembly
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index 97a31487ea9..0035c00bf2f 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -113,6 +113,23 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
return func_index;
}
+// TODO(7748): Measure whether this iterative implementation is fast enough.
+// We could cache the result on the module, in yet another vector indexed by
+// type index.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index) {
+ uint32_t starting_point = type_index;
+ int depth = 0;
+ while ((type_index = module->supertype(type_index)) != kGenericSuperType) {
+ if (type_index == starting_point) return -1; // Cycle detected.
+ // This is disallowed and will be rejected by validation, but might occur
+ // when this function is called.
+ if (type_index == kNoSuperType) break;
+ depth++;
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) break;
+ }
+ return depth;
+}
+
void LazilyGeneratedNames::AddForTesting(int function_index,
WireBytesRef name) {
base::MutexGuard lock(&mutex_);
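
Note: for a concrete feel of what the GetSubtypingDepth chain walk added above returns, here is a standalone toy version that operates directly on a supertypes vector. The sentinel values mirror the patch; the vector contents and the "Toy" names are made up, and the kV8MaxRttSubtypingDepth cap is omitted for brevity:

// Standalone toy of the supertype-chain walk: depth 0 means no explicit
// supertype, and a self-referential chain reports -1.
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
constexpr uint32_t kNoSuperType = 0xFFFFFFFF;

int ToySubtypingDepth(const std::vector<uint32_t>& supertypes, uint32_t index) {
  uint32_t start = index;
  int depth = 0;
  while ((index = supertypes[index]) != kGenericSuperType) {
    if (index == start) return -1;   // cycle detected
    if (index == kNoSuperType) break;
    depth++;
  }
  return depth;
}

int main() {
  // Type 0 is a root (generic supertype), 1 extends 0, 2 extends 1.
  std::vector<uint32_t> supertypes = {kGenericSuperType, 0, 1};
  assert(ToySubtypingDepth(supertypes, 0) == 0);
  assert(ToySubtypingDepth(supertypes, 2) == 2);
  // A type that (illegally) names itself as supertype is reported as -1.
  assert(ToySubtypingDepth({0}, 0) == -1);
}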
@@ -293,19 +310,23 @@ Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
}
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size) {
+ base::Optional<uint32_t> max_size,
+ bool shared) {
Factory* factory = isolate->factory();
Handle<JSFunction> object_function = isolate->object_function();
Handle<JSObject> object = factory->NewJSObject(object_function);
Handle<String> minimum_string = factory->InternalizeUtf8String("minimum");
Handle<String> maximum_string = factory->InternalizeUtf8String("maximum");
+ Handle<String> shared_string = factory->InternalizeUtf8String("shared");
JSObject::AddProperty(isolate, object, minimum_string,
factory->NewNumberFromUint(min_size), NONE);
if (max_size.has_value()) {
JSObject::AddProperty(isolate, object, maximum_string,
factory->NewNumberFromUint(max_size.value()), NONE);
}
+ JSObject::AddProperty(isolate, object, shared_string,
+ factory->ToBoolean(shared), NONE);
return object;
}
@@ -401,7 +422,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
import_kind = memory_string;
break;
@@ -498,7 +520,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
export_kind = memory_string;
break;
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index d1f874a9085..08a88c4a8e6 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -259,6 +259,11 @@ struct V8_EXPORT_PRIVATE WasmDebugSymbols {
struct WasmTable;
+// End of a chain of explicit supertypes.
+constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
+// Used for types that have no explicit supertype.
+constexpr uint32_t kNoSuperType = 0xFFFFFFFF;
+
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<Zone> signature_zone;
@@ -288,6 +293,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
WireBytesRef name = {0, 0};
std::vector<TypeDefinition> types; // by type index
std::vector<uint8_t> type_kinds; // by type index
+ std::vector<uint32_t> supertypes; // by type index
// Map from each type index to the index of its corresponding canonical type.
// Note: right now, only functions are canonicalized, and arrays and structs
// map to themselves.
@@ -295,9 +301,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_type(uint32_t index) const { return index < types.size(); }
- void add_signature(const FunctionSig* sig) {
+ void add_signature(const FunctionSig* sig, uint32_t supertype) {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
+ supertypes.push_back(supertype);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
canonicalized_type_ids.push_back(canonical_id);
}
@@ -309,9 +316,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].function_sig;
}
- void add_struct_type(const StructType* type) {
+ void add_struct_type(const StructType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for structs.
canonicalized_type_ids.push_back(0);
}
@@ -323,9 +331,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].struct_type;
}
- void add_array_type(const ArrayType* type) {
+ void add_array_type(const ArrayType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for arrays.
canonicalized_type_ids.push_back(0);
}
@@ -337,6 +346,14 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].array_type;
}
+ uint32_t supertype(uint32_t index) const {
+ DCHECK(index < supertypes.size());
+ return supertypes[index];
+ }
+ bool has_supertype(uint32_t index) const {
+ return supertype(index) != kNoSuperType;
+ }
+
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -418,6 +435,12 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset);
// contained within a function.
int GetNearestWasmFunction(const WasmModule* module, uint32_t byte_offset);
+// Gets the explicitly defined subtyping depth for the given type.
+// Returns 0 if the type has no explicit supertype.
+// The result is capped to {kV8MaxRttSubtypingDepth + 1}.
+// Invalid cyclic hierarchies will return -1.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -477,7 +500,8 @@ Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig,
Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
ValueType type);
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size);
+ base::Optional<uint32_t> max_size,
+ bool shared);
Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
uint32_t min_size,
base::Optional<uint32_t> max_size);
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index a75d83df027..be6d7dd6f70 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -186,7 +186,6 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
@@ -559,11 +558,26 @@ int WasmStruct::Size(const wasm::StructType* type) {
Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
}
-int WasmStruct::GcSafeSize(Map map) {
- wasm::StructType* type = GcSafeType(map);
- return Size(type);
+// static
+void WasmStruct::EncodeInstanceSizeInMap(int instance_size, Map map) {
+ // WasmStructs can be bigger than the {map.instance_size_in_words} field
+ // can describe; yet we have to store the instance size somewhere on the
+ // map so that the GC can read it without relying on any other objects
+ // still being around. To solve this problem, we store the instance size
+ // in two other fields that are otherwise unused for WasmStructs.
+ STATIC_ASSERT(0xFFFF - kHeaderSize >
+ wasm::kMaxValueTypeSize * wasm::kV8MaxWasmStructFields);
+ map.SetWasmByte1(instance_size & 0xFF);
+ map.SetWasmByte2(instance_size >> 8);
+}
+
+// static
+int WasmStruct::DecodeInstanceSizeFromMap(Map map) {
+ return (map.WasmByte2() << 8) | map.WasmByte1();
}
+int WasmStruct::GcSafeSize(Map map) { return DecodeInstanceSizeFromMap(map); }
+
wasm::StructType* WasmStruct::type() const { return type(map()); }
Address WasmStruct::RawFieldAddress(int raw_offset) {
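
Note: the EncodeInstanceSizeInMap/DecodeInstanceSizeFromMap pair added above is a plain 16-bit split across the map's two spare byte fields. A self-contained sketch of the round trip, with two plain uint8_t members standing in for the map's WasmByte1/WasmByte2 slots (the struct and value below are assumptions for illustration):

// Round-trip of a 16-bit instance-size encoding over two spare bytes.
#include <cassert>
#include <cstdint>

struct ToyMap {
  uint8_t wasm_byte1 = 0;
  uint8_t wasm_byte2 = 0;
};

void ToyEncodeInstanceSize(int instance_size, ToyMap& map) {
  assert(instance_size >= 0 && instance_size <= 0xFFFF);  // must fit 16 bits
  map.wasm_byte1 = instance_size & 0xFF;
  map.wasm_byte2 = instance_size >> 8;
}

int ToyDecodeInstanceSize(const ToyMap& map) {
  return (map.wasm_byte2 << 8) | map.wasm_byte1;
}

int main() {
  ToyMap map;
  ToyEncodeInstanceSize(4104, map);  // arbitrary struct size in bytes
  assert(ToyDecodeInstanceSize(map) == 4104);
}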
@@ -614,13 +628,24 @@ wasm::ArrayType* WasmArray::GcSafeType(Map map) {
wasm::ArrayType* WasmArray::type() const { return type(map()); }
int WasmArray::SizeFor(Map map, int length) {
- int element_size = type(map)->element_type().element_size_bytes();
+ int element_size = DecodeElementSizeFromMap(map);
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
-int WasmArray::GcSafeSizeFor(Map map, int length) {
- int element_size = GcSafeType(map)->element_type().element_size_bytes();
- return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
+uint32_t WasmArray::element_offset(uint32_t index) {
+ DCHECK_LE(index, length());
+ return WasmArray::kHeaderSize +
+ index * type()->element_type().element_size_bytes();
+}
+
+Address WasmArray::ElementAddress(uint32_t index) {
+ return ptr() + element_offset(index) - kHeapObjectTag;
+}
+
+ObjectSlot WasmArray::ElementSlot(uint32_t index) {
+ DCHECK_LE(index, length());
+ DCHECK(type()->element_type().is_reference());
+ return RawField(kHeaderSize + kTaggedSize * index);
}
// static
@@ -630,11 +655,18 @@ Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
return isolate->factory()->undefined_value();
}
wasm::ValueType element_type = array->type()->element_type();
- uint32_t offset =
- WasmArray::kHeaderSize + index * element_type.element_size_bytes();
- return ReadValueAt(isolate, array, element_type, offset);
+ return ReadValueAt(isolate, array, element_type,
+ array->element_offset(index));
}
+// static
+void WasmArray::EncodeElementSizeInMap(int element_size, Map map) {
+ map.SetWasmByte1(element_size);
+}
+
+// static
+int WasmArray::DecodeElementSizeFromMap(Map map) { return map.WasmByte1(); }
+
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
// Due to the type-specific pointer tags for external pointers, we need to
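
Note: with the per-element size now cached in the map, WasmArray element addressing above reduces to header size plus index times element size. A minimal sketch of that offset arithmetic (the header size below is an assumed placeholder, not WasmArray::kHeaderSize):

// Offset arithmetic for a toy array object: header + index * element size.
#include <cassert>
#include <cstdint>

constexpr uint32_t kToyHeaderSize = 16;  // assumed, not V8's value

uint32_t ToyElementOffset(uint32_t index, uint32_t element_size_bytes) {
  return kToyHeaderSize + index * element_size_bytes;
}

int main() {
  // An array of 8-byte (i64/f64) elements: element 3 starts 24 bytes
  // past the header.
  assert(ToyElementOffset(3, 8) == kToyHeaderSize + 24);
}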
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index a6ff80f6242..8112221c28f 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -12,11 +12,13 @@
#include "src/debug/debug-interface.h"
#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -250,7 +252,7 @@ base::Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
Handle<WasmTableObject> WasmTableObject::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::ValueType type,
uint32_t initial, bool has_maximum, uint32_t maximum,
- Handle<FixedArray>* entries) {
+ Handle<FixedArray>* entries, Handle<Object> initial_value) {
// TODO(7748): Make this work with other types when spec clears up.
{
const WasmModule* module =
@@ -259,9 +261,8 @@ Handle<WasmTableObject> WasmTableObject::New(
}
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
- Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
- backing_store->set(i, null);
+ backing_store->set(i, *initial_value);
}
Handle<Object> max;
@@ -1242,21 +1243,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
CHECK_LE(mem_size, wasm::max_mem_bytes());
#if V8_HOST_ARCH_64_BIT
- uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask64);
#else
// Must handle memory > 2GiB specially.
CHECK_LE(mem_size, size_t{kMaxUInt32});
- uint32_t mem_mask32 =
- (mem_size > 2 * size_t{GB})
- ? 0xFFFFFFFFu
- : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) -
- 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask32);
#endif
}
@@ -1540,7 +1533,8 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (sig_id >= 0) {
wasm::NativeModule* native_module =
instance->module_object().native_module();
- // TODO(wasm): Cache and reuse wrapper code.
+ // TODO(wasm): Cache and reuse wrapper code, to avoid repeated compilation
+ // and permissions switching.
const wasm::WasmFeatures enabled = native_module->enabled_features();
auto resolved = compiler::ResolveWasmImportCall(
callable, sig, instance->module(), enabled);
@@ -1553,10 +1547,11 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (kind == compiler::WasmImportCallKind ::kJSFunctionArityMismatch) {
expected_arity = Handle<JSFunction>::cast(callable)
->shared()
- .internal_formal_parameter_count();
+ .internal_formal_parameter_count_without_receiver();
}
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, false, expected_arity);
+ wasm::CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1693,18 +1688,6 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
}
}
-ObjectSlot WasmArray::ElementSlot(uint32_t index) {
- DCHECK_LE(index, length());
- DCHECK(type()->element_type().is_reference());
- return RawField(kHeaderSize + kTaggedSize * index);
-}
-
-Address WasmArray::ElementAddress(uint32_t index) {
- DCHECK_LE(index, length());
- return ptr() + WasmArray::kHeaderSize +
- index * type()->element_type().element_size_bytes() - kHeapObjectTag;
-}
-
// static
Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
const wasm::FunctionSig* sig,
@@ -2030,7 +2013,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
// method. This does not apply to functions exported from asm.js however.
DCHECK_EQ(is_asm_js_module, js_function->IsConstructor());
shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
+ shared->set_internal_formal_parameter_count(JSParameterCount(arity));
shared->set_script(instance->module_object().script());
return Handle<WasmExportedFunction>::cast(js_function);
}
@@ -2115,7 +2098,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
CK kind = compiler::kDefaultImportCallKind;
if (callable->IsJSFunction()) {
SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
if (expected_arity != parameter_count) {
kind = CK::kJSFunctionArityMismatch;
}
@@ -2143,7 +2127,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Factory::JSFunctionBuilder{isolate, shared, context}
.set_map(function_map)
.Build();
- js_function->shared().set_internal_formal_parameter_count(parameter_count);
+ js_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(parameter_count));
return Handle<WasmJSFunction>::cast(js_function);
}
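
Note: the parameter-count changes in this file follow a convention switch in which SharedFunctionInfo stores the formal parameter count including the receiver; JSParameterCount and the _without_receiver accessor convert between the two views. A toy sketch of that convention, where the single receiver slot is the assumption here rather than a constant copied from V8:

// Toy illustration of a parameter count stored including the receiver.
#include <cassert>

constexpr int kToyReceiverSlots = 1;  // assumed single receiver slot

constexpr int ToyJSParameterCount(int without_receiver) {
  return without_receiver + kToyReceiverSlots;
}
constexpr int ToyWithoutReceiver(int stored_count) {
  return stored_count - kToyReceiverSlots;
}

int main() {
  int arity = 2;                                // wasm signature arity
  int stored = ToyJSParameterCount(arity);      // stored on the shared info: 3
  assert(ToyWithoutReceiver(stored) == arity);  // what call sites compare
}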
@@ -2217,10 +2202,6 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
-static_assert(wasm::kV8MaxWasmArrayLength <=
- (Smi::kMaxValue - WasmArray::kHeaderSize) / kDoubleSize,
- "max Wasm array size must fit into max object size");
-
namespace wasm {
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 11d5c265ed5..3c554575f1f 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -182,9 +182,6 @@ class WasmModuleObject
class WasmTableObject
: public TorqueGeneratedWasmTableObject<WasmTableObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTableObject)
-
inline wasm::ValueType type();
V8_EXPORT_PRIVATE static int Grow(Isolate* isolate,
@@ -194,7 +191,8 @@ class WasmTableObject
V8_EXPORT_PRIVATE static Handle<WasmTableObject> New(
Isolate* isolate, Handle<WasmInstanceObject> instance,
wasm::ValueType type, uint32_t initial, bool has_maximum,
- uint32_t maximum, Handle<FixedArray>* entries);
+ uint32_t maximum, Handle<FixedArray>* entries,
+ Handle<Object> initial_value);
V8_EXPORT_PRIVATE static void AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table,
@@ -266,9 +264,6 @@ class WasmMemoryObject
public:
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
- // Dispatched behavior.
- DECL_PRINTER(WasmMemoryObject)
-
// Add an instance to the internal (weak) list.
V8_EXPORT_PRIVATE static void AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
@@ -356,7 +351,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
- DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
@@ -397,7 +391,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kMemoryStartOffset, kSystemPointerSize) \
V(kMemorySizeOffset, kSizetSize) \
- V(kMemoryMaskOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
@@ -555,9 +548,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
class WasmTagObject
: public TorqueGeneratedWasmTagObject<WasmTagObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTagObject)
-
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this tag object.
bool MatchesSignature(const wasm::FunctionSig* sig);
@@ -842,8 +832,6 @@ class WasmExceptionTag
V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
int index);
- DECL_PRINTER(WasmExceptionTag)
-
TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
};
@@ -903,6 +891,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, WasmObject> {
static inline wasm::StructType* GcSafeType(Map map);
static inline int Size(const wasm::StructType* type);
static inline int GcSafeSize(Map map);
+ static inline void EncodeInstanceSizeInMap(int instance_size, Map map);
+ static inline int DecodeInstanceSizeFromMap(Map map);
// Returns the address of the field at given offset.
inline Address RawFieldAddress(int raw_offset);
@@ -935,19 +925,30 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
// Get the {ObjectSlot} corresponding to the element at {index}. Requires that
// this is a reference array.
- ObjectSlot ElementSlot(uint32_t index);
- wasm::WasmValue GetElement(uint32_t index);
+ inline ObjectSlot ElementSlot(uint32_t index);
+ V8_EXPORT_PRIVATE wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
- static inline int GcSafeSizeFor(Map map, int length);
// Returns boxed value of the array's element.
static inline Handle<Object> GetElement(Isolate* isolate,
Handle<WasmArray> array,
uint32_t index);
- // Returns the Address of the element at {index}.
- Address ElementAddress(uint32_t index);
+ // Returns the offset/Address of the element at {index}.
+ inline uint32_t element_offset(uint32_t index);
+ inline Address ElementAddress(uint32_t index);
+
+ static int MaxLength(const wasm::ArrayType* type) {
+ // The total object size must fit into a Smi, for filler objects. To make
+ // the behavior of Wasm programs independent from the Smi configuration,
+ // we hard-code the smaller of the two supported ranges.
+ int element_shift = type->element_type().element_size_log2();
+ return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) >> element_shift;
+ }
+
+ static inline void EncodeElementSizeInMap(int element_size, Map map);
+ static inline int DecodeElementSizeFromMap(Map map);
DECL_PRINTER(WasmArray)
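
Note: WasmArray::MaxLength above is a pure shift computation that caps the payload so header plus payload fits the smaller (31-bit) Smi range. A worked toy version with assumed constants (the Smi maximum and header size below are illustrative stand-ins, not V8's exact values):

// Toy version of the MaxLength computation.
#include <cassert>

constexpr int kToySmiMaxValue = (1 << 30) - 1;  // assumed 31-bit Smi maximum
constexpr int kToyHeaderSize = 16;              // assumed array header size

constexpr int ToyMaxLength(int element_size_log2) {
  return (kToySmiMaxValue - kToyHeaderSize) >> element_size_log2;
}

int main() {
  // 8-byte (i64/f64) elements: (2^30 - 1 - 16) >> 3 elements fit.
  assert(ToyMaxLength(3) == 134217725);
}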
diff --git a/chromium/v8/src/wasm/wasm-opcodes-inl.h b/chromium/v8/src/wasm/wasm-opcodes-inl.h
index 550d7f4671c..1034b72d917 100644
--- a/chromium/v8/src/wasm/wasm-opcodes-inl.h
+++ b/chromium/v8/src/wasm/wasm-opcodes-inl.h
@@ -382,12 +382,16 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// GC operations.
CASE_OP(StructNewWithRtt, "struct.new_with_rtt")
+ CASE_OP(StructNewDefaultWithRtt, "struct.new_default_with_rtt")
+ CASE_OP(StructNew, "struct.new")
CASE_OP(StructNewDefault, "struct.new_default")
CASE_OP(StructGet, "struct.get")
CASE_OP(StructGetS, "struct.get_s")
CASE_OP(StructGetU, "struct.get_u")
CASE_OP(StructSet, "struct.set")
CASE_OP(ArrayNewWithRtt, "array.new_with_rtt")
+ CASE_OP(ArrayNewDefaultWithRtt, "array.new_default_with_rtt")
+ CASE_OP(ArrayNew, "array.new")
CASE_OP(ArrayNewDefault, "array.new_default")
CASE_OP(ArrayGet, "array.get")
CASE_OP(ArrayGetS, "array.get_s")
@@ -396,6 +400,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ArrayLen, "array.len")
CASE_OP(ArrayCopy, "array.copy")
CASE_OP(ArrayInit, "array.init")
+ CASE_OP(ArrayInitStatic, "array.init_static")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
@@ -403,9 +408,13 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RttSub, "rtt.sub")
CASE_OP(RttFreshSub, "rtt.fresh_sub")
CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefTestStatic, "ref.test_static")
CASE_OP(RefCast, "ref.cast")
+ CASE_OP(RefCastStatic, "ref.cast_static")
CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(BrOnCastStatic, "br_on_cast_static")
CASE_OP(BrOnCastFail, "br_on_cast_fail")
+ CASE_OP(BrOnCastStaticFail, "br_on_cast_static_fail")
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 50e813ad024..d920b7660b0 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -650,13 +650,15 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
#define FOREACH_GC_OPCODE(V) \
V(StructNewWithRtt, 0xfb01, _) \
- V(StructNewDefault, 0xfb02, _) \
+ V(StructNewDefaultWithRtt, 0xfb02, _) \
V(StructGet, 0xfb03, _) \
V(StructGetS, 0xfb04, _) \
V(StructGetU, 0xfb05, _) \
V(StructSet, 0xfb06, _) \
+ V(StructNew, 0xfb07, _) \
+ V(StructNewDefault, 0xfb08, _) \
V(ArrayNewWithRtt, 0xfb11, _) \
- V(ArrayNewDefault, 0xfb12, _) \
+ V(ArrayNewDefaultWithRtt, 0xfb12, _) \
V(ArrayGet, 0xfb13, _) \
V(ArrayGetS, 0xfb14, _) \
V(ArrayGetU, 0xfb15, _) \
@@ -664,6 +666,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(ArrayLen, 0xfb17, _) \
V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
+ V(ArrayInitStatic, 0xfb1a, _) \
+ V(ArrayNew, 0xfb1b, _) \
+ V(ArrayNewDefault, 0xfb1c, _) \
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
@@ -674,6 +679,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefCast, 0xfb41, _) \
V(BrOnCast, 0xfb42, _) \
V(BrOnCastFail, 0xfb43, _) \
+ V(RefTestStatic, 0xfb44, _) \
+ V(RefCastStatic, 0xfb45, _) \
+ V(BrOnCastStatic, 0xfb46, _) \
+ V(BrOnCastStaticFail, 0xfb47, _) \
V(RefIsFunc, 0xfb50, _) \
V(RefIsData, 0xfb51, _) \
V(RefIsI31, 0xfb52, _) \
diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc
index d3165582c83..b0d697924e2 100644
--- a/chromium/v8/src/wasm/wasm-serialization.cc
+++ b/chromium/v8/src/wasm/wasm-serialization.cc
@@ -303,7 +303,7 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(bool);
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
if (code->tier() != ExecutionTier::kTurbofan) {
return sizeof(bool);
}
@@ -334,7 +334,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(false);
return true;
}
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
// Only serialize TurboFan code, as Liftoff code can contain breakpoints or
// non-relocatable constants.
if (code->tier() != ExecutionTier::kTurbofan) {
diff --git a/chromium/v8/src/wasm/wasm-subtyping.cc b/chromium/v8/src/wasm/wasm-subtyping.cc
index d2b7e9fe31d..83b1bbe4629 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.cc
+++ b/chromium/v8/src/wasm/wasm-subtyping.cc
@@ -223,6 +223,8 @@ V8_INLINE bool EquivalentIndices(uint32_t index1, uint32_t index2,
}
}
+} // namespace
+
bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -234,8 +236,10 @@ bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < super_struct->field_count(); i++) {
bool sub_mut = sub_struct->mutability(i);
bool super_mut = super_struct->mutability(i);
@@ -261,8 +265,10 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
super_module->types[supertype_index].array_type;
bool sub_mut = sub_array->mutability();
bool super_mut = super_array->mutability();
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
if (sub_mut != super_mut ||
(sub_mut &&
!EquivalentTypes(sub_array->element_type(), super_array->element_type(),
@@ -294,8 +300,10 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
// Contravariance for params.
@@ -318,7 +326,6 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return true;
}
-} // namespace
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
ValueType subtype, ValueType supertype, const WasmModule* sub_module,
@@ -410,11 +417,35 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
DCHECK(super_heap.is_index());
uint32_t super_index = super_heap.ref_index();
DCHECK(super_module->has_type(super_index));
+ // The {IsSubtypeOf} entry point already has a fast path checking ValueType
+ // equality; here we catch (ref $x) being a subtype of (ref null $x).
+ if (sub_module == super_module && sub_index == super_index) return true;
uint8_t sub_kind = sub_module->type_kinds[sub_index];
if (sub_kind != super_module->type_kinds[super_index]) return false;
+ // Types with explicit supertypes just check those.
+ if (sub_module->has_supertype(sub_index)) {
+ // TODO(7748): Figure out cross-module story.
+ if (sub_module != super_module) return false;
+
+ uint32_t explicit_super = sub_module->supertype(sub_index);
+ while (true) {
+ if (explicit_super == super_index) return true;
+ // Reached the end of the explicitly defined inheritance chain.
+ if (explicit_super == kGenericSuperType) return false;
+ // Types without explicit supertype can't occur here, they would have
+ // failed validation.
+ DCHECK_NE(explicit_super, kNoSuperType);
+ explicit_super = sub_module->supertype(explicit_super);
+ }
+ } else {
+ // A structural type (without explicit supertype) is never a subtype of
+ // a nominal type (with explicit supertype).
+ if (super_module->has_supertype(super_index)) return false;
+ }
+
// Accessing the caches for subtyping and equivalence from multiple background
// threads is protected by a lock.
base::RecursiveMutexGuard type_cache_access(
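
Note: the nominal-subtyping fast path added to IsSubtypeOfImpl above is a plain walk up the explicit supertype chain until either the candidate supertype or the end of the chain is reached. A standalone toy of just that loop (sentinel as in the patch; the data and "Toy" names are made up):

// Standalone toy of the explicit-supertype walk used for nominal types:
// sub is a subtype of super iff super appears on sub's supertype chain.
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;

bool ToyIsNominalSubtype(const std::vector<uint32_t>& supertypes,
                         uint32_t sub_index, uint32_t super_index) {
  if (sub_index == super_index) return true;
  uint32_t current = supertypes[sub_index];
  while (true) {
    if (current == super_index) return true;
    if (current == kGenericSuperType) return false;  // end of the chain
    current = supertypes[current];
  }
}

int main() {
  // 0 is a root, 1 extends 0, 2 extends 1, 3 is an unrelated root.
  std::vector<uint32_t> supertypes = {kGenericSuperType, 0, 1,
                                      kGenericSuperType};
  assert(ToyIsNominalSubtype(supertypes, 2, 0));
  assert(!ToyIsNominalSubtype(supertypes, 2, 3));
}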
diff --git a/chromium/v8/src/wasm/wasm-subtyping.h b/chromium/v8/src/wasm/wasm-subtyping.h
index 59e7935d1f1..53232ca2c24 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.h
+++ b/chromium/v8/src/wasm/wasm-subtyping.h
@@ -97,6 +97,20 @@ V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
// case another WasmModule gets allocated in the same address later.
void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
+// Checks whether {subtype_index} is a legal subtype of {supertype_index}.
+// These are the same checks that {IsSubtypeOf} uses for comparing types without
+// explicitly given supertypes; for validating such explicit supertypes they
+// can be called directly.
+bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+
} // namespace wasm
} // namespace internal
} // namespace v8