Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h  52
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h  117
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h  39
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc  113
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h  95
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc  828
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h  9
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h  27
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h  29
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h  535
-rw-r--r--  deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h  92
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h  85
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h  40
-rw-r--r--  deps/v8/src/wasm/c-api.cc  234
-rw-r--r--  deps/v8/src/wasm/code-space-access.cc  74
-rw-r--r--  deps/v8/src/wasm/code-space-access.h  85
-rw-r--r--  deps/v8/src/wasm/compilation-environment.h  45
-rw-r--r--  deps/v8/src/wasm/decoder.h  12
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h  1911
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc  25
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h  10
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc  165
-rw-r--r--  deps/v8/src/wasm/function-compiler.h  47
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc  194
-rw-r--r--  deps/v8/src/wasm/init-expr-interface.cc  143
-rw-r--r--  deps/v8/src/wasm/init-expr-interface.h  97
-rw-r--r--  deps/v8/src/wasm/leb-helper.h  8
-rw-r--r--  deps/v8/src/wasm/local-decl-encoder.cc  2
-rw-r--r--  deps/v8/src/wasm/memory-protection-key.cc  11
-rw-r--r--  deps/v8/src/wasm/memory-protection-key.h  7
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc  16
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc  596
-rw-r--r--  deps/v8/src/wasm/module-compiler.h  11
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc  423
-rw-r--r--  deps/v8/src/wasm/module-decoder.h  21
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc  688
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc  87
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.h  26
-rw-r--r--  deps/v8/src/wasm/struct-types.h  3
-rw-r--r--  deps/v8/src/wasm/sync-streaming-decoder.cc  7
-rw-r--r--  deps/v8/src/wasm/value-type.h  16
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc  453
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h  366
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc  83
-rw-r--r--  deps/v8/src/wasm/wasm-debug.h  8
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc  135
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h  33
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc  4
-rw-r--r--  deps/v8/src/wasm/wasm-feature-flags.h  5
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.cc  12
-rw-r--r--  deps/v8/src/wasm/wasm-import-wrapper-cache.h  3
-rw-r--r--  deps/v8/src/wasm/wasm-init-expr.cc  84
-rw-r--r--  deps/v8/src/wasm/wasm-init-expr.h  67
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc  147
-rw-r--r--  deps/v8/src/wasm/wasm-limits.h  23
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc  85
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h  44
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc  19
-rw-r--r--  deps/v8/src/wasm/wasm-module.h  43
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h  322
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc  184
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h  283
-rw-r--r--  deps/v8/src/wasm/wasm-objects.tq  56
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes-inl.h  13
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h  71
-rw-r--r--  deps/v8/src/wasm/wasm-result.cc  13
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc  68
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.h  8
-rw-r--r--  deps/v8/src/wasm/wasm-value.h  106
69 files changed, 5916 insertions(+), 3847 deletions(-)
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 9a9932c6f6..e2bd64c88f 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -566,7 +566,8 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
case kF64: {
Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
- vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
+ vmov(reg.fp(), base::Double(value.to_f64_boxed().get_bits()),
+ extra_scratch);
break;
}
default:
@@ -757,11 +758,12 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
- CallRecordWriteStub(dst_addr,
- actual_offset_reg == no_reg ? Operand(offset_imm)
- : Operand(actual_offset_reg),
- RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr,
+ actual_offset_reg == no_reg ? Operand(offset_imm)
+ : Operand(actual_offset_reg),
+ RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -2031,11 +2033,11 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
vmov(dst.gp(), scratch_f);
// Check underflow and NaN.
DwVfpRegister scratch_d = temps.AcquireD();
- vmov(scratch_d, Double(static_cast<double>(INT32_MIN - 1.0)));
+ vmov(scratch_d, base::Double(static_cast<double>(INT32_MIN - 1.0)));
VFPCompareAndSetFlags(src.fp(), scratch_d);
b(trap, le);
// Check overflow.
- vmov(scratch_d, Double(static_cast<double>(INT32_MAX + 1.0)));
+ vmov(scratch_d, base::Double(static_cast<double>(INT32_MAX + 1.0)));
VFPCompareAndSetFlags(src.fp(), scratch_d);
b(trap, ge);
return true;
@@ -2047,11 +2049,11 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
vmov(dst.gp(), scratch_f);
// Check underflow and NaN.
DwVfpRegister scratch_d = temps.AcquireD();
- vmov(scratch_d, Double(static_cast<double>(-1.0)));
+ vmov(scratch_d, base::Double(static_cast<double>(-1.0)));
VFPCompareAndSetFlags(src.fp(), scratch_d);
b(trap, le);
// Check overflow.
- vmov(scratch_d, Double(static_cast<double>(UINT32_MAX + 1.0)));
+ vmov(scratch_d, base::Double(static_cast<double>(UINT32_MAX + 1.0)));
VFPCompareAndSetFlags(src.fp(), scratch_d);
b(trap, ge);
return true;
@@ -3040,8 +3042,8 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
vshr(NeonS32, tmp, liftoff::GetSimd128Register(src), 31);
// Set the i-th bit in lane i of the mask. When ANDed with tmp, negative
// lanes keep their i-th bit set; non-negative lanes become 0.
- vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
- vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ vmov(mask.low(), base::Double((uint64_t)0x0000'0002'0000'0001));
+ vmov(mask.high(), base::Double((uint64_t)0x0000'0008'0000'0004));
vand(tmp, mask, tmp);
vpadd(Neon32, tmp.low(), tmp.low(), tmp.high());
vpadd(Neon32, tmp.low(), tmp.low(), kDoubleRegZero);
@@ -3225,8 +3227,8 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
vshr(NeonS16, tmp, liftoff::GetSimd128Register(src), 15);
// Set the i-th bit in lane i of the mask. When ANDed with tmp, negative
// lanes keep their i-th bit set; non-negative lanes become 0.
- vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
- vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ vmov(mask.low(), base::Double((uint64_t)0x0008'0004'0002'0001));
+ vmov(mask.high(), base::Double((uint64_t)0x0080'0040'0020'0010));
vand(tmp, mask, tmp);
vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
@@ -3535,8 +3537,8 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
vshr(NeonS8, tmp, liftoff::GetSimd128Register(src), 7);
// Set the i-th bit in lane i of the mask. When ANDed with tmp, negative
// lanes keep their i-th bit set; non-negative lanes become 0.
- vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
- vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ vmov(mask.low(), base::Double((uint64_t)0x8040'2010'0804'0201));
+ vmov(mask.high(), base::Double((uint64_t)0x8040'2010'0804'0201));
vand(tmp, mask, tmp);
vext(mask, tmp, tmp, 8);
vzip(Neon8, mask, tmp);
@@ -3830,9 +3832,9 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- base::Memcpy(vals, imms, sizeof(vals));
- vmov(dst.low_fp(), Double(vals[0]));
- vmov(dst.high_fp(), Double(vals[1]));
+ memcpy(vals, imms, sizeof(vals));
+ vmov(dst.low_fp(), base::Double(vals[0]));
+ vmov(dst.high_fp(), base::Double(vals[1]));
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -4232,6 +4234,18 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
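
Aside: the three bitmask hunks above (i32x4, i16x8, i8x16) share one trick: an
arithmetic shift smears each lane's sign bit, a per-lane constant tags lane i
with bit i, and pairwise adds collapse the lanes. A scalar C++ model of the
i32x4 case, as a minimal illustrative sketch only (I32x4BitMaskModel is a
made-up name, not a V8 function):

#include <cstdint>

// Model of emit_i32x4_bitmask: vshr(NeonS32, ..., 31) smears the sign bit
// across each lane; the mask {0x1, 0x2, 0x4, 0x8} (encoded above as
// 0x0000'0002'0000'0001 and 0x0000'0008'0000'0004) tags lane i with bit i;
// the two vpadd instructions then sum the lanes into a single scalar.
uint32_t I32x4BitMaskModel(const int32_t lanes[4]) {
  uint32_t result = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t smeared = static_cast<uint32_t>(lanes[i] >> 31);  // 0 or ~0u
    result += smeared & (1u << i);                             // vand
  }
  return result;  // vpadd, vpadd, vmov
}
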
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 9639a6ffd4..a80c0d3c30 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -128,46 +128,33 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
UseScratchRegisterScope* temps, Register addr,
Register offset, T offset_imm,
bool i64_offset = false) {
- if (offset.is_valid()) {
- if (offset_imm == 0) {
- return i64_offset ? MemOperand(addr.X(), offset.X())
- : MemOperand(addr.X(), offset.W(), UXTW);
- }
- DCHECK_GE(kMaxUInt32, offset_imm);
- if (i64_offset) {
- Register tmp = temps->AcquireX();
- assm->Add(tmp, offset.X(), offset_imm);
- return MemOperand(addr.X(), tmp);
- } else {
- Register tmp = temps->AcquireW();
- assm->Add(tmp, offset.W(), offset_imm);
- return MemOperand(addr.X(), tmp, UXTW);
- }
- }
- return MemOperand(addr.X(), offset_imm);
-}
-
-// Certain load instructions do not support offset (register or immediate).
-// This creates a MemOperand that is suitable for such instructions by adding
-// |addr|, |offset| (if needed), and |offset_imm| into a temporary.
-inline MemOperand GetMemOpWithImmOffsetZero(LiftoffAssembler* assm,
- UseScratchRegisterScope* temps,
- Register addr, Register offset,
- uintptr_t offset_imm) {
+ if (!offset.is_valid()) return MemOperand(addr.X(), offset_imm);
+ Register effective_addr = addr.X();
+ if (offset_imm) {
+ effective_addr = temps->AcquireX();
+ assm->Add(effective_addr, addr.X(), offset_imm);
+ }
+ return i64_offset ? MemOperand(effective_addr, offset.X())
+ : MemOperand(effective_addr, offset.W(), UXTW);
+}
+
+// Compute the effective address (sum of |addr|, |offset| (if given) and
+// |offset_imm|) into a temporary register. This is needed for certain load
+// instructions that do not support an offset (register or immediate).
+// Returns |addr| if both |offset| and |offset_imm| are zero.
+inline Register GetEffectiveAddress(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps,
+ Register addr, Register offset,
+ uintptr_t offset_imm) {
+ if (!offset.is_valid() && offset_imm == 0) return addr;
Register tmp = temps->AcquireX();
if (offset.is_valid()) {
- // offset has passed BoundsCheckMem in liftoff-compiler, and been unsigned
- // extended, so it is fine to use the full width of the register.
- assm->Add(tmp, addr, offset);
- if (offset_imm != 0) {
- assm->Add(tmp, tmp, offset_imm);
- }
- } else {
- if (offset_imm != 0) {
- assm->Add(tmp, addr, offset_imm);
- }
+ // TODO(clemensb): This needs adaptation for memory64.
+ assm->Add(tmp, addr, Operand(offset, UXTW));
+ addr = tmp;
}
- return MemOperand(tmp.X(), 0);
+ if (offset_imm != 0) assm->Add(tmp, addr, offset_imm);
+ return tmp;
}
enum class ShiftDirection : bool { kLeft, kRight };
@@ -339,7 +326,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
#endif
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset, 1);
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
if (frame_size > kStackPageSize) {
// Generate OOL code (at the end of the function, where the current
// assembler is pointing) to do the explicit stack limit check (see
@@ -470,11 +457,18 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier skip_write_barrier) {
- // Store the value.
UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
- StoreTaggedField(src.gp(), dst_op);
+ Operand offset_op = offset_reg.is_valid() ? Operand(offset_reg.W(), UXTW)
+ : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
+ // register, because that's needed in the MemOperand below.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.AcquireX();
+ Add(effective_offset.W(), offset_reg.W(), offset_imm);
+ offset_op = effective_offset;
+ }
+ StoreTaggedField(src.gp(), MemOperand(dst_addr.X(), offset_op));
if (skip_write_barrier || FLAG_disable_write_barriers) return;
@@ -491,12 +485,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
&exit);
- CallRecordWriteStub(dst_addr,
- dst_op.IsRegisterOffset()
- ? Operand(dst_op.regoffset().X())
- : Operand(dst_op.offset()),
- RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -1505,11 +1496,11 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- sxtb(dst, src);
+ sxtb(dst.W(), src.W());
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- sxth(dst, src);
+ sxth(dst.W(), src.W());
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
@@ -1643,8 +1634,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
UseScratchRegisterScope temps(this);
MemOperand src_op =
transform == LoadTransformationKind::kSplat
- ? liftoff::GetMemOpWithImmOffsetZero(this, &temps, src_addr,
- offset_reg, offset_imm)
+ ? MemOperand{liftoff::GetEffectiveAddress(this, &temps, src_addr,
+ offset_reg, offset_imm)}
: liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -1695,8 +1686,8 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
UseScratchRegisterScope temps(this);
- MemOperand src_op = liftoff::GetMemOpWithImmOffsetZero(
- this, &temps, addr, offset_reg, offset_imm);
+ MemOperand src_op{
+ liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
*protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
@@ -1722,8 +1713,8 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
StoreType type, uint8_t lane,
uint32_t* protected_store_pc) {
UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOpWithImmOffsetZero(this, &temps, dst, offset, offset_imm);
+ MemOperand dst_op{
+ liftoff::GetEffectiveAddress(this, &temps, dst, offset, offset_imm)};
if (protected_store_pc) *protected_store_pc = pc_offset();
MachineRepresentation rep = type.mem_rep();
@@ -2848,7 +2839,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- base::Memcpy(vals, imms, sizeof(vals));
+ memcpy(vals, imms, sizeof(vals));
Movi(dst.fp().V16B(), vals[1], vals[0]);
}
@@ -3237,6 +3228,18 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.
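
Aside: the rewritten liftoff::GetEffectiveAddress above returns a plain
register instead of a MemOperand, which is why the LoadTransform, LoadLane and
StoreLane call sites now wrap it in MemOperand{...}. A scalar model of its
contract, as a sketch (EffectiveAddressModel is illustrative; a null pointer
stands in for an invalid offset register):

#include <cstdint>

uintptr_t EffectiveAddressModel(uintptr_t addr, const uint32_t* offset,
                                uintptr_t offset_imm) {
  // Both parts absent: reuse |addr| directly, no scratch register needed.
  if (offset == nullptr && offset_imm == 0) return addr;
  uintptr_t tmp = addr;
  // Add(tmp, addr, Operand(offset, UXTW)): zero-extended, pending the
  // memory64 TODO in the hunk above.
  if (offset != nullptr) tmp = addr + *offset;
  if (offset_imm != 0) tmp += offset_imm;  // Add(tmp, tmp, offset_imm)
  return tmp;
}
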
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 0cd8ddea57..d29963dea1 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -382,8 +382,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
lea(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -1674,7 +1675,7 @@ inline void Emit64BitShiftOperation(
reg_moves.emplace_back(dst, src, kI64);
reg_moves.emplace_back(ecx, amount, kI32);
- assm->ParallelRegisterMove(VectorOf(reg_moves));
+ assm->ParallelRegisterMove(base::VectorOf(reg_moves));
// Do the actual shift.
(assm->*emit_shift)(dst.high_gp(), dst.low_gp());
@@ -3244,7 +3245,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- base::Memcpy(vals, imms, sizeof(vals));
+ memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
uint64_t high = vals[1];
@@ -4807,6 +4808,36 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ if (kind == kF32) {
+ ucomiss(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ ucomisd(src, src);
+ }
+ Label ret;
+ j(parity_odd, &ret);
+ mov(Operand(dst, 0), Immediate(1));
+ bind(&ret);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ if (lane_kind == kF32) {
+ movaps(tmp_fp, src);
+ cmpunordps(tmp_fp, tmp_fp);
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ movapd(tmp_fp, src);
+ cmpunordpd(tmp_fp, tmp_fp);
+ }
+ pmovmskb(tmp_gp, tmp_fp);
+ or_(Operand(dst, 0), tmp_gp);
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
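
Aside: the new ia32 emit_set_if_nan relies on the self-comparison property of
NaN: ucomiss/ucomisd report an unordered result through the parity flag, and
only NaN compares unordered with itself. A scalar model of both helpers, as a
sketch (the *Model names are illustrative, and the SIMD variant simplifies
pmovmskb's byte-granular mask to one bit per lane):

#include <cstdint>

void SetIfNanModel(int32_t* dst, double src) {
  // Non-NaN values take the j(parity_odd, &ret) jump past the store.
  if (src != src) *dst = 1;  // mov(Operand(dst, 0), Immediate(1))
}

void S128SetIfNanModel(int32_t* dst, const float lanes[4]) {
  int32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    // cmpunordps sets a lane to all-ones iff that lane is NaN.
    if (lanes[i] != lanes[i]) mask |= 1 << i;  // pmovmskb (simplified)
  }
  *dst |= mask;  // or_(Operand(dst, 0), tmp_gp): keep earlier hits
}
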
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index f8b01ac960..724bd6f90f 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -15,6 +15,7 @@
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/object-access.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"
@@ -446,6 +447,10 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
SetInstanceCacheRegister(source.cached_instance);
}
+ if (source.cached_mem_start != no_reg) {
+ SetMemStartCacheRegister(source.cached_mem_start);
+ }
+
uint32_t stack_base = stack_depth + num_locals;
uint32_t target_height = stack_base + arity;
uint32_t discarded = source.stack_height() - target_height;
@@ -461,10 +466,11 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// multiple times need to be copied to another free register. Compute the list
// of used registers.
LiftoffRegList used_regs;
- for (auto& src : VectorOf(source_begin, num_locals)) {
+ for (auto& src : base::VectorOf(source_begin, num_locals)) {
if (src.is_reg()) used_regs.set(src.reg());
}
- for (auto& src : VectorOf(source_begin + stack_base + discarded, arity)) {
+ for (auto& src :
+ base::VectorOf(source_begin + stack_base + discarded, arity)) {
if (src.is_reg()) used_regs.set(src.reg());
}
@@ -680,8 +686,9 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
// Materialize constants on top of the stack ({arity} many), and locals.
VarState* stack_base = cache_state_.stack_state.data();
for (auto slots :
- {VectorOf(stack_base + cache_state_.stack_state.size() - arity, arity),
- VectorOf(stack_base, num_locals())}) {
+ {base::VectorOf(stack_base + cache_state_.stack_state.size() - arity,
+ arity),
+ base::VectorOf(stack_base, num_locals())}) {
for (VarState& slot : slots) {
if (!slot.is_const()) continue;
RegClass rc = reg_class_for(slot.kind());
@@ -709,10 +716,13 @@ void LiftoffAssembler::MergeFullStackWith(CacheState& target,
}
// Full stack merging is only done for forward jumps, so we can just clear the
- // instance cache register at the target in case of mismatch.
+ // cache registers at the target in case of mismatch.
if (source.cached_instance != target.cached_instance) {
target.ClearCachedInstanceRegister();
}
+ if (source.cached_mem_start != target.cached_mem_start) {
+ target.ClearCachedMemStartRegister();
+ }
}
void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
@@ -738,27 +748,34 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
cache_state_.stack_state[stack_base + i]);
}
- // Check whether the cached instance needs to be moved to another register.
- // Register moves are executed as part of the {StackTransferRecipe}. Remember
- // whether the register content has to be reloaded after executing the stack
- // transfers.
+ // Check whether the cached instance and/or memory start need to be moved to
+ // another register. Register moves are executed as part of the
+ // {StackTransferRecipe}. Remember whether the register content has to be
+ // reloaded after executing the stack transfers.
bool reload_instance = false;
- // If the registers match, or the destination has no cache register, nothing
- // needs to be done.
- if (cache_state_.cached_instance != target.cached_instance &&
- target.cached_instance != no_reg) {
+ bool reload_mem_start = false;
+ for (auto tuple :
+ {std::make_tuple(&reload_instance, cache_state_.cached_instance,
+ &target.cached_instance),
+ std::make_tuple(&reload_mem_start, cache_state_.cached_mem_start,
+ &target.cached_mem_start)}) {
+ bool* reload = std::get<0>(tuple);
+ Register src_reg = std::get<1>(tuple);
+ Register* dst_reg = std::get<2>(tuple);
+ // If the registers match, or the destination has no cache register, nothing
+ // needs to be done.
+ if (src_reg == *dst_reg || *dst_reg == no_reg) continue;
// On forward jumps, just reset the cached register in the target state.
if (jump_direction == kForwardJump) {
- target.ClearCachedInstanceRegister();
- } else if (cache_state_.cached_instance != no_reg) {
+ target.ClearCacheRegister(dst_reg);
+ } else if (src_reg != no_reg) {
// If the source has the content but in the wrong register, execute a
// register move as part of the stack transfer.
- transfers.MoveRegister(LiftoffRegister{target.cached_instance},
- LiftoffRegister{cache_state_.cached_instance},
- kPointerKind);
+ transfers.MoveRegister(LiftoffRegister{*dst_reg},
+ LiftoffRegister{src_reg}, kPointerKind);
} else {
// Otherwise (the source state has no cached content), we reload later.
- reload_instance = true;
+ *reload = true;
}
}
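
Aside: the MergeStackWith hunk above folds the old instance-only logic and the
new memory-start handling into one loop over tuples. A condensed standalone
model of that pattern, as a sketch (Reg, Same and MergeCacheRegsModel are
hypothetical stand-ins; structured bindings replace the std::get calls):

#include <tuple>

struct Reg { int code = -1; };  // code -1 plays the role of no_reg
inline bool Same(Reg a, Reg b) { return a.code == b.code; }

void MergeCacheRegsModel(bool forward_jump, Reg src_instance,
                         Reg* dst_instance, bool* reload_instance,
                         Reg src_mem, Reg* dst_mem, bool* reload_mem) {
  for (auto [reload, src, dst] :
       {std::make_tuple(reload_instance, src_instance, dst_instance),
        std::make_tuple(reload_mem, src_mem, dst_mem)}) {
    // Registers match, or the target caches nothing: nothing to do.
    if (Same(src, *dst) || Same(*dst, Reg{})) continue;
    if (forward_jump) {
      *dst = Reg{};                  // ClearCacheRegister
    } else if (!Same(src, Reg{})) {
      // transfers.MoveRegister(*dst, src): value exists, wrong register.
    } else {
      *reload = true;                // reload after the stack transfers
    }
  }
}
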
@@ -768,6 +785,21 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
if (reload_instance) {
LoadInstanceFromFrame(target.cached_instance);
}
+ if (reload_mem_start) {
+ // {target.cached_instance} already got restored above, so we can use it
+ // if it exists.
+ Register instance = target.cached_instance;
+ if (instance == no_reg) {
+ // We don't have the instance available yet. Store it into the target
+ // mem_start, so that we can load the mem_start from there.
+ instance = target.cached_mem_start;
+ LoadInstanceFromFrame(instance);
+ }
+ LoadFromInstance(
+ target.cached_mem_start, instance,
+ ObjectAccess::ToTagged(WasmInstanceObject::kMemoryStartOffset),
+ sizeof(size_t));
+ }
}
void LiftoffAssembler::Spill(VarState* slot) {
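
Aside: the reload_mem_start block above reuses a register neatly: when no
register caches the instance, the target's memory-start register temporarily
holds the instance loaded from the frame before being overwritten with the
memory start. A scalar model, as a sketch (InstanceModel and
ReloadMemStartModel are illustrative):

#include <cstdint>

struct InstanceModel { uintptr_t memory_start; };

uintptr_t ReloadMemStartModel(InstanceModel* frame_instance,
                              InstanceModel* cached_instance) {
  InstanceModel* instance = cached_instance;
  if (instance == nullptr) {
    // Borrow the mem-start register: LoadInstanceFromFrame into it first.
    instance = frame_instance;
  }
  // ...then overwrite it with the memory start loaded from the instance
  // (LoadFromInstance at WasmInstanceObject::kMemoryStartOffset).
  return instance->memory_start;
}
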
@@ -798,7 +830,7 @@ void LiftoffAssembler::SpillAllRegisters() {
Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack();
}
- cache_state_.ClearCachedInstanceRegister();
+ cache_state_.ClearAllCacheRegisters();
cache_state_.reset_used_registers();
}
@@ -807,9 +839,21 @@ void LiftoffAssembler::ClearRegister(
LiftoffRegList pinned) {
if (reg == cache_state()->cached_instance) {
cache_state()->ClearCachedInstanceRegister();
+ // We can return immediately. The instance is only used to load information
+ // at the beginning of an instruction when values don't have to be in
+ // specific registers yet. Therefore the instance should never be one of the
+ // {possible_uses}.
+ for (Register* use : possible_uses) {
+ USE(use);
+ DCHECK_NE(reg, *use);
+ }
return;
- }
- if (cache_state()->is_used(LiftoffRegister(reg))) {
+ } else if (reg == cache_state()->cached_mem_start) {
+ cache_state()->ClearCachedMemStartRegister();
+ // The memory start may be among the {possible_uses}, e.g. for an atomic
+ // compare exchange. Therefore it is necessary to iterate over the
+ // {possible_uses} below, and we cannot return early.
+ } else if (cache_state()->is_used(LiftoffRegister(reg))) {
SpillRegister(LiftoffRegister(reg));
}
Register replacement = no_reg;
@@ -905,7 +949,7 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
constexpr size_t kInputShift = 1;
// Spill all cache slots which are not being used as parameters.
- cache_state_.ClearCachedInstanceRegister();
+ cache_state_.ClearAllCacheRegisters();
for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
it >= cache_state_.stack_state.begin() &&
!cache_state_.used_registers.is_empty();
@@ -1039,7 +1083,7 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::ParallelRegisterMove(
- Vector<const ParallelRegisterMoveTuple> tuples) {
+ base::Vector<const ParallelRegisterMoveTuple> tuples) {
StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue;
@@ -1139,13 +1183,15 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
used_regs.set(reg);
}
- if (cache_state_.cached_instance != no_reg) {
- DCHECK(!used_regs.has(cache_state_.cached_instance));
- int liftoff_code =
- LiftoffRegister{cache_state_.cached_instance}.liftoff_code();
- used_regs.set(cache_state_.cached_instance);
- DCHECK_EQ(0, register_use_count[liftoff_code]);
- register_use_count[liftoff_code] = 1;
+ for (Register cache_reg :
+ {cache_state_.cached_instance, cache_state_.cached_mem_start}) {
+ if (cache_reg != no_reg) {
+ DCHECK(!used_regs.has(cache_reg));
+ int liftoff_code = LiftoffRegister{cache_reg}.liftoff_code();
+ used_regs.set(cache_reg);
+ DCHECK_EQ(0, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 1;
+ }
}
bool valid = memcmp(register_use_count, cache_state_.register_use_count,
sizeof(register_use_count)) == 0 &&
@@ -1268,8 +1314,11 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b) {
// edges), we only care that pointer types stay amongst pointer types.
// It's fine if ref/optref overwrite each other.
DCHECK(is_object_reference(b));
+ } else if (is_rtt(a)) {
+ // Same for rtt/rtt_with_depth.
+ DCHECK(is_rtt(b));
} else {
- // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
+ // All other types (primitive numbers, bottom/stmt) must be equal.
DCHECK_EQ(a, b);
}
return true; // Dummy so this can be called via DCHECK.
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index ab82b4fec8..c27653bb95 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -200,6 +200,7 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
Register cached_instance = no_reg;
+ Register cached_mem_start = no_reg;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
@@ -250,31 +251,47 @@ class LiftoffAssembler : public TurboAssembler {
// Volatile registers are registers which are used for caching values that
// can easily be reloaded. Those are returned first if we run out of free
// registers.
- // Note: This interface is a bit more generic than currently needed, in
- // anticipation of more "volatile registers" being added later.
bool has_volatile_register(LiftoffRegList candidates) {
- return cached_instance != no_reg && candidates.has(cached_instance);
+ return (cached_instance != no_reg && candidates.has(cached_instance)) ||
+ (cached_mem_start != no_reg && candidates.has(cached_mem_start));
}
LiftoffRegister take_volatile_register(LiftoffRegList candidates) {
- DCHECK(candidates.has(cached_instance));
- LiftoffRegister ret{cached_instance};
+ DCHECK(has_volatile_register(candidates));
+ Register reg = no_reg;
+ if (cached_instance != no_reg && candidates.has(cached_instance)) {
+ reg = cached_instance;
+ cached_instance = no_reg;
+ } else {
+ DCHECK(candidates.has(cached_mem_start));
+ reg = cached_mem_start;
+ cached_mem_start = no_reg;
+ }
+
+ LiftoffRegister ret{reg};
DCHECK_EQ(1, register_use_count[ret.liftoff_code()]);
register_use_count[ret.liftoff_code()] = 0;
used_registers.clear(ret);
- cached_instance = no_reg;
return ret;
}
- void SetInstanceCacheRegister(Register reg) {
- DCHECK_EQ(no_reg, cached_instance);
- cached_instance = reg;
+ void SetCacheRegister(Register* cache, Register reg) {
+ DCHECK_EQ(no_reg, *cache);
+ *cache = reg;
int liftoff_code = LiftoffRegister{reg}.liftoff_code();
DCHECK_EQ(0, register_use_count[liftoff_code]);
register_use_count[liftoff_code] = 1;
used_registers.set(reg);
}
+ void SetInstanceCacheRegister(Register reg) {
+ SetCacheRegister(&cached_instance, reg);
+ }
+
+ void SetMemStartCacheRegister(Register reg) {
+ SetCacheRegister(&cached_mem_start, reg);
+ }
+
Register TrySetCachedInstanceRegister(LiftoffRegList pinned) {
DCHECK_EQ(no_reg, cached_instance);
LiftoffRegList available_regs =
@@ -290,13 +307,25 @@ class LiftoffAssembler : public TurboAssembler {
return new_cache_reg;
}
- void ClearCachedInstanceRegister() {
- if (cached_instance == no_reg) return;
- int liftoff_code = LiftoffRegister{cached_instance}.liftoff_code();
+ void ClearCacheRegister(Register* cache) {
+ DCHECK(cache == &cached_instance || cache == &cached_mem_start);
+ if (*cache == no_reg) return;
+ int liftoff_code = LiftoffRegister{*cache}.liftoff_code();
DCHECK_EQ(1, register_use_count[liftoff_code]);
register_use_count[liftoff_code] = 0;
- used_registers.clear(cached_instance);
- cached_instance = no_reg;
+ used_registers.clear(*cache);
+ *cache = no_reg;
+ }
+
+ void ClearCachedInstanceRegister() { ClearCacheRegister(&cached_instance); }
+
+ void ClearCachedMemStartRegister() {
+ ClearCacheRegister(&cached_mem_start);
+ }
+
+ void ClearAllCacheRegisters() {
+ ClearCacheRegister(&cached_instance);
+ ClearCacheRegister(&cached_mem_start);
}
void inc_used(LiftoffRegister reg) {
@@ -551,6 +580,8 @@ class LiftoffAssembler : public TurboAssembler {
if (cache_state_.is_free(r)) continue;
if (r.is_gp() && cache_state_.cached_instance == r.gp()) {
cache_state_.ClearCachedInstanceRegister();
+ } else if (r.is_gp() && cache_state_.cached_mem_start == r.gp()) {
+ cache_state_.ClearCachedMemStartRegister();
} else {
SpillRegister(r);
}
@@ -596,11 +627,11 @@ class LiftoffAssembler : public TurboAssembler {
: dst(dst), src(src), kind(kind) {}
};
- void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>);
+ void ParallelRegisterMove(base::Vector<const ParallelRegisterMoveTuple>);
void ParallelRegisterMove(
std::initializer_list<ParallelRegisterMoveTuple> moves) {
- ParallelRegisterMove(VectorOf(moves));
+ ParallelRegisterMove(base::VectorOf(moves));
}
void MoveToReturnLocations(const FunctionSig*,
@@ -653,13 +684,13 @@ class LiftoffAssembler : public TurboAssembler {
int32_t offset_imm, LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier = kNoSkipWriteBarrier);
- inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
- LiftoffRegList pinned) {
+ void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
+ LiftoffRegList pinned) {
int offset = FixedArray::kLengthOffset - kHeapObjectTag;
LoadSmiAsInt32(dst, array, offset, pinned);
}
- inline void LoadSmiAsInt32(LiftoffRegister dst, Register src_addr,
- int32_t offset, LiftoffRegList pinned) {
+ void LoadSmiAsInt32(LiftoffRegister dst, Register src_addr, int32_t offset,
+ LiftoffRegList pinned) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
@@ -822,7 +853,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_u32_to_intptr(Register dst, Register src);
- inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
+ void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
if (kSystemPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
@@ -830,7 +861,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_add(dst, lhs, rhs);
}
}
- inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
+ void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
if (kSystemPointerSize == 8) {
emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
@@ -838,7 +869,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_sub(dst, lhs, rhs);
}
}
- inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
+ void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
if (kSystemPointerSize == 8) {
emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
@@ -846,7 +877,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_and(dst, lhs, rhs);
}
}
- inline void emit_ptrsize_shri(Register dst, Register src, int amount) {
+ void emit_ptrsize_shri(Register dst, Register src, int amount) {
if (kSystemPointerSize == 8) {
emit_i64_shri(LiftoffRegister(dst), LiftoffRegister(src), amount);
} else {
@@ -854,7 +885,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
- inline void emit_ptrsize_addi(Register dst, Register lhs, intptr_t imm) {
+ void emit_ptrsize_addi(Register dst, Register lhs, intptr_t imm) {
if (kSystemPointerSize == 8) {
emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
} else {
@@ -862,8 +893,8 @@ class LiftoffAssembler : public TurboAssembler {
}
}
- inline void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
+ void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
if (kSystemPointerSize == 8) {
emit_i64_set_cond(condition, dst, lhs, rhs);
} else {
@@ -871,7 +902,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
- inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
+ void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
if (kSystemPointerSize == 8) {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
LiftoffRegister(src));
@@ -1425,6 +1456,14 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
+ // Set the i32 at address dst to 1 if src is a NaN.
+ inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
+
+ // Set the i32 at address dst to a non-zero value if src contains a NaN.
+ inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp, DoubleRegister tmp_fp,
+ ValueKind lane_kind);
+
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
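
Aside: the CacheState changes in this header keep one invariant for both
"volatile" cache registers: a cached register is marked used with a use count
of exactly 1, so the allocator can reclaim it cheaply. A condensed model, as a
sketch (CacheStateModel is hypothetical; it drops the LiftoffRegList
bookkeeping and the candidate filter of take_volatile_register):

struct CacheStateModel {
  int cached_instance = -1;   // -1 == no_reg
  int cached_mem_start = -1;
  int register_use_count[32] = {0};

  void Set(int* cache, int reg) {  // SetCacheRegister
    *cache = reg;
    register_use_count[reg] = 1;   // exactly one use: the cache itself
  }
  int TakeVolatile() {             // take_volatile_register
    // Prefer the instance, as above; callers guarantee at least one
    // cache register is set (has_volatile_register).
    int* cache =
        cached_instance != -1 ? &cached_instance : &cached_mem_start;
    int reg = *cache;
    register_use_count[reg] = 0;   // release: the value is cheap to reload
    *cache = -1;
    return reg;
  }
};
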
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 3204f69675..57b6457c77 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -22,6 +22,7 @@
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
@@ -76,12 +77,12 @@ struct assert_field_size {
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef V8_CODE_COMMENTS
-#define DEBUG_CODE_COMMENT(str) \
- do { \
- __ RecordComment(str); \
+#define CODE_COMMENT(str) \
+ do { \
+ __ RecordComment(str); \
} while (false)
#else
-#define DEBUG_CODE_COMMENT(str) ((void)0)
+#define CODE_COMMENT(str) ((void)0)
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
@@ -208,14 +209,15 @@ class DebugSideTableBuilder {
};
// Adds a new entry in regular code.
- void NewEntry(int pc_offset, Vector<DebugSideTable::Entry::Value> values) {
+ void NewEntry(int pc_offset,
+ base::Vector<DebugSideTable::Entry::Value> values) {
entries_.emplace_back(pc_offset, static_cast<int>(values.size()),
GetChangedStackValues(last_values_, values));
}
// Adds a new entry for OOL code, and returns a pointer to a builder for
// modifying that entry.
- EntryBuilder* NewOOLEntry(Vector<DebugSideTable::Entry::Value> values) {
+ EntryBuilder* NewOOLEntry(base::Vector<DebugSideTable::Entry::Value> values) {
constexpr int kNoPcOffsetYet = -1;
ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()),
GetChangedStackValues(last_ool_values_, values));
@@ -251,7 +253,7 @@ class DebugSideTableBuilder {
private:
static std::vector<Value> GetChangedStackValues(
std::vector<Value>& last_values,
- Vector<DebugSideTable::Entry::Value> values) {
+ base::Vector<DebugSideTable::Entry::Value> values) {
std::vector<Value> changed_values;
int old_stack_size = static_cast<int>(last_values.size());
last_values.resize(values.size());
@@ -455,7 +457,9 @@ class LiftoffCompiler {
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
- Vector<const int> breakpoints = {}, int dead_breakpoint = 0)
+ base::Vector<const int> breakpoints = {},
+ int dead_breakpoint = 0, int32_t* max_steps = nullptr,
+ int32_t* nondeterminism = nullptr)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -471,7 +475,9 @@ class LiftoffCompiler {
next_breakpoint_ptr_(breakpoints.begin()),
next_breakpoint_end_(breakpoints.end()),
dead_breakpoint_(dead_breakpoint),
- handlers_(compilation_zone) {
+ handlers_(compilation_zone),
+ max_steps_(max_steps),
+ nondeterminism_(nondeterminism) {
if (breakpoints.empty()) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
}
@@ -485,13 +491,17 @@ class LiftoffCompiler {
handler_table_offset_);
}
- OwnedVector<uint8_t> GetSourcePositionTable() {
+ std::unique_ptr<AssemblerBuffer> ReleaseBuffer() {
+ return asm_.ReleaseBuffer();
+ }
+
+ base::OwnedVector<uint8_t> GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
- OwnedVector<uint8_t> GetProtectedInstructionsData() const {
- return OwnedVector<uint8_t>::Of(
- Vector<const uint8_t>::cast(VectorOf(protected_instructions_)));
+ base::OwnedVector<uint8_t> GetProtectedInstructionsData() const {
+ return base::OwnedVector<uint8_t>::Of(base::Vector<const uint8_t>::cast(
+ base::VectorOf(protected_instructions_)));
}
uint32_t GetTotalFrameSlotCountForGC() const {
@@ -529,9 +539,6 @@ class LiftoffCompiler {
// Lazily update {supported_types_}; then check again.
if (CpuFeatures::SupportsWasmSimd128()) supported_types_.Add(kS128);
- if (FLAG_experimental_liftoff_extern_ref) {
- supported_types_.Add(kExternRefSupported);
- }
if (supported_types_.contains(kind)) return true;
LiftoffBailoutReason bailout_reason;
@@ -550,7 +557,7 @@ class LiftoffCompiler {
default:
UNREACHABLE();
}
- EmbeddedVector<char, 128> buffer;
+ base::EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
@@ -635,7 +642,7 @@ class LiftoffCompiler {
}
void StackCheck(FullDecoder* decoder, WasmCodePosition position) {
- DEBUG_CODE_COMMENT("stack check");
+ CODE_COMMENT("stack check");
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return;
// Loading the limit address can change the stack state, hence do this
@@ -660,7 +667,15 @@ class LiftoffCompiler {
? LiftoffAssembler::CacheState::SpillLocation::kStackSlots
: LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
if (V8_UNLIKELY(for_debugging_)) {
+ // When debugging, we do not just push all registers to the stack, but we
+ // spill them to their proper stack locations such that we can inspect
+ // them.
+ // The only exception is the cached memory start, which we just push
+ // before the stack check and pop afterwards.
regs_to_save = {};
+ if (__ cache_state()->cached_mem_start != no_reg) {
+ regs_to_save.set(__ cache_state()->cached_mem_start);
+ }
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
@@ -695,7 +710,7 @@ class LiftoffCompiler {
}
void TraceFunctionEntry(FullDecoder* decoder) {
- DEBUG_CODE_COMMENT("trace function entry");
+ CODE_COMMENT("trace function entry");
__ SpillAllRegisters();
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -714,7 +729,6 @@ class LiftoffCompiler {
__ CodeEntry();
- DEBUG_CODE_COMMENT("enter frame");
__ EnterFrame(StackFrame::WASM);
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
@@ -739,7 +753,7 @@ class LiftoffCompiler {
if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
- if (num_params) DEBUG_CODE_COMMENT("process parameters");
+ if (num_params) CODE_COMMENT("process parameters");
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
@@ -749,39 +763,41 @@ class LiftoffCompiler {
DCHECK_EQ(input_idx, descriptor_->InputCount());
// Initialize locals beyond parameters.
- if (num_params < __ num_locals()) DEBUG_CODE_COMMENT("init locals");
+ if (num_params < __ num_locals()) CODE_COMMENT("init locals");
if (SpillLocalsInitially(decoder, num_params)) {
+ bool has_refs = false;
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueKind kind = __ local_kind(param_idx);
+ has_refs |= is_reference(kind);
__ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size);
+
+ // Initialize all reference type locals with ref.null.
+ if (has_refs) {
+ Register null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
+ LoadNullValue(null_ref_reg, {});
+ for (uint32_t local_index = num_params; local_index < __ num_locals();
+ ++local_index) {
+ ValueKind kind = __ local_kind(local_index);
+ if (is_reference(kind)) {
+ __ Spill(__ cache_state()->stack_state[local_index].offset(),
+ LiftoffRegister(null_ref_reg), kind);
+ }
+ }
+ }
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueKind kind = __ local_kind(param_idx);
+ // Anything which is not i32 or i64 requires spilling.
+ DCHECK(kind == kI32 || kind == kI64);
__ PushConstant(kind, int32_t{0});
}
}
- if (FLAG_experimental_liftoff_extern_ref) {
- // Initialize all reference type locals with ref.null.
- Register null_ref_reg = no_reg;
- for (uint32_t local_index = num_params; local_index < __ num_locals();
- ++local_index) {
- ValueKind kind = __ local_kind(local_index);
- if (is_reference(kind)) {
- if (null_ref_reg == no_reg) {
- null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
- LoadNullValue(null_ref_reg, {});
- }
- __ Spill(__ cache_state()->stack_state[local_index].offset(),
- LiftoffRegister(null_ref_reg), kind);
- }
- }
- }
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
if (V8_UNLIKELY(debug_sidetable_builder_)) {
@@ -795,7 +811,7 @@ class LiftoffCompiler {
if (FLAG_wasm_dynamic_tiering) {
// TODO(arobin): Avoid spilling registers unconditionally.
__ SpillAllRegisters();
- DEBUG_CODE_COMMENT("dynamic tiering");
+ CODE_COMMENT("dynamic tiering");
LiftoffRegList pinned;
// Get the number of calls array address.
@@ -838,14 +854,18 @@ class LiftoffCompiler {
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
- DEBUG_CODE_COMMENT(
+ CODE_COMMENT(
(std::string("out of line: ") + GetRuntimeStubName(ool->stub)).c_str());
__ bind(ool->label.get());
const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
- const bool is_mem_out_of_bounds =
- ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
- if (is_mem_out_of_bounds && env_->use_trap_handler) {
+ // Only memory OOB traps need a {pc}, but not unconditionally. Static OOB
+ // accesses do not need protected instruction information, hence they also
+ // do not set {pc}.
+ DCHECK_IMPLIES(ool->stub != WasmCode::kThrowWasmTrapMemOutOfBounds,
+ ool->pc == 0);
+
+ if (env_->bounds_checks == kTrapHandler && ool->pc != 0) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
protected_instructions_.emplace_back(
@@ -858,19 +878,19 @@ class LiftoffCompiler {
// In this mode, we never generate stack checks.
DCHECK(!is_stack_check);
__ CallTrapCallbackForTesting();
- DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
return;
}
- // We cannot both push and spill registers.
- DCHECK(ool->regs_to_save.is_empty() || ool->spilled_registers == nullptr);
if (!ool->regs_to_save.is_empty()) {
__ PushRegisters(ool->regs_to_save);
- } else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
+ }
+ if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) {
+ // We should not push and spill the same register.
+ DCHECK(!ool->regs_to_save.has(entry.reg));
__ Spill(entry.offset, entry.reg, entry.kind);
}
}
@@ -980,13 +1000,14 @@ class LiftoffCompiler {
}
}
if (has_breakpoint) {
+ CODE_COMMENT("breakpoint");
EmitBreakpoint(decoder);
// Once we emitted an unconditional breakpoint, we don't need to check
// function entry breaks any more.
did_function_entry_break_checks_ = true;
} else if (!did_function_entry_break_checks_) {
did_function_entry_break_checks_ = true;
- DEBUG_CODE_COMMENT("check function entry break");
+ CODE_COMMENT("check function entry break");
Label do_break;
Label no_break;
Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
@@ -1013,11 +1034,33 @@ class LiftoffCompiler {
// removed. Adding a dead breakpoint here ensures that the source
// position exists, and that the offset to the return address is the
// same as in the old code.
+ CODE_COMMENT("dead breakpoint");
Label cont;
__ emit_jump(&cont);
EmitBreakpoint(decoder);
__ bind(&cont);
}
+ if (V8_UNLIKELY(max_steps_ != nullptr)) {
+ CODE_COMMENT("check max steps");
+ LiftoffRegList pinned;
+ LiftoffRegister max_steps = __ GetUnusedRegister(kGpReg, {});
+ pinned.set(max_steps);
+ LiftoffRegister max_steps_addr = __ GetUnusedRegister(kGpReg, pinned);
+ pinned.set(max_steps_addr);
+ __ LoadConstant(
+ max_steps_addr,
+ WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(max_steps_)));
+ __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load,
+ pinned);
+ Label cont;
+ __ emit_i32_cond_jumpi(kUnequal, &cont, max_steps.gp(), 0);
+ // Abort.
+ Trap(decoder, kTrapUnreachable);
+ __ bind(&cont);
+ __ emit_i32_subi(max_steps.gp(), max_steps.gp(), 1);
+ __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store,
+ pinned);
+ }
}
void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
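
Aside: the max_steps_ instrumentation above is a plain fuel counter: the
budget is checked before it is decremented, so execution traps once the budget
has been exhausted. A scalar model, as a sketch (CheckMaxStepsModel is
illustrative, with std::abort standing in for Trap(decoder, kTrapUnreachable)):

#include <cstdint>
#include <cstdlib>

void CheckMaxStepsModel(int32_t* max_steps) {
  // Load + emit_i32_cond_jumpi(kUnequal, ...): continue while nonzero.
  if (*max_steps == 0) std::abort();  // Trap(decoder, kTrapUnreachable)
  *max_steps -= 1;                    // emit_i32_subi, then Store
}
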
@@ -1025,18 +1068,15 @@ class LiftoffCompiler {
// {EmitDebuggingInfo} stays outlined.
if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
TraceCacheState(decoder);
-#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>(
- decoder->pc());
- }
- DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
-#endif
+ CODE_COMMENT(WasmOpcodes::OpcodeName(
+ WasmOpcodes::IsPrefixOpcode(opcode)
+ ? decoder->read_prefixed_opcode<Decoder::kFullValidation>(
+ decoder->pc())
+ : opcode));
}
void EmitBreakpoint(FullDecoder* decoder) {
- DEBUG_CODE_COMMENT("breakpoint");
DCHECK(for_debugging_);
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
@@ -1109,7 +1149,7 @@ class LiftoffCompiler {
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> values) {
+ Control* block, base::Vector<Value> values) {
DCHECK(block->is_try_catch());
__ emit_jump(block->label.get());
@@ -1128,7 +1168,7 @@ class LiftoffCompiler {
__ cache_state()->Split(block->try_info->catch_state);
- DEBUG_CODE_COMMENT("load caught exception tag");
+ CODE_COMMENT("load caught exception tag");
DCHECK_EQ(__ cache_state()->stack_state.back().kind(), kRef);
LiftoffRegister caught_tag =
GetExceptionProperty(__ cache_state()->stack_state.back(),
@@ -1136,14 +1176,14 @@ class LiftoffCompiler {
LiftoffRegList pinned;
pinned.set(caught_tag);
- DEBUG_CODE_COMMENT("load expected exception tag");
+ CODE_COMMENT("load expected exception tag");
Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, ExceptionsTable, pinned);
__ LoadTaggedPointer(
imm_tag, imm_tag, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
- DEBUG_CODE_COMMENT("compare tags");
+ CODE_COMMENT("compare tags");
Label caught;
__ emit_cond_jump(kEqual, &caught, kI32, imm_tag, caught_tag.gp());
// The tags don't match, merge the current state into the catch state and
@@ -1203,8 +1243,7 @@ class LiftoffCompiler {
}
void CatchAll(FullDecoder* decoder, Control* block) {
- DCHECK(block->is_try_catchall() || block->is_try_catch() ||
- block->is_try_unwind());
+ DCHECK(block->is_try_catchall() || block->is_try_catch());
DCHECK_EQ(decoder->control_at(0), block);
// The catch block is unreachable if no possible throws in the try block
@@ -1232,7 +1271,23 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
+ if (!has_outstanding_op()) {
+ // Unary "equal" means "equals zero".
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
+ } else if (outstanding_op_ == kExprI32Eqz) {
+ // Unary "unequal" means "not equals zero".
+ __ emit_cond_jump(kUnequal, if_block->else_state->label.get(), kI32,
+ value);
+ outstanding_op_ = kNoOutstandingOp;
+ } else {
+ // Otherwise, it's an i32 compare opcode.
+ LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
+ Register rhs = value;
+ Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
+ __ emit_cond_jump(cond, if_block->else_state->label.get(), kI32, lhs,
+ rhs);
+ outstanding_op_ = kNoOutstandingOp;
+ }
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
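
Aside: the If() hunk above extends the compare fusion previously reserved for
br_if: an i32.eqz or i32 comparison recorded in outstanding_op_ is folded into
the conditional jump, negated because the jump targets the else branch. A
model of the three cases, as a sketch (OutstandingOpModel and JumpToElseModel
are illustrative and show only one compare opcode):

#include <cstdint>

enum class OutstandingOpModel { kNone, kI32Eqz, kI32LtS };

bool JumpToElseModel(OutstandingOpModel op, int32_t lhs, int32_t rhs,
                     int32_t value) {
  switch (op) {
    case OutstandingOpModel::kNone:
      return value == 0;        // unary kEqual: "equals zero"
    case OutstandingOpModel::kI32Eqz:
      return value != 0;        // unary kUnequal: "not equals zero"
    case OutstandingOpModel::kI32LtS:
      return !(lhs < rhs);      // Negate(GetCompareCondition(op))
  }
  return false;  // unreachable
}
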
@@ -1248,9 +1303,8 @@ class LiftoffCompiler {
}
DCHECK(!c->is_try_catchall());
if (c->is_try_catch()) {
- // Drop the implicit exception ref.
- DCHECK_EQ(c->label_state.stack_height() + 1,
- __ cache_state()->stack_height());
+ // Drop the implicit exception ref if any. There may be none if this is a
+ // catch-less try block.
__ MergeStackWith(c->label_state, c->br_merge()->arity,
LiftoffAssembler::kForwardJump);
} else {
@@ -1295,7 +1349,7 @@ class LiftoffCompiler {
}
void FinishTry(FullDecoder* decoder, Control* c) {
- DCHECK(c->is_try_catch() || c->is_try_catchall() || c->is_try_unwind());
+ DCHECK(c->is_try_catch() || c->is_try_catchall());
if (!c->end_merge.reached) {
if (c->try_info->catch_reached) {
// Drop the implicit exception ref.
@@ -1319,8 +1373,7 @@ class LiftoffCompiler {
if (c->is_onearmed_if()) {
// Special handling for one-armed ifs.
FinishOneArmedIf(decoder, c);
- } else if (c->is_try_catch() || c->is_try_catchall() ||
- c->is_try_unwind()) {
+ } else if (c->is_try_catch() || c->is_try_catchall()) {
FinishTry(decoder, c);
} else if (c->end_merge.reached) {
// There is a merge already. Merge our state into that, then continue with
@@ -1401,7 +1454,8 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
- template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind,
+ ValueKind result_lane_kind = kVoid, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_kind);
constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -1410,6 +1464,15 @@ class LiftoffCompiler {
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
+ if (V8_UNLIKELY(nondeterminism_)) {
+ auto pinned = LiftoffRegList::ForRegs(dst);
+ if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+ CheckNan(dst, pinned, result_kind);
+ } else if (result_kind == ValueKind::kS128 &&
+ (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+ CheckS128Nan(dst, pinned, result_lane_kind);
+ }
+ }
__ PushRegister(result_kind, dst);
}
@@ -1555,7 +1618,9 @@ class LiftoffCompiler {
kNoTrap)
case kExprI32Eqz:
DCHECK(decoder->lookahead(0, kExprI32Eqz));
- if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) {
+ if ((decoder->lookahead(1, kExprBrIf) ||
+ decoder->lookahead(1, kExprIf)) &&
+ !for_debugging_) {
DCHECK(!has_outstanding_op());
outstanding_op_ = kExprI32Eqz;
break;
@@ -1585,10 +1650,6 @@ class LiftoffCompiler {
nullptr);
});
case kExprRefIsNull: {
- if (!FLAG_experimental_liftoff_extern_ref) {
- unsupported(decoder, kRefTypes, "ref_is_null");
- return;
- }
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister());
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
@@ -1631,6 +1692,8 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
+ static_assert(result_kind != kF32 && result_kind != kF64,
+ "Unhandled nondeterminism for fuzzing.");
__ PushRegister(result_kind, dst);
} else {
// The RHS was not an immediate.
@@ -1639,7 +1702,8 @@ class LiftoffCompiler {
}
template <ValueKind src_kind, ValueKind result_kind,
- bool swap_lhs_rhs = false, typename EmitFn>
+ bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid,
+ typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -1652,6 +1716,15 @@ class LiftoffCompiler {
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
+ if (V8_UNLIKELY(nondeterminism_)) {
+ auto pinned = LiftoffRegList::ForRegs(dst);
+ if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+ CheckNan(dst, pinned, result_kind);
+ } else if (result_kind == ValueKind::kS128 &&
+ (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+ CheckS128Nan(dst, pinned, result_lane_kind);
+ }
+ }
__ PushRegister(result_kind, dst);
}
@@ -1679,7 +1752,8 @@ class LiftoffCompiler {
template <WasmOpcode opcode>
void EmitI32CmpOp(FullDecoder* decoder) {
DCHECK(decoder->lookahead(0, opcode));
- if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) {
+ if ((decoder->lookahead(1, kExprBrIf) || decoder->lookahead(1, kExprIf)) &&
+ !for_debugging_) {
DCHECK(!has_outstanding_op());
outstanding_op_ = opcode;
return;
@@ -2005,10 +2079,6 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
- if (!FLAG_experimental_liftoff_extern_ref) {
- unsupported(decoder, kRefTypes, "ref_null");
- return;
- }
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {});
__ PushRegister(type.kind(), null);
@@ -2033,7 +2103,7 @@ class LiftoffCompiler {
void Drop(FullDecoder* decoder) { __ DropValues(1); }
void TraceFunctionExit(FullDecoder* decoder) {
- DEBUG_CODE_COMMENT("trace function exit");
+ CODE_COMMENT("trace function exit");
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
LiftoffRegList pinned;
@@ -2072,14 +2142,13 @@ class LiftoffCompiler {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
- DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
}
void LocalGet(FullDecoder* decoder, Value* result,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
@@ -2143,16 +2212,16 @@ class LiftoffCompiler {
}
void LocalSet(FullDecoder* decoder, const Value& value,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
LocalSet(imm.index, false);
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
LocalSet(imm.index, true);
}
- void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ void AllocateLocals(FullDecoder* decoder, base::Vector<Value> local_values) {
// TODO(7748): Introduce typed functions bailout reason
unsupported(decoder, kGC, "let");
}
@@ -2248,7 +2317,7 @@ class LiftoffCompiler {
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
LoadType type = LoadType::ForValueKind(kind);
- __ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
+ __ Load(value, addr, no_reg, offset, type, pinned, nullptr, false);
__ PushRegister(kind, value);
}
@@ -2289,11 +2358,11 @@ class LiftoffCompiler {
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueKind(kind);
- __ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
+ __ Store(addr, no_reg, offset, reg, type, {}, nullptr, false);
}
void TableGet(FullDecoder* decoder, const Value&, Value*,
- const TableIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -2317,7 +2386,7 @@ class LiftoffCompiler {
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
- const TableIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
@@ -2380,7 +2449,8 @@ class LiftoffCompiler {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
- DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
+ DCHECK(CheckCompatibleStackSlotTypes(
+ kind, __ cache_state()->stack_state.end()[-2].kind()));
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
@@ -2555,6 +2625,8 @@ class LiftoffCompiler {
Label* AddOutOfLineTrap(FullDecoder* decoder, WasmCode::RuntimeStubId stub,
uint32_t pc = 0) {
+ // Only memory OOB traps need a {pc}.
+ DCHECK_IMPLIES(stub != WasmCode::kThrowWasmTrapMemOutOfBounds, pc == 0);
DCHECK(FLAG_wasm_bounds_checks);
OutOfLineSafepointInfo* safepoint_info = nullptr;
if (V8_UNLIKELY(for_debugging_)) {
@@ -2594,24 +2666,30 @@ class LiftoffCompiler {
Register index_ptrsize =
kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp();
+ // Without bounds checks (testing only), just return the ptrsize index.
+ if (V8_UNLIKELY(env_->bounds_checks == kNoBoundsChecks)) {
+ return index_ptrsize;
+ }
+
+ // Early return for trap handler.
+ DCHECK_IMPLIES(env_->module->is_memory64,
+ env_->bounds_checks == kExplicitBoundsChecks);
if (!force_check && !statically_oob &&
- (!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
+ env_->bounds_checks == kTrapHandler) {
// With trap handlers we should not have a register pair as input (we
// would only return the lower half).
- DCHECK_IMPLIES(env_->use_trap_handler, index.is_gp());
+ DCHECK(index.is_gp());
return index_ptrsize;
}
- DEBUG_CODE_COMMENT("bounds check memory");
+ CODE_COMMENT("bounds check memory");
- // TODO(wasm): This adds protected instruction information for the jump
- // instruction we are about to generate. It would be better to just not add
- // protected instruction info when the pc is 0.
+ // Set {pc} of the OOL code to {0} to avoid generation of protected
+ // instruction information (see {GenerateOutOfLineCode}).
Label* trap_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
- env_->use_trap_handler ? __ pc_offset() : 0);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, 0);
- if (statically_oob) {
+ if (V8_UNLIKELY(statically_oob)) {
__ emit_jump(trap_label);
decoder->SetSucceedingCodeDynamicallyUnreachable();
return no_reg;
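
The old {use_trap_handler} boolean becomes a three-way {bounds_checks} strategy: kTrapHandler relies on guard pages and the signal handler, kExplicitBoundsChecks emits a compare against the memory size (mandatory for memory64, whose index space guard regions cannot cover), and kNoBoundsChecks is for testing configurations only. A hypothetical mirror of the early-return logic above (enumerator names follow the values used in this patch):

    enum BoundsCheckStrategy { kTrapHandler, kExplicitBoundsChecks, kNoBoundsChecks };

    bool NeedsExplicitBoundsCheck(BoundsCheckStrategy strategy, bool force_check,
                                  bool statically_oob) {
      if (strategy == kNoBoundsChecks) return false;  // testing only
      // Trap-handler mode: an OOB access faults on a guard page and control
      // is rewritten to the out-of-line trap, so no compare is emitted.
      if (!force_check && !statically_oob && strategy == kTrapHandler) {
        return false;
      }
      return true;  // emit an explicit compare against the memory size
    }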
@@ -2740,10 +2818,11 @@ class LiftoffCompiler {
Register AddMemoryMasking(Register index, uintptr_t* offset,
LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
+ if (!FLAG_untrusted_code_mitigations ||
+ env_->bounds_checks == kTrapHandler) {
return index;
}
- DEBUG_CODE_COMMENT("mask memory index");
+ CODE_COMMENT("mask memory index");
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
@@ -2779,6 +2858,17 @@ class LiftoffCompiler {
return true;
}
+ Register GetMemoryStart(LiftoffRegList pinned) {
+ Register memory_start = __ cache_state()->cached_mem_start;
+ if (memory_start == no_reg) {
+ memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize,
+ pinned);
+ __ cache_state()->SetMemStartCacheRegister(memory_start);
+ }
+ return memory_start;
+ }
+
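
GetMemoryStart turns the repeated LOAD_INSTANCE_FIELD of MemoryStart into a cached lookup: the first access loads the field and pins it in a dedicated cache register, and later accesses in the same function reuse it. The memoization pattern in miniature (illustrative names; the real cache lives in the assembler's cache state and can be evicted):

    struct CacheState {
      int cached_mem_start = -1;  // -1 stands in for no_reg
    };

    int AllocateAndLoadMemoryStart() { return 7; }  // pretend register #7

    int GetMemoryStart(CacheState* state) {
      if (state->cached_mem_start == -1) {
        // First use in this function: load the field, remember the register.
        state->cached_mem_start = AllocateAndLoadMemoryStart();
      }
      return state->cached_mem_start;  // every later access is free
    }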
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
@@ -2795,10 +2885,9 @@ class LiftoffCompiler {
bool i64_offset = index_val.type == kWasmI64;
if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
__ cache_state()->stack_state.pop_back();
- DEBUG_CODE_COMMENT("load from memory (constant offset)");
+ CODE_COMMENT("load from memory (constant offset)");
LiftoffRegList pinned;
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+ Register mem = pinned.set(GetMemoryStart(pinned));
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
i64_offset);
@@ -2809,20 +2898,19 @@ class LiftoffCompiler {
kDontForceCheck);
if (index == no_reg) return;
- DEBUG_CODE_COMMENT("load from memory");
+ CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+ Register mem = pinned.set(GetMemoryStart(pinned));
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
__ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
i64_offset);
- if (env_->use_trap_handler) {
+ if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
@@ -2858,15 +2946,14 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("load with transformation");
- Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("load with transformation");
+ Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
__ LoadTransform(value, addr, index, offset, type, transform,
&protected_load_pc);
- if (env_->use_trap_handler) {
+ if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
@@ -2899,15 +2986,14 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("load lane");
- Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("load lane");
+ Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
__ LoadLane(result, value, addr, index, offset, type, laneidx,
&protected_load_pc);
- if (env_->use_trap_handler) {
+ if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
@@ -2935,9 +3021,8 @@ class LiftoffCompiler {
auto& index_slot = __ cache_state()->stack_state.back();
if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
__ cache_state()->stack_state.pop_back();
- DEBUG_CODE_COMMENT("store to memory (constant offset)");
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("store to memory (constant offset)");
+ Register mem = pinned.set(GetMemoryStart(pinned));
__ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
} else {
LiftoffRegister full_index = __ PopToRegister(pinned);
@@ -2947,17 +3032,16 @@ class LiftoffCompiler {
pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("store to memory");
+ CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
// (important on ia32).
- Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+ Register mem = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
__ Store(mem, index, offset, value, type, outer_pinned,
&protected_store_pc, true);
- if (env_->use_trap_handler) {
+ if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
@@ -2983,12 +3067,11 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("store lane to memory");
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("store lane to memory");
+ Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
__ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
- if (env_->use_trap_handler) {
+ if (env_->bounds_checks == kTrapHandler) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_store_pc);
}
@@ -3065,12 +3148,14 @@ class LiftoffCompiler {
}
}
- OwnedVector<DebugSideTable::Entry::Value> GetCurrentDebugSideTableEntries(
+ base::OwnedVector<DebugSideTable::Entry::Value>
+ GetCurrentDebugSideTableEntries(
FullDecoder* decoder,
DebugSideTableBuilder::AssumeSpilling assume_spilling) {
auto& stack_state = __ cache_state()->stack_state;
- auto values = OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite(
- stack_state.size());
+ auto values =
+ base::OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite(
+ stack_state.size());
// For function calls, the decoder still has the arguments on the stack, but
// Liftoff already popped them. Hence {decoder->stack_size()} can be bigger
@@ -3088,8 +3173,7 @@ class LiftoffCompiler {
? next_control->stack_depth + __ num_locals() +
next_control->num_exceptions
: __ cache_state()->stack_height();
- bool exception = control->is_try_catch() || control->is_try_catchall() ||
- control->is_try_unwind();
+ bool exception = control->is_try_catch() || control->is_try_catchall();
for (; index < end_index; ++index) {
auto& slot = stack_state[index];
auto& value = values[index];
@@ -3230,7 +3314,8 @@ class LiftoffCompiler {
__ bind(&cont_false);
}
- template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind,
+ ValueKind result_lane_kind = kVoid, typename EmitFn>
void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -3246,6 +3331,16 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
+ if (V8_UNLIKELY(nondeterminism_)) {
+ auto pinned = LiftoffRegList::ForRegs(dst);
+ if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+ CheckNan(dst, pinned, result_kind);
+ } else if (result_kind == ValueKind::kS128 &&
+ (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+ CheckS128Nan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
+ result_lane_kind);
+ }
+ }
__ PushRegister(result_kind, dst);
}
@@ -3274,6 +3369,7 @@ class LiftoffCompiler {
}
}
+ template <ValueKind result_lane_kind>
void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) {
@@ -3285,10 +3381,14 @@ class LiftoffCompiler {
auto sig_v_s = MakeSig::Params(kS128);
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
+ if (V8_UNLIKELY(nondeterminism_)) {
+ auto pinned = LiftoffRegList::ForRegs(dst);
+ CheckS128Nan(dst, pinned, result_lane_kind);
+ }
__ PushRegister(kS128, dst);
}
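
Each rounding opcode now carries its lane kind as a template parameter so the s128 NaN probe can also run after the C fallback. The fallback is an external C function that rounds the 16-byte value in place when no native instruction exists; a plausible scalar shape (a sketch under that assumption, not V8's verbatim implementation):

    #include <cmath>
    #include <cstring>

    // Round each f32 lane of a 128-bit value up, in place.
    void f32x4_ceil_fallback(uint8_t* value /* 16 bytes, in-out */) {
      float lanes[4];
      std::memcpy(lanes, value, sizeof(lanes));
      for (float& lane : lanes) lane = std::ceil(lane);
      std::memcpy(value, lanes, sizeof(lanes));
    }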
- void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector<Value> args,
Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
@@ -3307,9 +3407,9 @@ class LiftoffCompiler {
case wasm::kExprI64x2Splat:
return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
case wasm::kExprF32x4Splat:
- return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat);
+ return EmitUnOp<kF32, kS128, kF32>(&LiftoffAssembler::emit_f32x4_splat);
case wasm::kExprF64x2Splat:
- return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat);
+ return EmitUnOp<kF64, kS128, kF64>(&LiftoffAssembler::emit_f64x2_splat);
case wasm::kExprI8x16Eq:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
case wasm::kExprI8x16Ne:
@@ -3621,92 +3721,108 @@ class LiftoffCompiler {
return EmitUnOp<kS128, kS128>(
&LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
case wasm::kExprF32x4Abs:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
+ return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_abs);
case wasm::kExprF32x4Neg:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_neg);
+ return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_neg);
case wasm::kExprF32x4Sqrt:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sqrt);
+ return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_sqrt);
case wasm::kExprF32x4Ceil:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_ceil,
&ExternalReference::wasm_f32x4_ceil);
case wasm::kExprF32x4Floor:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_floor,
ExternalReference::wasm_f32x4_floor);
case wasm::kExprF32x4Trunc:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_trunc,
ExternalReference::wasm_f32x4_trunc);
case wasm::kExprF32x4NearestInt:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_nearest_int,
ExternalReference::wasm_f32x4_nearest_int);
case wasm::kExprF32x4Add:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_add);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_add);
case wasm::kExprF32x4Sub:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sub);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_sub);
case wasm::kExprF32x4Mul:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_mul);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_mul);
case wasm::kExprF32x4Div:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_div);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_div);
case wasm::kExprF32x4Min:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_min);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_min);
case wasm::kExprF32x4Max:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_max);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_max);
case wasm::kExprF32x4Pmin:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmin);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_pmin);
case wasm::kExprF32x4Pmax:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmax);
+ return EmitBinOp<kS128, kS128, false, kF32>(
+ &LiftoffAssembler::emit_f32x4_pmax);
case wasm::kExprF64x2Abs:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_abs);
+ return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_abs);
case wasm::kExprF64x2Neg:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_neg);
+ return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_neg);
case wasm::kExprF64x2Sqrt:
- return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sqrt);
+ return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_sqrt);
case wasm::kExprF64x2Ceil:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_ceil,
&ExternalReference::wasm_f64x2_ceil);
case wasm::kExprF64x2Floor:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_floor,
ExternalReference::wasm_f64x2_floor);
case wasm::kExprF64x2Trunc:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_trunc,
ExternalReference::wasm_f64x2_trunc);
case wasm::kExprF64x2NearestInt:
- return EmitSimdFloatRoundingOpWithCFallback(
+ return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_nearest_int,
ExternalReference::wasm_f64x2_nearest_int);
case wasm::kExprF64x2Add:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_add);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_add);
case wasm::kExprF64x2Sub:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sub);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_sub);
case wasm::kExprF64x2Mul:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_mul);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_mul);
case wasm::kExprF64x2Div:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_div);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_div);
case wasm::kExprF64x2Min:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_min);
case wasm::kExprF64x2Max:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_max);
case wasm::kExprF64x2Pmin:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmin);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_pmin);
case wasm::kExprF64x2Pmax:
- return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmax);
+ return EmitBinOp<kS128, kS128, false, kF64>(
+ &LiftoffAssembler::emit_f64x2_pmax);
case wasm::kExprI32x4SConvertF32x4:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_i32x4_sconvert_f32x4);
case wasm::kExprI32x4UConvertF32x4:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_i32x4_uconvert_f32x4);
case wasm::kExprF32x4SConvertI32x4:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_sconvert_i32x4);
case wasm::kExprF32x4UConvertI32x4:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_uconvert_i32x4);
case wasm::kExprI8x16SConvertI16x8:
return EmitBinOp<kS128, kS128>(
@@ -3761,16 +3877,16 @@ class LiftoffCompiler {
case wasm::kExprI64x2Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
case wasm::kExprF64x2ConvertLowI32x4S:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_convert_low_i32x4_s);
case wasm::kExprF64x2ConvertLowI32x4U:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_convert_low_i32x4_u);
case wasm::kExprF64x2PromoteLowF32x4:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_promote_low_f32x4);
case wasm::kExprF32x4DemoteF64x2Zero:
- return EmitUnOp<kS128, kS128>(
+ return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_demote_f64x2_zero);
case wasm::kExprI32x4TruncSatF64x2SZero:
return EmitUnOp<kS128, kS128>(
@@ -3824,7 +3940,7 @@ class LiftoffCompiler {
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
- const Vector<Value> inputs, Value* result) {
+ const base::Vector<Value> inputs, Value* result) {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
@@ -3902,7 +4018,7 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
uint8_t shuffle[kSimd128Size];
- base::Memcpy(shuffle, imm.value, sizeof(shuffle));
+ memcpy(shuffle, imm.value, sizeof(shuffle));
bool is_swizzle;
bool needs_swap;
wasm::SimdShuffle::CanonicalizeShuffle(lhs == rhs, shuffle, &needs_swap,
@@ -4120,7 +4236,7 @@ class LiftoffCompiler {
LiftoffAssembler::VarState& exception_var,
const WasmException* exception) {
LiftoffRegList pinned;
- DEBUG_CODE_COMMENT("get exception values");
+ CODE_COMMENT("get exception values");
LiftoffRegister values_array = GetExceptionProperty(
exception_var, RootIndex::kwasm_exception_values_symbol);
pinned.set(values_array);
@@ -4141,7 +4257,7 @@ class LiftoffCompiler {
__ emit_jump(&skip_handler);
// Handler: merge into the catch state, and jump to the catch body.
- DEBUG_CODE_COMMENT("-- landing pad --");
+ CODE_COMMENT("-- landing pad --");
__ bind(handler.get());
__ ExceptionHandler();
__ PushException();
@@ -4165,7 +4281,7 @@ class LiftoffCompiler {
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
- const Vector<Value>& /* args */) {
+ const base::Vector<Value>& /* args */) {
LiftoffRegList pinned;
// Load the encoded size in a register for the builtin call.
@@ -4189,7 +4305,7 @@ class LiftoffCompiler {
// Now store the exception values in the FixedArray. Do this from last to
// first value, such that we can just pop them from the value stack.
- DEBUG_CODE_COMMENT("fill values array");
+ CODE_COMMENT("fill values array");
int index = encoded_size;
auto* sig = imm.exception->sig;
for (size_t param_idx = sig->parameter_count(); param_idx > 0;
@@ -4200,7 +4316,7 @@ class LiftoffCompiler {
DCHECK_EQ(0, index);
// Load the exception tag.
- DEBUG_CODE_COMMENT("load exception tag");
+ CODE_COMMENT("load exception tag");
LiftoffRegister exception_tag =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), ExceptionsTable, pinned);
@@ -4233,9 +4349,8 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("atomic store to memory");
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("atomic store to memory");
+ Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
__ AtomicStore(addr, index, offset, value, type, outer_pinned);
@@ -4257,9 +4372,8 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
- DEBUG_CODE_COMMENT("atomic load from memory");
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ CODE_COMMENT("atomic load from memory");
+ Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ AtomicLoad(value, addr, index, offset, type, pinned);
@@ -4306,8 +4420,7 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
__ PushRegister(result_kind, result);
@@ -4363,8 +4476,7 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4377,7 +4489,7 @@ class LiftoffCompiler {
void CallRuntimeStub(WasmCode::RuntimeStubId stub_id, const ValueKindSig& sig,
std::initializer_list<LiftoffAssembler::VarState> params,
int position) {
- DEBUG_CODE_COMMENT(
+ CODE_COMMENT(
(std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str());
auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
RuntimeStubIdToBuiltinName(stub_id));
@@ -4555,7 +4667,8 @@ class LiftoffCompiler {
V(I64AtomicCompareExchange16U, kI64Store16) \
V(I64AtomicCompareExchange32U, kI64Store32)
- void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode,
+ base::Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
switch (opcode) {
#define ATOMIC_STORE_OP(name, type) \
@@ -4624,7 +4737,7 @@ class LiftoffCompiler {
LiftoffRegister segment_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
+ __ LoadConstant(segment_index, WasmValue(imm.data_segment.index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
auto sig =
@@ -4640,7 +4753,7 @@ class LiftoffCompiler {
__ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
- void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
+ void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
LiftoffRegList pinned;
Register seg_size_array =
@@ -4709,7 +4822,7 @@ class LiftoffCompiler {
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
- Vector<Value> args) {
+ base::Vector<Value> args) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -4719,7 +4832,7 @@ class LiftoffCompiler {
LiftoffRegister segment_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LoadSmi(segment_index_reg, imm.elem_segment_index);
+ LoadSmi(segment_index_reg, imm.element_segment.index);
LiftoffAssembler::VarState segment_index(kPointerKind, segment_index_reg,
0);
@@ -4738,7 +4851,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
+ void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
LiftoffRegList pinned;
Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -4758,7 +4871,7 @@ class LiftoffCompiler {
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
- Vector<Value> args) {
+ base::Vector<Value> args) {
LiftoffRegList pinned;
LiftoffRegister table_dst_index_reg =
@@ -4788,7 +4901,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
- void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm,
const Value&, const Value&, Value* result) {
LiftoffRegList pinned;
@@ -4813,7 +4926,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
- void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm,
Value*) {
// We have to look up instance->tables[table_index].length.
@@ -4840,7 +4953,7 @@ class LiftoffCompiler {
__ PushRegister(kI32, LiftoffRegister(result));
}
- void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm,
const Value&, const Value&, const Value&) {
LiftoffRegList pinned;
@@ -4891,18 +5004,11 @@ class LiftoffCompiler {
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
- if (imm.struct_type->field_count() == 0) {
- static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
- WasmStruct::kHeaderSize == kTaggedSize,
- "empty structs need exactly one padding field");
- ValueKind field_kind = ValueKind::kRef;
- LiftoffRegister value = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LoadNullValue(value.gp(), pinned);
- StoreObjectField(obj.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize),
- value, pinned, field_kind);
- pinned.clear(value);
- }
+ // If this assert fails, then initialization of the padding field might
+ // be necessary.
+ static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
+ WasmStruct::kHeaderSize == 2 * kTaggedSize,
+ "empty struct might require initialization of padding field");
__ PushRegister(kRef, obj);
}
@@ -4919,12 +5025,12 @@ class LiftoffCompiler {
}
void StructGet(FullDecoder* decoder, const Value& struct_obj,
- const FieldIndexImmediate<validate>& field, bool is_signed,
+ const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
- const StructType* struct_type = field.struct_index.struct_type;
- ValueKind field_kind = struct_type->field(field.index).kind();
+ const StructType* struct_type = field.struct_imm.struct_type;
+ ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
if (!CheckSupportedType(decoder, field_kind, "field load")) return;
- int offset = StructFieldOffset(struct_type, field.index);
+ int offset = StructFieldOffset(struct_type, field.field_imm.index);
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
@@ -4936,11 +5042,11 @@ class LiftoffCompiler {
}
void StructSet(FullDecoder* decoder, const Value& struct_obj,
- const FieldIndexImmediate<validate>& field,
+ const FieldImmediate<validate>& field,
const Value& field_value) {
- const StructType* struct_type = field.struct_index.struct_type;
- ValueKind field_kind = struct_type->field(field.index).kind();
- int offset = StructFieldOffset(struct_type, field.index);
+ const StructType* struct_type = field.struct_imm.struct_type;
+ ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
+ int offset = StructFieldOffset(struct_type, field.field_imm.index);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
@@ -4955,7 +5061,7 @@ class LiftoffCompiler {
LiftoffRegister length =
__ LoadToRegister(__ cache_state()->stack_state.end()[-2], {});
Label* trap_label =
- AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds);
+ AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength));
}
@@ -4971,46 +5077,49 @@ class LiftoffCompiler {
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
- CallRuntimeStub(WasmCode::kWasmAllocateArrayWithRtt,
- MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
- {rtt_var, length_var, elem_size_var},
- decoder->position());
+ WasmCode::RuntimeStubId stub_id =
+ initial_value_on_stack
+ ? WasmCode::kWasmAllocateArray_Uninitialized
+ : is_reference(elem_kind) ? WasmCode::kWasmAllocateArray_InitNull
+ : WasmCode::kWasmAllocateArray_InitZero;
+ CallRuntimeStub(
+ stub_id, MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
+ {rtt_var, length_var, elem_size_var}, decoder->position());
// Drop the RTT.
__ cache_state()->stack_state.pop_back(1);
}
LiftoffRegister obj(kReturnRegister0);
- LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
- LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
- LiftoffRegister value = initial_value_on_stack
- ? pinned.set(__ PopToRegister(pinned))
- : pinned.set(__ GetUnusedRegister(
- reg_class_for(elem_kind), pinned));
- if (!initial_value_on_stack) {
+ if (initial_value_on_stack) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+ LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+
+ // Initialize the array's elements.
+ LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(
+ offset,
+ WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
+ LiftoffRegister end_offset = length;
+ if (element_size_log2(elem_kind) != 0) {
+ __ emit_i32_shli(end_offset.gp(), length.gp(),
+ element_size_log2(elem_kind));
+ }
+ __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
+ Label loop, done;
+ __ bind(&loop);
+ __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
+ end_offset.gp());
+ StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
+ __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
+ __ emit_jump(&loop);
+
+ __ bind(&done);
+ } else {
if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
- SetDefaultValue(value, elem_kind, pinned);
+ // Drop the length.
+ __ cache_state()->stack_state.pop_back(1);
}
-
- // Initialize the array's elements.
- LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(
- offset,
- WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
- LiftoffRegister end_offset = length;
- if (element_size_log2(elem_kind) != 0) {
- __ emit_i32_shli(end_offset.gp(), length.gp(),
- element_size_log2(elem_kind));
- }
- __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
- Label loop, done;
- __ bind(&loop);
- __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
- end_offset.gp());
- StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
- __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
- __ emit_jump(&loop);
-
- __ bind(&done);
__ PushRegister(kRef, obj);
}
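
Only the value-initialized path keeps the emitted fill loop; the default-value cases are folded into the new allocation stubs (Uninitialized, InitNull, InitZero), which drop the per-element loop entirely. What the remaining loop computes, as a self-contained C++ sketch (byte offsets stand in for the tagged WasmArray layout):

    #include <cstdint>
    #include <cstring>

    void InitElements(uint8_t* obj, uint32_t header_size, uint32_t length,
                      uint32_t elem_size, const void* value) {
      uint32_t offset = header_size;                  // first element
      uint32_t end_offset = header_size + length * elem_size;
      while (offset < end_offset) {                   // emitted as a loop
        std::memcpy(obj + offset, value, elem_size);  // StoreObjectField
        offset += elem_size;
      }
    }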
@@ -5080,6 +5189,28 @@ class LiftoffCompiler {
__ PushRegister(kI32, len);
}
+ void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
+ const Value& src, const Value& src_index,
+ const Value& length) {
+ CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
+ MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
+ // Builtin parameter order:
+ // [dst_index, src_index, length, dst, src].
+ {__ cache_state()->stack_state.end()[-4],
+ __ cache_state()->stack_state.end()[-2],
+ __ cache_state()->stack_state.end()[-1],
+ __ cache_state()->stack_state.end()[-5],
+ __ cache_state()->stack_state.end()[-3]},
+ decoder->position());
+ __ cache_state()->stack_state.pop_back(5);
+ }
+
+ void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ const base::Vector<Value>& elements, const Value& rtt,
+ Value* result) {
+ UNREACHABLE();
+ }
+
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
constexpr static int kI31To32BitSmiShift = 33;
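
The constant is the sum of those three bit counts: a 31-bit payload is parked in the upper bits of the 64-bit word, so a left shift by 33 encodes it and an arithmetic (signed) or logical (unsigned) right shift by 33 reads it back. Illustrative helpers (not V8 API; assumes the usual arithmetic right shift for signed values):

    #include <cstdint>

    int64_t EncodeI31(int32_t payload) {  // bit 31 of {payload} is truncated
      return static_cast<int64_t>(static_cast<uint64_t>(payload) << 33);
    }
    int32_t I31GetS(int64_t word) {       // sign-extending read
      return static_cast<int32_t>(word >> 33);
    }
    uint32_t I31GetU(int64_t word) {      // zero-extending read
      return static_cast<uint32_t>(static_cast<uint64_t>(word) >> 33);
    }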
@@ -5130,7 +5261,7 @@ class LiftoffCompiler {
}
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
- Value* result) {
+ Value* result, WasmRttSubMode mode) {
ValueKind parent_value_kind = parent.type.kind();
ValueKind rtt_value_kind = kRttWithDepth;
LiftoffAssembler::VarState parent_var =
@@ -5138,8 +5269,11 @@ class LiftoffCompiler {
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(type_reg, WasmValue(type_index));
LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
+ WasmCode::RuntimeStubId target = mode == WasmRttSubMode::kCanonicalize
+ ? WasmCode::kWasmAllocateRtt
+ : WasmCode::kWasmAllocateFreshRtt;
CallRuntimeStub(
- WasmCode::kWasmAllocateRtt,
+ target,
MakeSig::Returns(rtt_value_kind).Params(kI32, parent_value_kind),
{type_var, parent_var}, decoder->position());
// Drop the parent RTT.
@@ -5441,7 +5575,7 @@ class LiftoffCompiler {
template <TypeChecker type_checker>
void BrOnAbstractType(const Value& object, FullDecoder* decoder,
- uint32_t br_depth, ValueKind result_kind) {
+ uint32_t br_depth) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (br_depth != decoder->control_depth() - 1) {
@@ -5449,36 +5583,72 @@ class LiftoffCompiler {
decoder->control_at(br_depth)->br_merge()->arity);
}
- Label match, no_match;
+ Label no_match;
LiftoffRegister obj_reg =
(this->*type_checker)(object, &no_match, {}, no_reg);
- __ bind(&match);
- __ PushRegister(result_kind, obj_reg);
+ __ PushRegister(kRef, obj_reg);
BrOrRet(decoder, br_depth, 0);
__ bind(&no_match);
- // Drop the branch's value, restore original value.
- Drop(decoder);
- __ PushRegister(object.type.kind(), obj_reg);
+ }
+
+ template <TypeChecker type_checker>
+ void BrOnNonAbstractType(const Value& object, FullDecoder* decoder,
+ uint32_t br_depth) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (br_depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(br_depth)->br_merge()->arity);
+ }
+
+ Label no_match, end;
+ LiftoffRegister obj_reg =
+ (this->*type_checker)(object, &no_match, {}, no_reg);
+ __ PushRegister(kRef, obj_reg);
+ __ emit_jump(&end);
+
+ __ bind(&no_match);
+ BrOrRet(decoder, br_depth, 0);
+
+ __ bind(&end);
}
void BrOnData(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
- br_depth, kRef);
+ br_depth);
}
void BrOnFunc(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
- br_depth, kRef);
+ br_depth);
}
void BrOnI31(FullDecoder* decoder, const Value& object,
Value* /* value_on_branch */, uint32_t br_depth) {
return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
- br_depth, kRef);
+ br_depth);
+ }
+
+ void BrOnNonData(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnNonAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
+ br_depth);
+ }
+
+ void BrOnNonFunc(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnNonAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
+ br_depth);
+ }
+
+ void BrOnNonI31(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnNonAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
+ br_depth);
}
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
@@ -5580,7 +5750,7 @@ class LiftoffCompiler {
Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register indirect_function_table = no_reg;
- if (imm.table_index != 0) {
+ if (imm.table_imm.index != 0) {
Register indirect_function_tables =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_tables,
@@ -5589,7 +5759,7 @@ class LiftoffCompiler {
indirect_function_table = indirect_function_tables;
__ LoadTaggedPointer(
indirect_function_table, indirect_function_tables, no_reg,
- ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_index),
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_imm.index),
pinned);
}
@@ -5598,13 +5768,13 @@ class LiftoffCompiler {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds);
uint32_t canonical_sig_num =
- env_->module->canonicalized_type_ids[imm.sig_index];
+ env_->module->canonicalized_type_ids[imm.sig_imm.index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
// Compare against table size stored in
// {instance->indirect_function_table_size}.
- if (imm.table_index == 0) {
+ if (imm.table_imm.index == 0) {
LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
pinned);
} else {
@@ -5618,7 +5788,7 @@ class LiftoffCompiler {
// Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) {
- DEBUG_CODE_COMMENT("Mask indirect call index");
+ CODE_COMMENT("Mask indirect call index");
// mask = ((index - size) & ~index) >> 31
// Reuse allocated registers; note: size is still stored in {tmp_const}.
Register diff = table;
@@ -5638,9 +5808,9 @@ class LiftoffCompiler {
__ emit_i32_and(index, index, mask);
}
- DEBUG_CODE_COMMENT("Check indirect call signature");
+ CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- if (imm.table_index == 0) {
+ if (imm.table_imm.index == 0) {
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds,
kSystemPointerSize, pinned);
} else {
@@ -5664,7 +5834,7 @@ class LiftoffCompiler {
tmp_const);
// At this point {index} has already been multiplied by 4.
- DEBUG_CODE_COMMENT("Execute indirect call");
+ CODE_COMMENT("Execute indirect call");
if (kTaggedSize != kInt32Size) {
DCHECK_EQ(kTaggedSize, kInt32Size * 2);
// Multiply {index} by another 2 to represent kTaggedSize items.
@@ -5673,7 +5843,7 @@ class LiftoffCompiler {
// At this point {index} has already been multiplied by kTaggedSize.
// Load the instance from {instance->ift_instances[key]}
- if (imm.table_index == 0) {
+ if (imm.table_imm.index == 0) {
LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
} else {
__ LoadTaggedPointer(
@@ -5695,7 +5865,7 @@ class LiftoffCompiler {
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
- if (imm.table_index == 0) {
+ if (imm.table_imm.index == 0) {
LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
kSystemPointerSize, pinned);
} else {
@@ -5824,8 +5994,12 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(
WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
pinned);
+#ifdef V8_EXTERNAL_CODE_SPACE
+ __ LoadCodeDataContainerEntry(target.gp(), target.gp());
+#else
__ emit_ptrsize_addi(target.gp(), target.gp(),
wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
+#endif
// Fall through to {perform_call}.
__ bind(&perform_call);
@@ -5949,24 +6123,11 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset);
__ Load(tmp, map, no_reg, kInstanceTypeOffset, LoadType::kI32Load16U,
pinned);
- // We're going to test a range of instance types with a single unsigned
- // comparison. Statically assert that this is safe, i.e. that there are
- // no instance types between array and struct types that might possibly
- // occur (i.e. internal types are OK, types of Wasm objects are not).
- // At the time of this writing:
- // WASM_ARRAY_TYPE = 180
- // WASM_CAPI_FUNCTION_DATA_TYPE = 181
- // WASM_STRUCT_TYPE = 182
- // The specific values don't matter; the relative order does.
- static_assert(
- WASM_STRUCT_TYPE == static_cast<InstanceType>(WASM_ARRAY_TYPE + 2),
- "Relying on specific InstanceType values here");
- static_assert(WASM_CAPI_FUNCTION_DATA_TYPE ==
- static_cast<InstanceType>(WASM_ARRAY_TYPE + 1),
- "Relying on specific InstanceType values here");
- __ emit_i32_subi(tmp.gp(), tmp.gp(), WASM_ARRAY_TYPE);
+ // We're going to test a range of WasmObject instance types with a single
+ // unsigned comparison.
+ __ emit_i32_subi(tmp.gp(), tmp.gp(), FIRST_WASM_OBJECT_TYPE);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, not_data_ref, tmp.gp(),
- WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
+ LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE);
}
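
The rewrite keeps the single-unsigned-comparison trick but derives its bounds from the FIRST_/LAST_WASM_OBJECT_TYPE range markers instead of hard-coded instance-type values, so the static_asserts pinning the relative order are no longer needed. The trick in isolation:

    #include <cstdint>

    // One unsigned compare covers both bounds: a {type} below {first} wraps
    // around to a huge unsigned value after the subtraction.
    bool InRange(uint32_t type, uint32_t first, uint32_t last) {
      return (type - first) <= (last - first);
    }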
void MaybeOSR() {
@@ -5985,10 +6146,34 @@ class LiftoffCompiler {
__ FinishCall(sig, call_descriptor);
}
+ void CheckNan(LiftoffRegister src, LiftoffRegList pinned, ValueKind kind) {
+ DCHECK(kind == ValueKind::kF32 || kind == ValueKind::kF64);
+ auto nondeterminism_addr = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadConstant(
+ nondeterminism_addr,
+ WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
+ __ emit_set_if_nan(nondeterminism_addr.gp(), src.fp(), kind);
+ }
+
+ void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned,
+ ValueKind lane_kind) {
+ RegClass rc = reg_class_for(kS128);
+ LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister nondeterminism_addr =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(
+ nondeterminism_addr,
+ WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
+ tmp_fp.fp(), lane_kind);
+ }
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
- kI32, kI64, kF32, kF64};
- static constexpr base::EnumSet<ValueKind> kExternRefSupported{
+ // MVP:
+ kI32, kI64, kF32, kF64,
+ // Extern ref:
kRef, kOptRef, kRtt, kRttWithDepth, kI8, kI16};
LiftoffAssembler asm_;
@@ -6043,6 +6228,9 @@ class LiftoffCompiler {
// Current number of exception refs on the stack.
int num_exceptions_ = 0;
+ int32_t* max_steps_;
+ int32_t* nondeterminism_;
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -6090,38 +6278,39 @@ class LiftoffCompiler {
constexpr WasmOpcode LiftoffCompiler::kNoOutstandingOp;
// static
constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported;
-// static
-constexpr base::EnumSet<ValueKind> LiftoffCompiler::kExternRefSupported;
} // namespace
WasmCompilationResult ExecuteLiftoffCompilation(
- AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, int func_index, ForDebugging for_debugging,
- Counters* counters, WasmFeatures* detected, Vector<const int> breakpoints,
- std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
+ CompilationEnv* env, const FunctionBody& func_body, int func_index,
+ ForDebugging for_debugging, Counters* counters, WasmFeatures* detected,
+ base::Vector<const int> breakpoints,
+ std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint,
+ int32_t* max_steps, int32_t* nondeterminism) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
func_body_size);
- Zone zone(allocator, "LiftoffCompilationZone");
+ Zone zone(GetWasmEngine()->allocator(), "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
size_t code_size_estimate =
WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
// Allocate the initial buffer a bit bigger to avoid reallocation during code
- // generation.
- std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
- wasm::WasmInstructionBuffer::New(128 + code_size_estimate * 4 / 3);
+ // generation. Overflows when casting to int are fine, as we will allocate at
+ // least {AssemblerBase::kMinimalBufferSize} anyway, so in the worst case we
+ // have to grow more often.
+ int initial_buffer_size = static_cast<int>(128 + code_size_estimate * 4 / 3);
std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
+ DCHECK_IMPLIES(max_steps, for_debugging == kForDebugging);
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
- call_descriptor, env, &zone, instruction_buffer->CreateView(),
+ call_descriptor, env, &zone, NewAssemblerBuffer(initial_buffer_size),
debug_sidetable_builder.get(), for_debugging, func_index, breakpoints,
- dead_breakpoint);
+ dead_breakpoint, max_steps, nondeterminism);
decoder.Decode();
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) compiler->OnFirstError(&decoder);
@@ -6142,7 +6331,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
WasmCompilationResult result;
compiler->GetCode(&result.code_desc);
- result.instr_buffer = instruction_buffer->ReleaseBuffer();
+ result.instr_buffer = compiler->ReleaseBuffer();
result.source_positions = compiler->GetSourcePositionTable();
result.protected_instructions_data = compiler->GetProtectedInstructionsData();
result.frame_slot_count = compiler->GetTotalFrameSlotCountForGC();
@@ -6163,22 +6352,23 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
auto* native_module = code->native_module();
auto* function = &native_module->module()->functions[code->index()];
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
- Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
+ base::Vector<const byte> function_bytes =
+ wire_bytes.GetFunctionBytes(function);
CompilationEnv env = native_module->CreateCompilationEnv();
FunctionBody func_body{function->sig, 0, function_bytes.begin(),
function_bytes.end()};
- AccountingAllocator* allocator = native_module->engine()->allocator();
- Zone zone(allocator, "LiftoffDebugSideTableZone");
+ Zone zone(GetWasmEngine()->allocator(), "LiftoffDebugSideTableZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
constexpr int kSteppingBreakpoints[] = {0};
DCHECK(code->for_debugging() == kForDebugging ||
code->for_debugging() == kForStepping);
- Vector<const int> breakpoints = code->for_debugging() == kForStepping
- ? ArrayVector(kSteppingBreakpoints)
- : Vector<const int>{};
+ base::Vector<const int> breakpoints =
+ code->for_debugging() == kForStepping
+ ? base::ArrayVector(kSteppingBreakpoints)
+ : base::Vector<const int>{};
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, native_module->module(), env.enabled_features, &detected,
func_body, call_descriptor, &env, &zone,
@@ -6197,7 +6387,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
#undef WASM_INSTANCE_OBJECT_FIELD_SIZE
#undef LOAD_INSTANCE_FIELD
#undef LOAD_TAGGED_PTR_INSTANCE_FIELD
-#undef DEBUG_CODE_COMMENT
+#undef CODE_COMMENT
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 6987c2e779..e01d617ea4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -54,10 +54,11 @@ enum LiftoffBailoutReason : int8_t {
};
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
- AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
- ForDebugging, Counters*, WasmFeatures* detected_features,
- Vector<const int> breakpoints = {},
- std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0);
+ CompilationEnv*, const FunctionBody&, int func_index, ForDebugging,
+ Counters*, WasmFeatures* detected_features,
+ base::Vector<const int> breakpoints = {},
+ std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0,
+ int32_t* max_steps = nullptr, int32_t* nondeterminism = nullptr);
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
const WasmCode*);
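
Both new out-parameters default to nullptr, so existing callers compile unchanged. A hypothetical fuzzer call site (a sketch; per the DCHECK in ExecuteLiftoffCompilation, {max_steps} may only be passed together with kForDebugging):

    int32_t max_steps = 1 << 24;  // execution budget for the instrumented code
    int32_t nondeterminism = 0;   // set to non-zero when a NaN is observed
    WasmCompilationResult result = ExecuteLiftoffCompilation(
        env, func_body, func_index, kForDebugging, counters, &detected,
        /*breakpoints=*/{}, /*debug_sidetable=*/nullptr, /*dead_breakpoint=*/0,
        &max_steps, &nondeterminism);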
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index a823a3238d..48891ab08b 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -485,8 +485,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
Addu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -3003,6 +3004,28 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, 1);
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ LoadZeroIfNotFPUCondition(scratch);
+ Sw(scratch, MemOperand(dst));
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 95bf512590..c7a66ca754 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -464,8 +464,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
Daddu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -1935,7 +1936,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
MSARegister dst_msa = dst.fp().toW();
uint64_t vals[2];
- base::Memcpy(vals, imms, sizeof(vals));
+ memcpy(vals, imms, sizeof(vals));
li(kScratchReg, vals[0]);
insert_d(dst_msa, 0, kScratchReg);
li(kScratchReg, vals[1]);
@@ -3171,6 +3172,28 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, 1);
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ LoadZeroIfNotFPUCondition(scratch);
+ Sd(scratch, MemOperand(dst));
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 80268fc66a..ff83b614e1 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -5,8 +5,11 @@
#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/assembler.h"
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
-
+#include "src/wasm/simd-shuffle.h"
namespace v8 {
namespace internal {
@@ -36,6 +39,7 @@ namespace liftoff {
// | | v
// -----+--------------------+ <-- stack ptr (sp)
//
+//
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
@@ -45,6 +49,12 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
return MemOperand(fp, -kInstanceOffset - offset + half_offset);
}
+inline MemOperand GetStackSlot(uint32_t offset) {
+ return MemOperand(fp, -static_cast<int32_t>(offset));
+}
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
switch (liftoff_cond) {
case kEqual:
@@ -155,40 +165,53 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
- bailout(kUnsupportedArchitecture, "LoadInstanceFromFrame");
+ LoadU64(dst, liftoff::GetInstanceOperand(), r0);
}
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
int offset, int size) {
- bailout(kUnsupportedArchitecture, "LoadFromInstance");
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ LoadU8(dst, MemOperand(instance, offset), r0);
+ break;
+ case 4:
+ LoadU32(dst, MemOperand(instance, offset), r0);
+ break;
+ case 8:
+ LoadU64(dst, MemOperand(instance, offset), r0);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
+ LoadTaggedPointerField(dst, MemOperand(instance, offset), r0);
}
void LiftoffAssembler::SpillInstance(Register instance) {
- bailout(kUnsupportedArchitecture, "SpillInstance");
+ StoreU64(instance, liftoff::GetInstanceOperand(), r0);
}
void LiftoffAssembler::ResetOSRTarget() {}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- bailout(kUnsupportedArchitecture, "FillInstanceInto");
+ LoadU64(dst, liftoff::GetInstanceOperand(), r0);
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
+ LoadTaggedPointerField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
int32_t offset_imm) {
- bailout(kUnsupportedArchitecture, "LoadFullPointer");
+ LoadU64(dst, MemOperand(src_addr, offset_imm), r0);
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
@@ -197,7 +220,32 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier skip_write_barrier) {
- bailout(kRefTypes, "GlobalSet");
+ MemOperand dst_op = MemOperand(dst_addr, offset_reg, offset_imm);
+ StoreTaggedField(src.gp(), dst_op, r0);
+
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, r0, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(src.gp(), src.gp());
+ }
+ CheckPageFlag(src.gp(), r0, MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, &exit);
+ mov(ip, Operand(offset_imm));
+ add(ip, ip, dst_addr);
+ if (offset_reg != no_reg) {
+ add(ip, ip, offset_reg);
+ }
+ CallRecordWriteStubSaveRegisters(dst_addr, ip, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
}
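
The stub call above is guarded by three early exits. As a cross-check, the reachability condition can be restated as a plain predicate; the boolean parameters below are illustrative stand-ins for the CheckPageFlag and JumpIfSmi tests, not V8 API:

    // Models the guards above; true iff the emitted code would reach
    // CallRecordWriteStubSaveRegisters.
    bool NeedsRecordWrite(bool skip_write_barrier, bool disable_write_barriers,
                          bool dst_page_interesting, bool value_is_smi,
                          bool value_page_interesting) {
      if (skip_write_barrier || disable_write_barriers) return false;
      if (!dst_page_interesting) return false;  // first CheckPageFlag
      if (value_is_smi) return false;           // JumpIfSmi
      return value_page_interesting;            // second CheckPageFlag
    }
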
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -205,14 +253,137 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem,
bool i64_offset) {
- bailout(kUnsupportedArchitecture, "Load");
+ if (!i64_offset && offset_reg != no_reg) {
+ ZeroExtWord32(ip, offset_reg);
+ offset_reg = ip;
+ }
+ MemOperand src_op = MemOperand(src_addr, offset_reg, offset_imm);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ LoadU8(dst.gp(), src_op, r0);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ LoadS8(dst.gp(), src_op, r0);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ if (is_load_mem) {
+ LoadU16LE(dst.gp(), src_op, r0);
+ } else {
+ LoadU16(dst.gp(), src_op, r0);
+ }
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ if (is_load_mem) {
+ LoadS16LE(dst.gp(), src_op, r0);
+ } else {
+ LoadS16(dst.gp(), src_op, r0);
+ }
+ break;
+ case LoadType::kI64Load32U:
+ if (is_load_mem) {
+ LoadU32LE(dst.gp(), src_op, r0);
+ } else {
+ LoadU32(dst.gp(), src_op, r0);
+ }
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ if (is_load_mem) {
+ LoadS32LE(dst.gp(), src_op, r0);
+ } else {
+ LoadS32(dst.gp(), src_op, r0);
+ }
+ break;
+ case LoadType::kI64Load:
+ if (is_load_mem) {
+ LoadU64LE(dst.gp(), src_op, r0);
+ } else {
+ LoadU64(dst.gp(), src_op, r0);
+ }
+ break;
+ case LoadType::kF32Load:
+ if (is_load_mem) {
+ LoadF32LE(dst.fp(), src_op, r0, ip);
+ } else {
+ LoadF32(dst.fp(), src_op, r0);
+ }
+ break;
+ case LoadType::kF64Load:
+ if (is_load_mem) {
+ LoadF64LE(dst.fp(), src_op, r0, ip);
+ } else {
+ LoadF64(dst.fp(), src_op, r0);
+ }
+ break;
+ case LoadType::kS128Load:
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- bailout(kUnsupportedArchitecture, "Store");
+  MemOperand dst_op = MemOperand(dst_addr, offset_reg, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ StoreU8(src.gp(), dst_op, r0);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ if (is_store_mem) {
+ StoreU16LE(src.gp(), dst_op, r0);
+ } else {
+ StoreU16(src.gp(), dst_op, r0);
+ }
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ if (is_store_mem) {
+ StoreU32LE(src.gp(), dst_op, r0);
+ } else {
+ StoreU32(src.gp(), dst_op, r0);
+ }
+ break;
+ case StoreType::kI64Store:
+ if (is_store_mem) {
+ StoreU64LE(src.gp(), dst_op, r0);
+ } else {
+ StoreU64(src.gp(), dst_op, r0);
+ }
+ break;
+ case StoreType::kF32Store:
+ if (is_store_mem) {
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
+ StoreF32LE(src.fp(), dst_op, r0, scratch2);
+ } else {
+ StoreF32(src.fp(), dst_op, r0);
+ }
+ break;
+ case StoreType::kF64Store:
+ if (is_store_mem) {
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
+ StoreF64LE(src.fp(), dst_op, r0, scratch2);
+ } else {
+ StoreF64(src.fp(), dst_op, r0);
+ }
+ break;
+ case StoreType::kS128Store: {
+ bailout(kUnsupportedArchitecture, "SIMD");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
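
Throughout Load and Store above, the is_load_mem/is_store_mem flags select the *LE accessors because wasm linear memory is little-endian by spec, while V8-internal slots use native byte order. A minimal sketch of the difference on a big-endian host (BIG_ENDIAN_HOST is an illustrative macro, not a V8 define):

    #include <cstdint>
    #include <cstring>

    uint32_t LoadWasmMemoryU32(const uint8_t* addr) {  // the LoadU32LE path
      uint32_t raw;
      std::memcpy(&raw, addr, sizeof(raw));
    #if defined(BIG_ENDIAN_HOST)      // e.g. typical PPC configurations
      raw = __builtin_bswap32(raw);   // wasm bytes are LE; swap to host order
    #endif
      return raw;
    }

    uint32_t LoadVmSlotU32(const uint8_t* addr) {  // the plain LoadU32 path
      uint32_t raw;
      std::memcpy(&raw, addr, sizeof(raw));  // native order, no swap
      return raw;
    }
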
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
@@ -276,44 +447,239 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueKind kind) {
- bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
+ int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(fp, offset + 4), r0);
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(fp, offset), r0);
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kRttWithDepth:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kS128: {
+ bailout(kSimd, "simd load");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
ValueKind kind) {
- bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
+ int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ StoreU32(src.gp(), MemOperand(fp, offset + 4), r0);
+ break;
+#else
+ StoreU32(src.gp(), MemOperand(fp, offset), r0);
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ StoreU64(src.gp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kF32: {
+ StoreF32(src.fp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kF64: {
+ StoreF64(src.fp(), MemOperand(fp, offset), r0);
+ break;
+ }
+ case kS128: {
+ bailout(kSimd, "simd load");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
ValueKind kind) {
- bailout(kUnsupportedArchitecture, "LoadReturnStackSlot");
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(sp, offset + 4), r0);
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(sp, offset), r0);
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kRttWithDepth:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(sp, offset), r0);
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(sp, offset), r0);
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(sp, offset), r0);
+ break;
+ }
+ case kS128: {
+ bailout(kSimd, "simd load");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
+#ifdef V8_TARGET_BIG_ENDIAN
+constexpr int stack_bias = -4;
+#else
+constexpr int stack_bias = 0;
+#endif
+
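The bias exists because a 32-bit value spilled into an 8-byte stack slot aliases the low half of the slot, which sits at byte offset 0 on little-endian hosts but at byte offset 4 on big-endian ones. A self-contained check of that fact (plain C++, independent of V8):

    #include <cstdint>
    #include <cstring>

    // Returns the byte offset, within an 8-byte slot, of the low 32 bits of a
    // 64-bit value on this host: 0 on LE, 4 on BE. This is exactly what the
    // -4 stack_bias (applied to fp-relative i32/f32 offsets) corrects for.
    int LowHalfByteOffset() {
      uint64_t slot = 0x1122334455667788ull;
      uint32_t low32 = static_cast<uint32_t>(slot);  // 0x55667788
      uint8_t bytes[8];
      std::memcpy(bytes, &slot, sizeof(bytes));
      uint32_t at0;
      std::memcpy(&at0, bytes, sizeof(at0));
      return at0 == low32 ? 0 : 4;
    }
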
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueKind kind) {
- bailout(kUnsupportedArchitecture, "MoveStackValue");
+ DCHECK_NE(dst_offset, src_offset);
+
+ switch (kind) {
+ case kI32:
+ case kF32:
+      LoadU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+      StoreU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kF64:
+      LoadU64(ip, liftoff::GetStackSlot(src_offset), r0);
+      StoreU64(ip, liftoff::GetStackSlot(dst_offset), r0);
+ break;
+ case kS128:
+ bailout(kSimd, "simd op");
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
- bailout(kUnsupportedArchitecture, "Move Register");
+ mr(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
- bailout(kUnsupportedArchitecture, "Move DoubleRegister");
+ if (kind == kF32 || kind == kF64) {
+ fmr(dst, src);
+ } else {
+ bailout(kSimd, "simd op");
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
- bailout(kUnsupportedArchitecture, "Spill register");
+ DCHECK_LT(0, offset);
+ RecordUsedSpillOffset(offset);
+
+ switch (kind) {
+ case kI32:
+ StoreU32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias), r0);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ StoreU64(reg.gp(), liftoff::GetStackSlot(offset), r0);
+ break;
+ case kF32:
+ StoreF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias), r0);
+ break;
+ case kF64:
+ StoreF64(reg.fp(), liftoff::GetStackSlot(offset), r0);
+ break;
+ case kS128: {
+ bailout(kSimd, "simd op");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
- bailout(kUnsupportedArchitecture, "Spill value");
+ RecordUsedSpillOffset(offset);
+  Register src = ip;
+ switch (value.type().kind()) {
+ case kI32: {
+ mov(src, Operand(value.to_i32()));
+ StoreU32(src, liftoff::GetStackSlot(offset + stack_bias), r0);
+ break;
+ }
+ case kI64: {
+ mov(src, Operand(value.to_i64()));
+ StoreU64(src, liftoff::GetStackSlot(offset), r0);
+ break;
+ }
+ default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
- bailout(kUnsupportedArchitecture, "Fill");
+ switch (kind) {
+ case kI32:
+ LoadS32(reg.gp(), liftoff::GetStackSlot(offset + stack_bias), r0);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(reg.gp(), liftoff::GetStackSlot(offset), r0);
+ break;
+ case kF32:
+ LoadF32(reg.fp(), liftoff::GetStackSlot(offset + stack_bias), r0);
+ break;
+ case kF64:
+ LoadF64(reg.fp(), liftoff::GetStackSlot(offset), r0);
+ break;
+ case kS128: {
+ bailout(kSimd, "simd op");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -334,21 +700,21 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// Special straight-line code for up to nine words. Generates one
// instruction per word.
for (int offset = 4; offset <= size; offset += 4) {
- StoreP(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
+ StoreU64(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r4 for start address (inclusive), r5 for end address (exclusive).
push(r4);
push(r5);
- subi(r4, fp, Operand(start + size));
- subi(r5, fp, Operand(start));
+ SubS64(r4, fp, Operand(start + size), r0);
+ SubS64(r5, fp, Operand(start), r0);
Label loop;
bind(&loop);
- StoreP(r0, MemOperand(r0));
+ StoreU64(r0, MemOperand(r0));
addi(r0, r0, Operand(kSystemPointerSize));
- cmp(r4, r5);
+ CmpS64(r4, r5);
bne(&loop);
pop(r4);
@@ -426,8 +792,6 @@ UNIMPLEMENTED_I32_BINOP_I(i32_xor)
UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
-UNIMPLEMENTED_I64_BINOP(i64_sub)
UNIMPLEMENTED_I64_BINOP(i64_mul)
#ifdef V8_TARGET_ARCH_PPC64
UNIMPLEMENTED_I64_BINOP_I(i64_and)
@@ -443,8 +807,6 @@ UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_min)
-UNIMPLEMENTED_FP_BINOP(f32_max)
UNIMPLEMENTED_FP_BINOP(f32_copysign)
UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
@@ -457,8 +819,6 @@ UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_min)
-UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_BINOP(f64_copysign)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
@@ -479,21 +839,72 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_I32_SHIFTOP
#undef UNIMPLEMENTED_I64_SHIFTOP
-bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "i32_popcnt");
- return true;
-}
-
-bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_popcnt");
- return true;
-}
+#define SIGN_EXT(r) extsw(r, r)
+#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define REGISTER_AND_WITH_1F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(31)); \
+ return r0; \
+ })
+
+#define LFR_TO_REG(reg) reg.gp()
+
+// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
+#define UNOP_LIST(V) \
+ V(i32_popcnt, Popcnt32, Register, Register, , , USE, true, bool) \
+ V(i64_popcnt, Popcnt64, LiftoffRegister, LiftoffRegister, LFR_TO_REG, \
+ LFR_TO_REG, USE, true, bool)
+
+#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast, \
+ ret, return_type) \
+ return_type LiftoffAssembler::emit_##name(dtype dst, stype src) { \
+ auto _dst = dcast(dst); \
+ auto _src = scast(src); \
+ instr(_dst, _src); \
+ rcast(_dst); \
+ return ret; \
+ }
+UNOP_LIST(EMIT_UNOP_FUNCTION)
+#undef EMIT_UNOP_FUNCTION
+#undef UNOP_LIST
+
+// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
+// return_val, return_type)
+#define BINOP_LIST(V) \
+ V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void)
+
+#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
+ scast2, rcast, ret, return_type) \
+ return_type LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, \
+ stype2 rhs) { \
+ auto _dst = dcast(dst); \
+ auto _lhs = scast1(lhs); \
+ auto _rhs = scast2(rhs); \
+ instr(_dst, _lhs, _rhs); \
+ rcast(_dst); \
+ return ret; \
+ }
-void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int64_t imm) {
- bailout(kUnsupportedArchitecture, "i64_addi");
-}
+BINOP_LIST(EMIT_BINOP_FUNCTION)
+#undef BINOP_LIST
+#undef EMIT_BINOP_FUNCTION
+#undef SIGN_EXT
+#undef INT32_AND_WITH_1F
+#undef REGISTER_AND_WITH_1F
+#undef LFR_TO_REG
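
For readability, here is roughly what the machinery above generates for one row, a hand-expanded sketch of the i64_add entry after preprocessing (comments added; otherwise mechanical):

    // Expansion of:
    //   V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister,
    //     LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void)
    void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                        LiftoffRegister rhs) {
      auto _dst = dst.gp();   // dcast  = LFR_TO_REG
      auto _lhs = lhs.gp();   // scast1 = LFR_TO_REG
      auto _rhs = rhs.gp();   // scast2 = LFR_TO_REG
      AddS64(_dst, _lhs, _rhs);
      USE(_dst);              // rcast = USE, marks _dst as used
      return;                 // ret is empty, so a bare return
    }
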
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
@@ -605,9 +1016,9 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
switch (kind) {
case kI32:
if (use_signed) {
- cmpw(lhs, rhs);
+ CmpS32(lhs, rhs);
} else {
- cmplw(lhs, rhs);
+ CmpU32(lhs, rhs);
}
break;
case kRef:
@@ -618,9 +1029,9 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
V8_FALLTHROUGH;
case kI64:
if (use_signed) {
- cmp(lhs, rhs);
+ CmpS64(lhs, rhs);
} else {
- cmpl(lhs, rhs);
+ CmpU64(lhs, rhs);
}
break;
default:
@@ -629,7 +1040,7 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
} else {
DCHECK_EQ(kind, kI32);
CHECK(use_signed);
- cmpwi(lhs, Operand::Zero());
+ CmpS32(lhs, Operand::Zero(), r0);
}
b(cond, label);
@@ -639,13 +1050,13 @@ void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
Condition cond = liftoff::ToCondition(liftoff_cond);
- Cmpwi(lhs, Operand(imm), r0);
+ CmpS32(lhs, Operand(imm), r0);
b(cond, label);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
Label done;
- cmpwi(src, Operand(0));
+ CmpS32(src, Operand(0), r0);
mov(dst, Operand(1));
beq(&done);
mov(dst, Operand::Zero());
@@ -657,9 +1068,9 @@ void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Register rhs) {
bool use_signed = liftoff::UseSignedOp(liftoff_cond);
if (use_signed) {
- cmpw(lhs, rhs);
+ CmpS32(lhs, rhs);
} else {
- cmplw(lhs, rhs);
+ CmpU32(lhs, rhs);
}
Label done;
mov(dst, Operand(1));
@@ -682,9 +1093,9 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
LiftoffRegister rhs) {
bool use_signed = liftoff::UseSignedOp(liftoff_cond);
if (use_signed) {
- cmp(lhs.gp(), rhs.gp());
+ CmpS64(lhs.gp(), rhs.gp());
} else {
- cmpl(lhs.gp(), rhs.gp());
+ CmpU64(lhs.gp(), rhs.gp());
}
Label done;
mov(dst, Operand(1));
@@ -1937,6 +2348,18 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 32ecca686e..8b7b0b83e1 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -401,7 +401,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
- LoadFromInstance(dst, instance, offset, kTaggedSize);
+ DCHECK_LE(0, offset);
+ LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -418,9 +419,8 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- STATIC_ASSERT(kTaggedSize == kInt64Size);
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
- Ld(dst, src_op);
+ LoadTaggedPointerField(dst, src_op);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
@@ -435,10 +435,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier skip_write_barrier) {
- STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
- Sd(src.gp(), dst_op);
+ StoreTaggedField(src.gp(), dst_op);
if (skip_write_barrier || FLAG_disable_write_barriers) return;
@@ -453,8 +452,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
Add64(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -1250,7 +1250,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f32_copysign");
+ fsgnj_s(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -1265,7 +1265,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kComplexOperation, "f64_copysign");
+ fsgnj_d(dst, lhs, rhs);
}
#define FP_BINOP(name, instruction) \
@@ -1362,7 +1362,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
// Checking if trap.
- TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ if (trap != nullptr) {
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ }
return true;
}
@@ -1401,30 +1403,46 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64ReinterpretI64:
fmv_d_x(dst.fp(), src.gp());
return true;
- case kExprI32SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ case kExprI32SConvertSatF32: {
+ fcvt_w_s(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_s(dst.gp(), src.fp());
return true;
- case kExprI32UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ }
+ case kExprI32UConvertSatF32: {
+ fcvt_wu_s(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_s(dst.gp(), src.fp());
return true;
- case kExprI32SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ }
+ case kExprI32SConvertSatF64: {
+ fcvt_w_d(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_d(dst.gp(), src.fp());
return true;
- case kExprI32UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ }
+ case kExprI32UConvertSatF64: {
+ fcvt_wu_d(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_d(dst.gp(), src.fp());
return true;
- case kExprI64SConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ }
+ case kExprI64SConvertSatF32: {
+ fcvt_l_s(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_s(dst.gp(), src.fp());
return true;
- case kExprI64UConvertSatF32:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ }
+ case kExprI64UConvertSatF32: {
+ fcvt_lu_s(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_s(dst.gp(), src.fp());
return true;
- case kExprI64SConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ }
+ case kExprI64SConvertSatF64: {
+ fcvt_l_d(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_d(dst.gp(), src.fp());
return true;
- case kExprI64UConvertSatF64:
- bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ }
+ case kExprI64UConvertSatF64: {
+ fcvt_lu_d(dst.gp(), src.fp(), RTZ);
+ Clear_if_nan_d(dst.gp(), src.fp());
return true;
+ }
default:
return false;
}
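
Each saturating case follows the same pattern: fcvt with round-toward-zero, which on RISC-V already clamps out-of-range inputs, followed by Clear_if_nan_* to produce the 0 that the wasm spec mandates for NaN. A hedged model of the resulting semantics for the signed i32/f32 case (plain C++, not the emitted code):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t I32SConvertSatF32(float x) {  // illustrative name
      if (std::isnan(x)) return 0;  // Clear_if_nan_s zeroes the fcvt result
      if (x <= static_cast<float>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();  // fcvt saturates low
      if (x >= static_cast<float>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();  // fcvt saturates high
      return static_cast<int32_t>(x);  // RTZ: truncate toward zero
    }
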
@@ -2654,6 +2672,13 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ bailout(kSimd, "emit_s128_set_if_nan");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
TurboAssembler::Uld(limit_address, MemOperand(limit_address));
TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
@@ -2828,6 +2853,21 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+  if (kind == kF32) {
+    feq_s(scratch, src, src);  // scratch <- !isNan(src)
+  } else {
+    DCHECK_EQ(kind, kF64);
+    feq_d(scratch, src, src);  // scratch <- !isNan(src)
+  }
+  xori(scratch, scratch, 1);  // scratch <- isNan(src)
+  Sd(scratch, MemOperand(dst));
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 4b0b5a04f6..e78b9c5f61 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -271,8 +271,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
lay(r1, dst_op);
- CallRecordWriteStub(dst_addr, r1, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStubSaveRegisters(dst_addr, r1, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -450,6 +451,14 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -514,6 +523,14 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
value, result, tmp1))
.gp();
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -598,6 +615,14 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
value, result, tmp1))
.gp();
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -682,6 +707,14 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
value, result, tmp1))
.gp();
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -766,6 +799,14 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
value, result, tmp1))
.gp();
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -850,6 +891,14 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
value, result, tmp1))
.gp();
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -926,6 +975,14 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -994,6 +1051,14 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
lay(ip,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
@@ -3239,11 +3304,11 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
MultiPush(regs.GetGpList());
- MultiPushDoubles(regs.GetFpList());
+ MultiPushF64OrV128(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- MultiPopDoubles(regs.GetFpList());
+ MultiPopF64OrV128(regs.GetFpList());
MultiPop(regs.GetGpList());
}
@@ -3393,6 +3458,18 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffAssembler::MaybeOSR() {}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 016a090eed..0744d2e09b 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -219,7 +219,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
-#if V8_OS_WIN
+#if V8_TARGET_OS_WIN
if (frame_size > kStackPageSize) {
// Generate OOL code (at the end of the function, where the current
// assembler is pointing) to do the explicit stack limit check (see
@@ -381,8 +381,10 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
leaq(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
- SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
+
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
bind(&exit);
}
@@ -2849,7 +2851,7 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
- base::Memcpy(vals, imms, sizeof(vals));
+ memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
movq(kScratchRegister, vals[1]);
Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
@@ -4355,6 +4357,36 @@ void LiftoffAssembler::MaybeOSR() {
RelocInfo::WASM_STUB_CALL);
}
+void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
+ ValueKind kind) {
+ if (kind == kF32) {
+ Ucomiss(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Ucomisd(src, src);
+ }
+ Label ret;
+ j(parity_odd, &ret);
+ movl(Operand(dst, 0), Immediate(1));
+ bind(&ret);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+ Register tmp_gp,
+ DoubleRegister tmp_fp,
+ ValueKind lane_kind) {
+ if (lane_kind == kF32) {
+ movaps(tmp_fp, src);
+ cmpunordps(tmp_fp, tmp_fp);
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ movapd(tmp_fp, src);
+ cmpunordpd(tmp_fp, tmp_fp);
+ }
+ pmovmskb(tmp_gp, tmp_fp);
+ orl(Operand(dst, 0), tmp_gp);
+}
+
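A scalar model of the four-lane f32 case above, under the assumption (consistent with the scalar emit_set_if_nan) that dst points at a 32-bit flag that is only ever OR-ed, never cleared:

    #include <cmath>
    #include <cstdint>

    // cmpunordps writes all-ones into each NaN lane; pmovmskb collects the top
    // bit of all 16 bytes (4 mask bits per f32 lane); orl keeps the flag sticky.
    void S128SetIfNanModel(uint32_t* dst, const float lanes[4]) {
      uint32_t mask = 0;
      for (int i = 0; i < 4; ++i) {
        if (std::isnan(lanes[i])) mask |= 0xFu << (i * 4);
      }
      *dst |= mask;
    }
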
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 72d2e07305..de9904ae4a 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -22,11 +22,13 @@
#include "src/wasm/c-api.h"
#include <cstring>
+#include <iomanip>
#include <iostream>
#include "include/libplatform/libplatform.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
+#include "src/builtins/builtins.h"
#include "src/compiler/wasm-compiler.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed.h"
@@ -45,6 +47,10 @@
#error "WASM_API_DEBUG is unsupported"
#endif
+// If you want counter support (what --dump-counters does for the d8 shell),
+// set this to 1 (here, or via the -DDUMP_COUNTERS=1 compiler argument).
+#define DUMP_COUNTERS 0
+
namespace wasm {
namespace {
@@ -112,7 +118,7 @@ i::wasm::ValueType WasmValKindToV8(ValKind kind) {
}
Name GetNameFromWireBytes(const i::wasm::WireBytesRef& ref,
- const i::Vector<const uint8_t>& wire_bytes) {
+ const v8::base::Vector<const uint8_t>& wire_bytes) {
DCHECK_LE(ref.offset(), wire_bytes.length());
DCHECK_LE(ref.end_offset(), wire_bytes.length());
if (ref.length() == 0) return Name::make();
@@ -163,7 +169,6 @@ own<ExternType> GetImportExportType(const i::wasm::WasmModule* module,
}
case i::wasm::kExternalException:
UNREACHABLE();
- return {};
}
}
@@ -227,17 +232,142 @@ auto Config::make() -> own<Config> {
// Engine
+#if DUMP_COUNTERS
+class Counter {
+ public:
+ static const int kMaxNameSize = 64;
+ int32_t* Bind(const char* name, bool is_histogram) {
+ int i;
+ for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
+ name_[i] = static_cast<char>(name[i]);
+ }
+ name_[i] = '\0';
+ is_histogram_ = is_histogram;
+ return ptr();
+ }
+ int32_t* ptr() { return &count_; }
+ int32_t count() { return count_; }
+ int32_t sample_total() { return sample_total_; }
+ bool is_histogram() { return is_histogram_; }
+ void AddSample(int32_t sample) {
+ count_++;
+ sample_total_ += sample;
+ }
+
+ private:
+ int32_t count_;
+ int32_t sample_total_;
+ bool is_histogram_;
+ uint8_t name_[kMaxNameSize];
+};
+
+class CounterCollection {
+ public:
+ CounterCollection() = default;
+ Counter* GetNextCounter() {
+ if (counters_in_use_ == kMaxCounters) return nullptr;
+ return &counters_[counters_in_use_++];
+ }
+
+ private:
+ static const unsigned kMaxCounters = 512;
+ uint32_t counters_in_use_{0};
+ Counter counters_[kMaxCounters];
+};
+
+using CounterMap = std::unordered_map<std::string, Counter*>;
+
+#endif
+
struct EngineImpl {
static bool created;
std::unique_ptr<v8::Platform> platform;
+#if DUMP_COUNTERS
+ static CounterCollection counters_;
+ static CounterMap* counter_map_;
+
+ static Counter* GetCounter(const char* name, bool is_histogram) {
+ auto map_entry = counter_map_->find(name);
+ Counter* counter =
+ map_entry != counter_map_->end() ? map_entry->second : nullptr;
+
+ if (counter == nullptr) {
+ counter = counters_.GetNextCounter();
+ if (counter != nullptr) {
+ (*counter_map_)[name] = counter;
+ counter->Bind(name, is_histogram);
+ }
+ } else {
+ DCHECK(counter->is_histogram() == is_histogram);
+ }
+ return counter;
+ }
+
+ static int* LookupCounter(const char* name) {
+ Counter* counter = GetCounter(name, false);
+
+ if (counter != nullptr) {
+ return counter->ptr();
+ } else {
+ return nullptr;
+ }
+ }
+
+ static void* CreateHistogram(const char* name, int min, int max,
+ size_t buckets) {
+ return GetCounter(name, true);
+ }
+
+ static void AddHistogramSample(void* histogram, int sample) {
+ Counter* counter = reinterpret_cast<Counter*>(histogram);
+ counter->AddSample(sample);
+ }
+#endif
+
EngineImpl() {
assert(!created);
created = true;
+#if DUMP_COUNTERS
+ counter_map_ = new CounterMap();
+#endif
}
~EngineImpl() {
+#if DUMP_COUNTERS
+ std::vector<std::pair<std::string, Counter*>> counters(
+ counter_map_->begin(), counter_map_->end());
+ std::sort(counters.begin(), counters.end());
+ // Dump counters in formatted boxes.
+ constexpr int kNameBoxSize = 64;
+ constexpr int kValueBoxSize = 13;
+ std::cout << "+" << std::string(kNameBoxSize, '-') << "+"
+ << std::string(kValueBoxSize, '-') << "+\n";
+ std::cout << "| Name" << std::string(kNameBoxSize - 5, ' ') << "| Value"
+ << std::string(kValueBoxSize - 6, ' ') << "|\n";
+ std::cout << "+" << std::string(kNameBoxSize, '-') << "+"
+ << std::string(kValueBoxSize, '-') << "+\n";
+ for (const auto& pair : counters) {
+ std::string key = pair.first;
+ Counter* counter = pair.second;
+ if (counter->is_histogram()) {
+ std::cout << "| c:" << std::setw(kNameBoxSize - 4) << std::left << key
+ << " | " << std::setw(kValueBoxSize - 2) << std::right
+ << counter->count() << " |\n";
+ std::cout << "| t:" << std::setw(kNameBoxSize - 4) << std::left << key
+ << " | " << std::setw(kValueBoxSize - 2) << std::right
+ << counter->sample_total() << " |\n";
+ } else {
+ std::cout << "| " << std::setw(kNameBoxSize - 2) << std::left << key
+ << " | " << std::setw(kValueBoxSize - 2) << std::right
+ << counter->count() << " |\n";
+ }
+ }
+ std::cout << "+" << std::string(kNameBoxSize, '-') << "+"
+ << std::string(kValueBoxSize, '-') << "+\n";
+ delete counter_map_;
+#endif
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
}
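
Tying the counter pieces together: LookupCounter, CreateHistogram and AddHistogramSample are registered as isolate callbacks in Store::make below, so with DUMP_COUNTERS enabled one histogram update flows roughly as in this sketch (RecordSample and its arguments are illustrative):

    void RecordSample(const char* name, int sample) {
      void* histogram = EngineImpl::CreateHistogram(name, /*min=*/0,
                                                    /*max=*/10000, /*buckets=*/50);
      if (histogram != nullptr) {  // null once all 512 counter slots are used
        EngineImpl::AddHistogramSample(histogram, sample);  // count_++, total += sample
      }
    }
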
@@ -245,6 +375,11 @@ struct EngineImpl {
bool EngineImpl::created = false;
+#if DUMP_COUNTERS
+CounterCollection EngineImpl::counters_;
+CounterMap* EngineImpl::counter_map_;
+#endif
+
template <>
struct implement<Engine> {
using type = EngineImpl;
@@ -265,6 +400,17 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
return make_own(seal<Engine>(engine));
}
+// This should be called somewhat regularly, especially on potentially hot
+// sections of pure C++ execution. To achieve that, we call it on API entry
+// points that heap-allocate but don't call into generated code.
+// For example, finalization of incremental marking relies on it.
+void CheckAndHandleInterrupts(i::Isolate* isolate) {
+ i::StackLimitCheck check(isolate);
+ if (check.InterruptRequested()) {
+ isolate->stack_guard()->HandleInterrupts();
+ }
+}
+
// Stores
StoreImpl::~StoreImpl() {
@@ -326,6 +472,12 @@ auto Store::make(Engine*) -> own<Store> {
// Create isolate.
store->create_params_.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+#if DUMP_COUNTERS
+ store->create_params_.counter_lookup_callback = EngineImpl::LookupCounter;
+ store->create_params_.create_histogram_callback = EngineImpl::CreateHistogram;
+ store->create_params_.add_histogram_sample_callback =
+ EngineImpl::AddHistogramSample;
+#endif
v8::Isolate* isolate = v8::Isolate::New(store->create_params_);
if (!isolate) return own<Store>();
store->isolate_ = isolate;
@@ -854,8 +1006,11 @@ auto Trap::make(Store* store_abs, const Message& message) -> own<Trap> {
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::String> string = VecToString(isolate, message);
- i::Handle<i::JSReceiver> exception = i::Handle<i::JSReceiver>::cast(
- isolate->factory()->NewError(isolate->error_function(), string));
+ i::Handle<i::JSObject> exception =
+ isolate->factory()->NewError(isolate->error_function(), string);
+ i::JSObject::AddProperty(isolate, exception,
+ isolate->factory()->wasm_uncatchable_symbol(),
+ isolate->factory()->true_value(), i::NONE);
return implement<Trap>::type::make(store, exception);
}
@@ -962,19 +1117,20 @@ auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
{reinterpret_cast<const uint8_t*>(binary.get()), binary.size()});
i::Isolate* isolate = impl(store_abs)->i_isolate();
i::wasm::WasmFeatures features = i::wasm::WasmFeatures::FromIsolate(isolate);
- return isolate->wasm_engine()->SyncValidate(isolate, features, bytes);
+ return i::wasm::GetWasmEngine()->SyncValidate(isolate, features, bytes);
}
auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope scope(isolate);
+ CheckAndHandleInterrupts(isolate);
i::wasm::ModuleWireBytes bytes(
{reinterpret_cast<const uint8_t*>(binary.get()), binary.size()});
i::wasm::WasmFeatures features = i::wasm::WasmFeatures::FromIsolate(isolate);
i::wasm::ErrorThrower thrower(isolate, "ignored");
i::Handle<i::WasmModuleObject> module;
- if (!isolate->wasm_engine()
+ if (!i::wasm::GetWasmEngine()
->SyncCompile(isolate, features, &thrower, bytes)
.ToHandle(&module)) {
thrower.Reset(); // The API provides no way to expose the error.
@@ -987,7 +1143,8 @@ auto Module::imports() const -> ownvec<ImportType> {
const i::wasm::NativeModule* native_module =
impl(this)->v8_object()->native_module();
const i::wasm::WasmModule* module = native_module->module();
- const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const v8::base::Vector<const uint8_t> wire_bytes =
+ native_module->wire_bytes();
const std::vector<i::wasm::WasmImport>& import_table = module->import_table;
size_t size = import_table.size();
ownvec<ImportType> imports = ownvec<ImportType>::make_uninitialized(size);
@@ -1005,7 +1162,8 @@ auto Module::imports() const -> ownvec<ImportType> {
ownvec<ExportType> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
const i::wasm::NativeModule* native_module = module_obj->native_module();
const i::wasm::WasmModule* module = native_module->module();
- const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const v8::base::Vector<const uint8_t> wire_bytes =
+ native_module->wire_bytes();
const std::vector<i::wasm::WasmExport>& export_table = module->export_table;
size_t size = export_table.size();
ownvec<ExportType> exports = ownvec<ExportType>::make_uninitialized(size);
@@ -1025,7 +1183,7 @@ auto Module::exports() const -> ownvec<ExportType> {
auto Module::serialize() const -> vec<byte_t> {
i::wasm::NativeModule* native_module =
impl(this)->v8_object()->native_module();
- i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ v8::base::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
size_t binary_size = wire_bytes.size();
// We can only serialize after top-tier compilation (TurboFan) finished.
native_module->compilation_state()->WaitForTopTierFinished();
@@ -1272,11 +1430,14 @@ auto make_func(Store* store_abs, FuncData* data) -> own<Func> {
auto store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
+ CheckAndHandleInterrupts(isolate);
i::Handle<i::Managed<FuncData>> embedder_data =
i::Managed<FuncData>::FromRawPtr(isolate, sizeof(FuncData), data);
i::Handle<i::WasmCapiFunction> function = i::WasmCapiFunction::New(
isolate, reinterpret_cast<i::Address>(&FuncData::v8_callback),
embedder_data, SignatureHelper::Serialize(isolate, data->type.get()));
+ i::Tuple2::cast(function->shared().wasm_capi_function_data().ref())
+ .set_value2(*function);
auto func = implement<Func>::type::make(store, function);
return func;
}
@@ -1355,9 +1516,13 @@ void PrepareFunctionData(i::Isolate* isolate,
const i::wasm::FunctionSig* sig,
const i::wasm::WasmModule* module) {
// If the data is already populated, return immediately.
- if (!function_data->c_wrapper_code().IsSmi()) return;
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ if (function_data->c_wrapper_code() !=
+ ToCodeT(*BUILTIN_CODE(isolate, Illegal))) {
+ return;
+ }
// Compile wrapper code.
- i::Handle<i::Code> wrapper_code =
+ i::Handle<i::CodeT> wrapper_code =
i::compiler::CompileCWasmEntry(isolate, sig, module);
function_data->set_c_wrapper_code(*wrapper_code);
// Compute packed args size.
@@ -1397,7 +1562,6 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
case i::wasm::kVoid:
case i::wasm::kBottom:
UNREACHABLE();
- break;
}
}
}
@@ -1438,7 +1602,6 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
case i::wasm::kVoid:
case i::wasm::kBottom:
UNREACHABLE();
- break;
}
}
}
@@ -1497,8 +1660,9 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
const i::wasm::FunctionSig* sig =
instance->module()->functions[function_index].sig;
PrepareFunctionData(isolate, function_data, sig, instance->module());
- i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
- i::Code::cast(function_data->c_wrapper_code()), isolate);
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ i::Handle<i::CodeT> wrapper_code = i::Handle<i::CodeT>(
+ i::CodeT::cast(function_data->c_wrapper_code()), isolate);
i::Address call_target = function_data->foreign_address();
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
@@ -1648,6 +1812,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
+ CheckAndHandleInterrupts(isolate);
DCHECK_EQ(type->content()->kind(), val.kind());
@@ -1749,6 +1914,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope scope(isolate);
+ CheckAndHandleInterrupts(isolate);
// Get "element".
i::wasm::ValueType i_type;
@@ -1763,7 +1929,6 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
break;
default:
UNREACHABLE();
- return nullptr;
}
const Limits& limits = type->limits();
@@ -1868,6 +2033,7 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory> {
StoreImpl* store = impl(store_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope scope(isolate);
+ CheckAndHandleInterrupts(isolate);
const Limits& limits = type->limits();
uint32_t minimum = limits.min;
@@ -1938,6 +2104,7 @@ own<Instance> Instance::make(Store* store_abs, const Module* module_abs,
const implement<Module>::type* module = impl(module_abs);
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
+ CheckAndHandleInterrupts(isolate);
DCHECK_EQ(module->v8_object()->GetIsolate(), isolate);
@@ -1966,7 +2133,7 @@ own<Instance> Instance::make(Store* store_abs, const Module* module_abs,
}
i::wasm::ErrorThrower thrower(isolate, "instantiation");
i::MaybeHandle<i::WasmInstanceObject> instance_obj =
- isolate->wasm_engine()->SyncInstantiate(
+ i::wasm::GetWasmEngine()->SyncInstantiate(
isolate, &thrower, module->v8_object(), imports_obj,
i::MaybeHandle<i::JSArrayBuffer>());
if (trap) {
@@ -2007,6 +2174,7 @@ auto Instance::exports() const -> ownvec<Extern> {
StoreImpl* store = instance->store();
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
+ CheckAndHandleInterrupts(isolate);
i::Handle<i::WasmInstanceObject> instance_obj = instance->v8_object();
i::Handle<i::WasmModuleObject> module_obj(instance_obj->module_object(),
isolate);
@@ -2181,22 +2349,22 @@ struct borrowed_vec {
}
// Vectors with no ownership management of elements
-#define WASM_DEFINE_VEC_PLAIN(name, Name) \
- WASM_DEFINE_VEC_BASE(name, Name, \
- wasm::vec, ) /* NOLINT(whitespace/parens) */ \
- \
- void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
- const wasm_##name##_t data[]) { \
- auto v2 = wasm::vec<Name>::make_uninitialized(size); \
- if (v2.size() != 0) { \
- v8::base::Memcpy(v2.get(), data, size * sizeof(wasm_##name##_t)); \
- } \
- *out = release_##name##_vec(std::move(v2)); \
- } \
- \
- void wasm_##name##_vec_copy(wasm_##name##_vec_t* out, \
- wasm_##name##_vec_t* v) { \
- wasm_##name##_vec_new(out, v->size, v->data); \
+#define WASM_DEFINE_VEC_PLAIN(name, Name) \
+ WASM_DEFINE_VEC_BASE(name, Name, \
+ wasm::vec, ) /* NOLINT(whitespace/parens) */ \
+ \
+ void wasm_##name##_vec_new(wasm_##name##_vec_t* out, size_t size, \
+ const wasm_##name##_t data[]) { \
+ auto v2 = wasm::vec<Name>::make_uninitialized(size); \
+ if (v2.size() != 0) { \
+ memcpy(v2.get(), data, size * sizeof(wasm_##name##_t)); \
+ } \
+ *out = release_##name##_vec(std::move(v2)); \
+ } \
+ \
+ void wasm_##name##_vec_copy(wasm_##name##_vec_t* out, \
+ wasm_##name##_vec_t* v) { \
+ wasm_##name##_vec_new(out, v->size, v->data); \
}
// Vectors that own their elements
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
new file mode 100644
index 0000000000..2705edb634
--- /dev/null
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -0,0 +1,74 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/code-space-access.h"
+
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+
+// The {NativeModule} argument is unused; it is just here for a common API with
+// the non-M1 implementation.
+// TODO(jkummerow): Background threads could permanently stay in
+// writable mode; only the main thread has to switch back and forth.
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
+ if (code_space_write_nesting_level_ == 0) {
+ SwitchMemoryPermissionsToWritable();
+ }
+ code_space_write_nesting_level_++;
+}
+
+CodeSpaceWriteScope::~CodeSpaceWriteScope() {
+ code_space_write_nesting_level_--;
+ if (code_space_write_nesting_level_ == 0) {
+ SwitchMemoryPermissionsToExecutable();
+ }
+}
+
+#else // Not on MacOS on ARM64 (M1 hardware): Use Intel PKU and/or mprotect.
+
+CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
+ : native_module_(native_module) {
+ DCHECK_NOT_NULL(native_module_);
+ if (FLAG_wasm_memory_protection_keys) {
+ auto* code_manager = GetWasmCodeManager();
+ if (code_manager->HasMemoryProtectionKeySupport()) {
+ code_manager->SetThreadWritable(true);
+ return;
+ }
+ // Fallback to mprotect-based write protection, if enabled.
+ }
+ if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(true);
+ CHECK(success);
+ }
+}
+
+CodeSpaceWriteScope::~CodeSpaceWriteScope() {
+ if (FLAG_wasm_memory_protection_keys) {
+ auto* code_manager = GetWasmCodeManager();
+ if (code_manager->HasMemoryProtectionKeySupport()) {
+ code_manager->SetThreadWritable(false);
+ return;
+ }
+ // Fallback to mprotect-based write protection, if enabled.
+ }
+ if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(false);
+ CHECK(success);
+ }
+}
+
+#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
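
On the Apple-silicon path, the thread-local nesting counter makes the scope cheap to use recursively: permissions flip only on the outermost scope. An illustrative caller (PatchTwoFunctions is hypothetical):

    void PatchTwoFunctions(v8::internal::wasm::NativeModule* native_module) {
      using v8::internal::wasm::CodeSpaceWriteScope;
      CodeSpaceWriteScope outer(native_module);    // 0 -> 1: switch to writable
      {
        CodeSpaceWriteScope inner(native_module);  // 1 -> 2: no permission change
        // ... write the first function's code ...
      }                                            // 2 -> 1: still writable
      // ... write the second function's code ...
    }                                              // 1 -> 0: back to executable
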
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 6fd5ad5f9f..62a252caf9 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -16,7 +16,61 @@
namespace v8 {
namespace internal {
+namespace wasm {
+
+class NativeModule;
+
+// Within the scope, the code space is writable (and, for Apple M1, also not
+// executable). After the last (nested) scope is destructed, the code space is
+// no longer writable.
+// This uses three different implementations, depending on the platform, flags,
+// and runtime support:
+// - On MacOS on ARM64 ("Apple M1"/Apple Silicon), it uses APRR/MAP_JIT to
+// switch only the calling thread between writable and executable. This achieves
+// "real" W^X and is thread-local and fast.
+// - When Intel PKU (aka. memory protection keys) are available, it switches
+// the protection keys' permission between writable and not writable. The
+// executable permission cannot be retracted with PKU. That is, this "only"
+// achieves write-protection, but is similarly thread-local and fast.
+// - As a fallback, we switch with {mprotect()} between R-X and RWX (due to
+// concurrent compilation and execution). This is slow and process-wide. With
+// {mprotect()}, we currently switch permissions for the entire module's memory:
+// - for AOT, that's as efficient as it can be.
+// - for Lazy, we don't have a heuristic for functions that may need patching,
+// and even if we did, the resulting set of pages may be fragmented.
+// Currently, we try and keep the number of syscalls low.
+// - similar argument for debug time.
+// MAP_JIT on Apple M1 cannot switch permissions for smaller ranges of memory,
+// and for PKU we would need multiple keys, so both of them also switch
+// permissions for all code pages.
+class V8_NODISCARD CodeSpaceWriteScope final {
+ public:
+ explicit CodeSpaceWriteScope(NativeModule* native_module);
+ ~CodeSpaceWriteScope();
+
+ // Disable copy constructor and copy-assignment operator, since this manages
+ // a resource and implicit copying of the scope can yield surprising errors.
+ CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
+ CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
+
+ private:
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+ static thread_local int code_space_write_nesting_level_;
+#else // On non-M1 hardware:
+ // The M1 implementation knows implicitly from the {MAP_JIT} flag during
+ // allocation which region to switch permissions for. On non-M1 hardware,
+ // however, we either need the protection key or code space from the
+ // {native_module_}.
+ NativeModule* native_module_;
+#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+};
+
+} // namespace wasm
+
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+// Low-level API for switching MAP_JIT pages between writable and executable.
+// TODO(wasm): Access to these functions is only needed in tests. Remove?
// Ignoring this warning is considered better than relying on
// __builtin_available.
@@ -30,42 +84,13 @@ inline void SwitchMemoryPermissionsToExecutable() {
}
#pragma clang diagnostic pop
-namespace wasm {
-
-class V8_NODISCARD CodeSpaceWriteScope {
- public:
- // TODO(jkummerow): Background threads could permanently stay in
- // writable mode; only the main thread has to switch back and forth.
- CodeSpaceWriteScope() {
- if (code_space_write_nesting_level_ == 0) {
- SwitchMemoryPermissionsToWritable();
- }
- code_space_write_nesting_level_++;
- }
- ~CodeSpaceWriteScope() {
- code_space_write_nesting_level_--;
- if (code_space_write_nesting_level_ == 0) {
- SwitchMemoryPermissionsToExecutable();
- }
- }
-
- private:
- static thread_local int code_space_write_nesting_level_;
-};
-
-#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;
-
-} // namespace wasm
-
#else // Not Mac-on-arm64.
// Nothing to do, we map code memory with rwx permissions.
inline void SwitchMemoryPermissionsToWritable() {}
inline void SwitchMemoryPermissionsToExecutable() {}
-#define CODE_SPACE_WRITE_SCOPE
-
-#endif // V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
} // namespace internal
} // namespace v8
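
A minimal usage sketch for the scope above (illustrative only; {PatchJumpTable}
and its body are assumptions, only CodeSpaceWriteScope comes from this header):

    void PatchJumpTable(NativeModule* native_module) {
      // Any write to the code space must be bracketed by the scope.
      CodeSpaceWriteScope write_scope(native_module);
      // ... code pages are writable here (and, on Apple M1, not executable) ...
    }  // leaving the last nested scope makes the code space executable again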
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index a10190f70b..96b9bbb2a5 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -36,7 +36,14 @@ enum RuntimeExceptionSupport : bool {
kNoRuntimeExceptionSupport = false
};
-enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
+enum BoundsCheckStrategy : int8_t {
+ // Emit protected instructions, use the trap handler for OOB detection.
+ kTrapHandler,
+ // Emit explicit bounds checks.
+ kExplicitBoundsChecks,
+ // Emit no bounds checks at all (for testing only).
+ kNoBoundsChecks
+};
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
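
A sketch of how a backend might dispatch on the new strategy (every Emit*
helper here is hypothetical; only the enum values come from the patch):

    switch (env->bounds_checks) {
      case kTrapHandler:
        // Protected instruction: an out-of-bounds access raises a signal,
        // which the trap handler converts into a wasm trap.
        EmitProtectedLoad(index, offset);
        break;
      case kExplicitBoundsChecks:
        // Compare against the memory size before every access.
        EmitTrapUnless(index + offset < memory_size);
        EmitLoad(index, offset);
        break;
      case kNoBoundsChecks:
        EmitLoad(index, offset);  // for testing only; unsafe otherwise
        break;
    }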
@@ -44,9 +51,8 @@ struct CompilationEnv {
// A pointer to the decoded module's static representation.
const WasmModule* const module;
- // True if trap handling should be used in compiled code, rather than
- // compiling in bounds checks for each memory access.
- const UseTrapHandler use_trap_handler;
+ // The bounds checking strategy to use.
+ const BoundsCheckStrategy bounds_checks;
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
@@ -64,29 +70,24 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
- // We assume that memories of size >= half of the virtual address space
- // cannot be allocated (see https://crbug.com/1201340).
- static constexpr uint32_t kMaxMemoryPagesAtRuntime = std::min(
- kV8MaxWasmMemoryPages,
- (uintptr_t{1} << (kSystemPointerSize == 4 ? 31 : 63)) / kWasmPageSize);
-
constexpr CompilationEnv(const WasmModule* module,
- UseTrapHandler use_trap_handler,
+ BoundsCheckStrategy bounds_checks,
RuntimeExceptionSupport runtime_exception_support,
const WasmFeatures& enabled_features)
: module(module),
- use_trap_handler(use_trap_handler),
+ bounds_checks(bounds_checks),
runtime_exception_support(runtime_exception_support),
// During execution, the memory can never be bigger than what fits in a
// uintptr_t.
- min_memory_size(std::min(kMaxMemoryPagesAtRuntime,
- module ? module->initial_pages : 0) *
- uint64_t{kWasmPageSize}),
- max_memory_size(static_cast<uintptr_t>(
- std::min(kMaxMemoryPagesAtRuntime,
- module && module->has_maximum_pages ? module->maximum_pages
- : max_mem_pages()) *
- uint64_t{kWasmPageSize})),
+ min_memory_size(
+ std::min(kV8MaxWasmMemoryPages,
+ uintptr_t{module ? module->initial_pages : 0}) *
+ kWasmPageSize),
+ max_memory_size((module && module->has_maximum_pages
+ ? std::min(kV8MaxWasmMemoryPages,
+ uintptr_t{module->maximum_pages})
+ : kV8MaxWasmMemoryPages) *
+ kWasmPageSize),
enabled_features(enabled_features) {}
};
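
To make the arithmetic above concrete: with a wasm page size of 64 KiB, a
module declaring initial_pages = 16 and maximum_pages = 256 gets
min_memory_size = 16 * 64 KiB = 1 MiB and max_memory_size = 256 * 64 KiB =
16 MiB; a module without a declared maximum is clamped to
kV8MaxWasmMemoryPages pages instead.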
@@ -95,7 +96,7 @@ struct CompilationEnv {
class WireBytesStorage {
public:
virtual ~WireBytesStorage() = default;
- virtual Vector<const uint8_t> GetCode(WireBytesRef) const = 0;
+ virtual base::Vector<const uint8_t> GetCode(WireBytesRef) const = 0;
};
// Callbacks will receive either {kFailedCompilation} or both
@@ -117,7 +118,7 @@ class V8_EXPORT_PRIVATE CompilationState {
~CompilationState();
- void InitCompileJob(WasmEngine*);
+ void InitCompileJob();
void CancelCompilation();
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index c181c8df87..7597205246 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -15,9 +15,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/memory.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
#include "src/codegen/signature.h"
#include "src/flags/flags.h"
-#include "src/utils/vector.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone-containers.h"
@@ -53,7 +54,8 @@ class Decoder {
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, start, end, buffer_offset) {}
- explicit Decoder(const Vector<const byte> bytes, uint32_t buffer_offset = 0)
+ explicit Decoder(const base::Vector<const byte> bytes,
+ uint32_t buffer_offset = 0)
: Decoder(bytes.begin(), bytes.begin() + bytes.length(), buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
uint32_t buffer_offset = 0)
@@ -304,7 +306,7 @@ class Decoder {
error_ = {};
}
- void Reset(Vector<const uint8_t> bytes, uint32_t buffer_offset = 0) {
+ void Reset(base::Vector<const uint8_t> bytes, uint32_t buffer_offset = 0) {
Reset(bytes.begin(), bytes.end(), buffer_offset);
}
@@ -354,8 +356,8 @@ class Decoder {
// Only report the first error.
if (!ok()) return;
constexpr int kMaxErrorMsg = 256;
- EmbeddedVector<char, kMaxErrorMsg> buffer;
- int len = VSNPrintF(buffer, format, args);
+ base::EmbeddedVector<char, kMaxErrorMsg> buffer;
+ int len = base::VSNPrintF(buffer, format, args);
CHECK_LT(0, len);
error_ = {offset, {buffer.begin(), static_cast<size_t>(len)}};
onFirstError();
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 7927e58d84..c175acd8a0 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -17,6 +17,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/wrappers.h"
#include "src/base/small-vector.h"
+#include "src/base/strings.h"
#include "src/utils/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
@@ -49,14 +50,14 @@ struct WasmException;
return true; \
}())
-#define CHECK_PROTOTYPE_OPCODE(feat) \
- DCHECK(this->module_->origin == kWasmOrigin); \
- if (!VALIDATE(this->enabled_.has_##feat())) { \
- this->DecodeError( \
- "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat ")", \
- opcode); \
- return 0; \
- } \
+#define CHECK_PROTOTYPE_OPCODE(feat) \
+ DCHECK(this->module_->origin == kWasmOrigin); \
+ if (!VALIDATE(this->enabled_.has_##feat())) { \
+ this->DecodeError( \
+ "Invalid opcode 0x%02x (enable with --experimental-wasm-" #feat ")", \
+ opcode); \
+ return 0; \
+ } \
this->detected_->Add(kFeature_##feat);
#define ATOMIC_OP_LIST(V) \
@@ -417,33 +418,14 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
} // namespace value_type_reader
-// Helpers for decoding different kinds of immediates which follow bytecodes.
-template <Decoder::ValidateFlag validate>
-struct LocalIndexImmediate {
- uint32_t index;
- uint32_t length;
-
- inline LocalIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "local index");
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct ExceptionIndexImmediate {
- uint32_t index;
- const WasmException* exception = nullptr;
- uint32_t length;
-
- inline ExceptionIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "exception index");
- }
-};
+enum DecodingMode { kFunctionBody, kInitExpression };
+// Helpers for decoding different kinds of immediates which follow bytecodes.
template <Decoder::ValidateFlag validate>
struct ImmI32Immediate {
int32_t value;
uint32_t length;
- inline ImmI32Immediate(Decoder* decoder, const byte* pc) {
+ ImmI32Immediate(Decoder* decoder, const byte* pc) {
value = decoder->read_i32v<validate>(pc, &length, "immi32");
}
};
@@ -452,7 +434,7 @@ template <Decoder::ValidateFlag validate>
struct ImmI64Immediate {
int64_t value;
uint32_t length;
- inline ImmI64Immediate(Decoder* decoder, const byte* pc) {
+ ImmI64Immediate(Decoder* decoder, const byte* pc) {
value = decoder->read_i64v<validate>(pc, &length, "immi64");
}
};
@@ -461,12 +443,12 @@ template <Decoder::ValidateFlag validate>
struct ImmF32Immediate {
float value;
uint32_t length = 4;
- inline ImmF32Immediate(Decoder* decoder, const byte* pc) {
+ ImmF32Immediate(Decoder* decoder, const byte* pc) {
// We can't use bit_cast here because calling any helper function that
// returns a float would potentially flip NaN bits per C++ semantics, so we
// have to inline the memcpy call directly.
uint32_t tmp = decoder->read_u32<validate>(pc, "immf32");
- base::Memcpy(&value, &tmp, sizeof(value));
+ memcpy(&value, &tmp, sizeof(value));
}
};
@@ -474,32 +456,81 @@ template <Decoder::ValidateFlag validate>
struct ImmF64Immediate {
double value;
uint32_t length = 8;
- inline ImmF64Immediate(Decoder* decoder, const byte* pc) {
+ ImmF64Immediate(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
uint64_t tmp = decoder->read_u64<validate>(pc, "immf64");
- base::Memcpy(&value, &tmp, sizeof(value));
+ memcpy(&value, &tmp, sizeof(value));
}
};
+// This is different from IndexImmediate because {index} is a single byte.
template <Decoder::ValidateFlag validate>
-struct GlobalIndexImmediate {
+struct MemoryIndexImmediate {
+ uint8_t index = 0;
+ uint32_t length = 1;
+ MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u8<validate>(pc, "memory index");
+ }
+};
+
+// Parent class for all Immediates which read a u32v index value in their
+// constructor.
+template <Decoder::ValidateFlag validate>
+struct IndexImmediate {
uint32_t index;
- ValueType type = kWasmVoid;
- const WasmGlobal* global = nullptr;
uint32_t length;
- inline GlobalIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "global index");
+ IndexImmediate(Decoder* decoder, const byte* pc, const char* name) {
+ index = decoder->read_u32v<validate>(pc, &length, name);
}
};
template <Decoder::ValidateFlag validate>
+struct ExceptionIndexImmediate : public IndexImmediate<validate> {
+ const WasmException* exception = nullptr;
+
+ ExceptionIndexImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "exception index") {}
+};
+
+template <Decoder::ValidateFlag validate>
+struct GlobalIndexImmediate : public IndexImmediate<validate> {
+ const WasmGlobal* global = nullptr;
+
+ GlobalIndexImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "global index") {}
+};
+
+template <Decoder::ValidateFlag validate>
+struct StructIndexImmediate : public IndexImmediate<validate> {
+ const StructType* struct_type = nullptr;
+
+ StructIndexImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "struct index") {}
+};
+
+template <Decoder::ValidateFlag validate>
+struct ArrayIndexImmediate : public IndexImmediate<validate> {
+ const ArrayType* array_type = nullptr;
+
+ ArrayIndexImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "array index") {}
+};
+template <Decoder::ValidateFlag validate>
+struct CallFunctionImmediate : public IndexImmediate<validate> {
+ const FunctionSig* sig = nullptr;
+
+ CallFunctionImmediate(Decoder* decoder, const byte* pc)
+ : IndexImmediate<validate>(decoder, pc, "function index") {}
+};
+
+template <Decoder::ValidateFlag validate>
struct SelectTypeImmediate {
uint32_t length;
ValueType type;
- inline SelectTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module) {
+ SelectTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc, const WasmModule* module) {
uint8_t num_types =
decoder->read_u32v<validate>(pc, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
@@ -522,8 +553,8 @@ struct BlockTypeImmediate {
uint32_t sig_index = 0;
const FunctionSig* sig = nullptr;
- inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module) {
+ BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc, const WasmModule* module) {
int64_t block_type =
decoder->read_i33v<validate>(pc, &length, "block type");
if (block_type < 0) {
@@ -569,117 +600,32 @@ template <Decoder::ValidateFlag validate>
struct BranchDepthImmediate {
uint32_t depth;
uint32_t length;
- inline BranchDepthImmediate(Decoder* decoder, const byte* pc) {
+ BranchDepthImmediate(Decoder* decoder, const byte* pc) {
depth = decoder->read_u32v<validate>(pc, &length, "branch depth");
}
};
template <Decoder::ValidateFlag validate>
-struct FunctionIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 1;
- inline FunctionIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "function index");
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct MemoryIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 1;
- inline MemoryIndexImmediate() = default;
- inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u8<validate>(pc, "memory index");
- if (!VALIDATE(index == 0)) {
- DecodeError<validate>(decoder, pc, "expected memory index 0, found %u",
- index);
- }
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct TableIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 1;
- inline TableIndexImmediate() = default;
- inline TableIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "table index");
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct TypeIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 1;
- inline TypeIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "type index");
- }
-};
-
-// TODO(jkummerow): Introduce a common superclass for StructIndexImmediate and
-// ArrayIndexImmediate? Maybe even FunctionIndexImmediate too?
-template <Decoder::ValidateFlag validate>
-struct StructIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 0;
- const StructType* struct_type = nullptr;
- inline StructIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "struct index");
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct FieldIndexImmediate {
- StructIndexImmediate<validate> struct_index;
- uint32_t index = 0;
- uint32_t length = 0;
- inline FieldIndexImmediate(Decoder* decoder, const byte* pc)
- : struct_index(decoder, pc) {
- index = decoder->read_u32v<validate>(pc + struct_index.length, &length,
- "field index");
- length += struct_index.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct ArrayIndexImmediate {
- uint32_t index = 0;
- uint32_t length = 0;
- const ArrayType* array_type = nullptr;
- inline ArrayIndexImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "array index");
- }
+struct FieldImmediate {
+ StructIndexImmediate<validate> struct_imm;
+ IndexImmediate<validate> field_imm;
+ uint32_t length;
+ FieldImmediate(Decoder* decoder, const byte* pc)
+ : struct_imm(decoder, pc),
+ field_imm(decoder, pc + struct_imm.length, "field index"),
+ length(struct_imm.length + field_imm.length) {}
};
template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
- uint32_t table_index;
- uint32_t sig_index;
- const FunctionSig* sig = nullptr;
- uint32_t length = 0;
- inline CallIndirectImmediate(const WasmFeatures enabled, Decoder* decoder,
- const byte* pc) {
- uint32_t len = 0;
- sig_index = decoder->read_u32v<validate>(pc, &len, "signature index");
- TableIndexImmediate<validate> table(decoder, pc + len);
- if (!VALIDATE((table.index == 0 && table.length == 1) ||
- enabled.has_reftypes())) {
- DecodeError<validate>(decoder, pc + len,
- "expected table index 0, found %u", table.index);
- }
- table_index = table.index;
- length = len + table.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct CallFunctionImmediate {
- uint32_t index;
- const FunctionSig* sig = nullptr;
+ IndexImmediate<validate> sig_imm;
+ IndexImmediate<validate> table_imm;
uint32_t length;
- inline CallFunctionImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "function index");
- }
+ const FunctionSig* sig = nullptr;
+ CallIndirectImmediate(Decoder* decoder, const byte* pc)
+ : sig_imm(decoder, pc, "singature index"),
+ table_imm(decoder, pc + sig_imm.length, "table index"),
+ length(sig_imm.length + table_imm.length) {}
};
template <Decoder::ValidateFlag validate>
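
The subclasses above all follow one pattern; as a hedged sketch (this
particular immediate is hypothetical, not part of the patch), a further
u32v-indexed immediate would just forward its error label to the parent:

    template <Decoder::ValidateFlag validate>
    struct TagIndexImmediate : public IndexImmediate<validate> {
      const WasmException* tag = nullptr;  // filled in during validation

      TagIndexImmediate(Decoder* decoder, const byte* pc)
          : IndexImmediate<validate>(decoder, pc, "tag index") {}
    };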
@@ -687,7 +633,7 @@ struct BranchTableImmediate {
uint32_t table_count;
const byte* start;
const byte* table;
- inline BranchTableImmediate(Decoder* decoder, const byte* pc) {
+ BranchTableImmediate(Decoder* decoder, const byte* pc) {
start = pc;
uint32_t len = 0;
table_count = decoder->read_u32v<validate>(pc, &len, "table count");
@@ -733,7 +679,8 @@ class BranchTableIterator {
const uint32_t table_count_; // the count of entries, not including default.
};
-template <Decoder::ValidateFlag validate>
+template <Decoder::ValidateFlag validate,
+ DecodingMode decoding_mode = kFunctionBody>
class WasmDecoder;
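
The new {DecodingMode} parameter is what lets one decoder class serve both
function bodies and initializer expressions. A sketch of the two
instantiations (constructor arguments elided, hence kept as comments):

    // Default mode: full function-body decoding.
    // WasmDecoder<Decoder::kFullValidation> body_decoder(/* zone, module, ... */);
    // Init-expression mode, with the extra restrictions enforced in
    // Validate(GlobalIndexImmediate) further down:
    // WasmDecoder<Decoder::kFullValidation, kInitExpression> init_decoder(/* ... */);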
template <Decoder::ValidateFlag validate>
@@ -741,8 +688,8 @@ struct MemoryAccessImmediate {
uint32_t alignment;
uint64_t offset;
uint32_t length = 0;
- inline MemoryAccessImmediate(Decoder* decoder, const byte* pc,
- uint32_t max_alignment, bool is_memory64) {
+ MemoryAccessImmediate(Decoder* decoder, const byte* pc,
+ uint32_t max_alignment, bool is_memory64) {
uint32_t alignment_length;
alignment =
decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
@@ -760,9 +707,6 @@ struct MemoryAccessImmediate {
pc + alignment_length, &offset_length, "offset");
length = alignment_length + offset_length;
}
- // Defined below, after the definition of WasmDecoder.
- inline MemoryAccessImmediate(WasmDecoder<validate>* decoder, const byte* pc,
- uint32_t max_alignment);
};
// Immediate for SIMD lane operations.
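
A concrete decode, to make the layout above tangible (standard LEB128): a
memory access whose alignment immediate is 2 and whose offset is 16 is encoded
as the two bytes {0x02, 0x10}, both single-byte LEB128 values, so
alignment = 2, offset = 16, and imm.length = alignment_length +
offset_length = 2.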
@@ -771,7 +715,7 @@ struct SimdLaneImmediate {
uint8_t lane;
uint32_t length = 1;
- inline SimdLaneImmediate(Decoder* decoder, const byte* pc) {
+ SimdLaneImmediate(Decoder* decoder, const byte* pc) {
lane = decoder->read_u8<validate>(pc, "lane");
}
};
@@ -781,7 +725,7 @@ template <Decoder::ValidateFlag validate>
struct Simd128Immediate {
uint8_t value[kSimd128Size] = {0};
- inline Simd128Immediate(Decoder* decoder, const byte* pc) {
+ Simd128Immediate(Decoder* decoder, const byte* pc) {
for (uint32_t i = 0; i < kSimd128Size; ++i) {
value[i] = decoder->read_u8<validate>(pc + i, "value");
}
@@ -790,90 +734,60 @@ struct Simd128Immediate {
template <Decoder::ValidateFlag validate>
struct MemoryInitImmediate {
- uint32_t data_segment_index = 0;
+ IndexImmediate<validate> data_segment;
MemoryIndexImmediate<validate> memory;
- unsigned length = 0;
-
- inline MemoryInitImmediate(Decoder* decoder, const byte* pc) {
- uint32_t len = 0;
- data_segment_index =
- decoder->read_u32v<validate>(pc, &len, "data segment index");
- memory = MemoryIndexImmediate<validate>(decoder, pc + len);
- length = len + memory.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct DataDropImmediate {
- uint32_t index;
- unsigned length;
+ uint32_t length;
- inline DataDropImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "data segment index");
- }
+ MemoryInitImmediate(Decoder* decoder, const byte* pc)
+ : data_segment(decoder, pc, "data segment index"),
+ memory(decoder, pc + data_segment.length),
+ length(data_segment.length + memory.length) {}
};
template <Decoder::ValidateFlag validate>
struct MemoryCopyImmediate {
MemoryIndexImmediate<validate> memory_src;
MemoryIndexImmediate<validate> memory_dst;
- unsigned length = 0;
+ uint32_t length;
- inline MemoryCopyImmediate(Decoder* decoder, const byte* pc) {
- memory_src = MemoryIndexImmediate<validate>(decoder, pc);
- memory_dst =
- MemoryIndexImmediate<validate>(decoder, pc + memory_src.length);
- length = memory_src.length + memory_dst.length;
- }
+ MemoryCopyImmediate(Decoder* decoder, const byte* pc)
+ : memory_src(decoder, pc),
+ memory_dst(decoder, pc + memory_src.length),
+ length(memory_src.length + memory_dst.length) {}
};
template <Decoder::ValidateFlag validate>
struct TableInitImmediate {
- uint32_t elem_segment_index = 0;
- TableIndexImmediate<validate> table;
- unsigned length = 0;
-
- inline TableInitImmediate(Decoder* decoder, const byte* pc) {
- uint32_t len = 0;
- elem_segment_index =
- decoder->read_u32v<validate>(pc, &len, "elem segment index");
- table = TableIndexImmediate<validate>(decoder, pc + len);
- length = len + table.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
-struct ElemDropImmediate {
- uint32_t index;
- unsigned length;
+ IndexImmediate<validate> element_segment;
+ IndexImmediate<validate> table;
+ uint32_t length;
- inline ElemDropImmediate(Decoder* decoder, const byte* pc) {
- index = decoder->read_u32v<validate>(pc, &length, "elem segment index");
- }
+ TableInitImmediate(Decoder* decoder, const byte* pc)
+ : element_segment(decoder, pc, "element segment index"),
+ table(decoder, pc + element_segment.length, "table index"),
+ length(element_segment.length + table.length) {}
};
template <Decoder::ValidateFlag validate>
struct TableCopyImmediate {
- TableIndexImmediate<validate> table_dst;
- TableIndexImmediate<validate> table_src;
- unsigned length = 0;
+ IndexImmediate<validate> table_dst;
+ IndexImmediate<validate> table_src;
+ uint32_t length;
- inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
- table_dst = TableIndexImmediate<validate>(decoder, pc);
- table_src = TableIndexImmediate<validate>(decoder, pc + table_dst.length);
- length = table_src.length + table_dst.length;
- }
+ TableCopyImmediate(Decoder* decoder, const byte* pc)
+ : table_dst(decoder, pc, "table index"),
+ table_src(decoder, pc + table_dst.length, "table index"),
+ length(table_src.length + table_dst.length) {}
};
template <Decoder::ValidateFlag validate>
struct HeapTypeImmediate {
uint32_t length = 1;
- HeapType type = HeapType(HeapType::kBottom);
- inline HeapTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
- const byte* pc, const WasmModule* module) {
- type = value_type_reader::read_heap_type<validate>(decoder, pc, &length,
- module, enabled);
- }
+ HeapType type;
+ HeapTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc, const WasmModule* module)
+ : type(value_type_reader::read_heap_type<validate>(decoder, pc, &length,
+ module, enabled)) {}
};
template <Decoder::ValidateFlag validate>
@@ -930,7 +844,6 @@ enum ControlKind : uint8_t {
kControlTry,
kControlTryCatch,
kControlTryCatchAll,
- kControlTryUnwind
};
enum Reachability : uint8_t {
@@ -948,6 +861,8 @@ struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
uint32_t locals_count = 0; // Additional locals introduced in this 'let'.
uint32_t stack_depth = 0; // Stack height at the beginning of the construct.
+ uint32_t init_stack_depth = 0; // Height of "locals initialization" stack
+ // at the beginning of the construct.
int32_t previous_catch = -1; // Depth of the innermost catch containing this
// 'try'.
Reachability reachability = kReachable;
@@ -959,11 +874,13 @@ struct ControlBase : public PcForErrors<validate> {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
- const uint8_t* pc, Reachability reachability)
+ uint32_t init_stack_depth, const uint8_t* pc,
+ Reachability reachability)
: PcForErrors<validate>(pc),
kind(kind),
locals_count(locals_count),
stack_depth(stack_depth),
+ init_stack_depth(init_stack_depth),
reachability(reachability),
start_merge(reachability == kReachable) {
DCHECK(kind == kControlLet || locals_count == 0);
@@ -992,13 +909,11 @@ struct ControlBase : public PcForErrors<validate> {
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
bool is_try_catchall() const { return kind == kControlTryCatchAll; }
- bool is_try_unwind() const { return kind == kControlTryUnwind; }
bool is_try() const {
- return is_incomplete_try() || is_try_catch() || is_try_catchall() ||
- is_try_unwind();
+ return is_incomplete_try() || is_try_catch() || is_try_catchall();
}
- inline Merge<Value>* br_merge() {
+ Merge<Value>* br_merge() {
return is_loop() ? &this->start_merge : &this->end_merge;
}
};
@@ -1006,159 +921,181 @@ struct ControlBase : public PcForErrors<validate> {
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
-#define INTERFACE_FUNCTIONS(F) \
- /* General: */ \
- F(StartFunction) \
- F(StartFunctionBody, Control* block) \
- F(FinishFunction) \
- F(OnFirstError) \
- F(NextInstruction, WasmOpcode) \
- /* Control: */ \
- F(Block, Control* block) \
- F(Loop, Control* block) \
- F(Try, Control* block) \
- F(If, const Value& cond, Control* if_block) \
- F(FallThruTo, Control* c) \
- F(PopControl, Control* block) \
- /* Instructions: */ \
- F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
- Value* result) \
- F(I32Const, Value* result, int32_t value) \
- F(I64Const, Value* result, int64_t value) \
- F(F32Const, Value* result, float value) \
- F(F64Const, Value* result, double value) \
- F(RefNull, ValueType type, Value* result) \
- F(RefFunc, uint32_t function_index, Value* result) \
- F(RefAsNonNull, const Value& arg, Value* result) \
- F(Drop) \
- F(DoReturn, uint32_t drop_values) \
- F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(LocalTee, const Value& value, Value* result, \
- const LocalIndexImmediate<validate>& imm) \
- F(AllocateLocals, Vector<Value> local_values) \
- F(DeallocateLocals, uint32_t count) \
- F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(TableGet, const Value& index, Value* result, \
- const TableIndexImmediate<validate>& imm) \
- F(TableSet, const Value& index, const Value& value, \
- const TableIndexImmediate<validate>& imm) \
- F(Trap, TrapReason reason) \
- F(NopForTestingUnsupportedInLiftoff) \
- F(Select, const Value& cond, const Value& fval, const Value& tval, \
- Value* result) \
- F(BrOrRet, uint32_t depth, uint32_t drop_values) \
- F(BrIf, const Value& cond, uint32_t depth) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
- F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, Value* result) \
- F(LoadTransform, LoadType type, LoadTransformationKind transform, \
- const MemoryAccessImmediate<validate>& imm, const Value& index, \
- Value* result) \
- F(LoadLane, LoadType type, const Value& value, const Value& index, \
- const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
- Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value) \
- F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value, const uint8_t laneidx) \
- F(CurrentMemoryPages, Value* result) \
- F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
- Value returns[]) \
- F(CallRef, const Value& func_ref, const FunctionSig* sig, \
- uint32_t sig_index, const Value args[], const Value returns[]) \
- F(ReturnCallRef, const Value& func_ref, const FunctionSig* sig, \
- uint32_t sig_index, const Value args[]) \
- F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
- const Value args[]) \
- F(ReturnCallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[]) \
- F(BrOnNull, const Value& ref_object, uint32_t depth) \
- F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
- F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
- const Vector<Value> inputs, Value* result) \
- F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
- F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>& imm, \
- const Vector<Value>& args) \
- F(Rethrow, Control* block) \
- F(CatchException, const ExceptionIndexImmediate<validate>& imm, \
- Control* block, Vector<Value> caught_values) \
- F(Delegate, uint32_t depth, Control* block) \
- F(CatchAll, Control* block) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
- F(AtomicFence) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(DataDrop, const DataDropImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
- const Value& value, const Value& size) \
- F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
- F(ElemDrop, const ElemDropImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
- F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
- const Value& delta, Value* result) \
- F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
- F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
- const Value& value, const Value& count) \
- F(StructNewWithRtt, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, const Value args[], Value* result) \
- F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, Value* result) \
- F(StructGet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, bool is_signed, Value* result) \
- F(StructSet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, const Value& field_value) \
- F(ArrayNewWithRtt, const ArrayIndexImmediate<validate>& imm, \
- const Value& length, const Value& initial_value, const Value& rtt, \
- Value* result) \
- F(ArrayNewDefault, const ArrayIndexImmediate<validate>& imm, \
- const Value& length, const Value& rtt, Value* result) \
- F(ArrayGet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- bool is_signed, Value* result) \
- F(ArraySet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- const Value& value) \
- F(ArrayLen, const Value& array_obj, Value* result) \
- F(I31New, const Value& input, Value* result) \
- F(I31GetS, const Value& input, Value* result) \
- F(I31GetU, const Value& input, Value* result) \
- F(RttCanon, uint32_t type_index, Value* result) \
- F(RttSub, uint32_t type_index, const Value& parent, Value* result) \
- F(RefTest, const Value& obj, const Value& rtt, Value* result) \
- F(RefCast, const Value& obj, const Value& rtt, Value* result) \
- F(AssertNull, const Value& obj, Value* result) \
- F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
- uint32_t depth) \
- F(BrOnCastFail, const Value& obj, const Value& rtt, \
- Value* result_on_fallthrough, uint32_t depth) \
- F(RefIsData, const Value& object, Value* result) \
- F(RefAsData, const Value& object, Value* result) \
- F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(RefIsFunc, const Value& object, Value* result) \
- F(RefAsFunc, const Value& object, Value* result) \
- F(BrOnFunc, const Value& object, Value* value_on_branch, uint32_t br_depth) \
- F(RefIsI31, const Value& object, Value* result) \
- F(RefAsI31, const Value& object, Value* result) \
- F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+#define INTERFACE_FUNCTIONS(F) \
+ INTERFACE_META_FUNCTIONS(F) \
+ INTERFACE_CONSTANT_FUNCTIONS(F) \
+ INTERFACE_NON_CONSTANT_FUNCTIONS(F)
+
+#define INTERFACE_META_FUNCTIONS(F) \
+ F(StartFunction) \
+ F(StartFunctionBody, Control* block) \
+ F(FinishFunction) \
+ F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
F(Forward, const Value& from, Value* to)
+#define INTERFACE_CONSTANT_FUNCTIONS(F) \
+ F(I32Const, Value* result, int32_t value) \
+ F(I64Const, Value* result, int64_t value) \
+ F(F32Const, Value* result, float value) \
+ F(F64Const, Value* result, double value) \
+ F(S128Const, const Simd128Immediate<validate>& imm, Value* result) \
+ F(RefNull, ValueType type, Value* result) \
+ F(RefFunc, uint32_t function_index, Value* result) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(StructNewWithRtt, const StructIndexImmediate<validate>& imm, \
+ const Value& rtt, const Value args[], Value* result) \
+ F(ArrayInit, const ArrayIndexImmediate<validate>& imm, \
+ const base::Vector<Value>& elements, const Value& rtt, Value* result) \
+ F(RttCanon, uint32_t type_index, Value* result) \
+ F(RttSub, uint32_t type_index, const Value& parent, Value* result, \
+ WasmRttSubMode mode) \
+ F(DoReturn, uint32_t drop_values)
+
+#define INTERFACE_NON_CONSTANT_FUNCTIONS(F) \
+ /* Control: */ \
+ F(Block, Control* block) \
+ F(Loop, Control* block) \
+ F(Try, Control* block) \
+ F(If, const Value& cond, Control* if_block) \
+ F(FallThruTo, Control* c) \
+ F(PopControl, Control* block) \
+ /* Instructions: */ \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
+ F(RefAsNonNull, const Value& arg, Value* result) \
+ F(Drop) \
+ F(LocalGet, Value* result, const IndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const IndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
+ const IndexImmediate<validate>& imm) \
+ F(AllocateLocals, base::Vector<Value> local_values) \
+ F(DeallocateLocals, uint32_t count) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(TableGet, const Value& index, Value* result, \
+ const IndexImmediate<validate>& imm) \
+ F(TableSet, const Value& index, const Value& value, \
+ const IndexImmediate<validate>& imm) \
+ F(Trap, TrapReason reason) \
+ F(NopForTestingUnsupportedInLiftoff) \
+ F(Select, const Value& cond, const Value& fval, const Value& tval, \
+ Value* result) \
+ F(BrOrRet, uint32_t depth, uint32_t drop_values) \
+ F(BrIf, const Value& cond, uint32_t depth) \
+ F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(Else, Control* if_block) \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, Value* result) \
+ F(LoadTransform, LoadType type, LoadTransformationKind transform, \
+ const MemoryAccessImmediate<validate>& imm, const Value& index, \
+ Value* result) \
+ F(LoadLane, LoadType type, const Value& value, const Value& index, \
+ const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
+ Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value) \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value, const uint8_t laneidx) \
+ F(CurrentMemoryPages, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
+ F(CallDirect, const CallFunctionImmediate<validate>& imm, \
+ const Value args[], Value returns[]) \
+ F(CallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[], \
+ Value returns[]) \
+ F(CallRef, const Value& func_ref, const FunctionSig* sig, \
+ uint32_t sig_index, const Value args[], const Value returns[]) \
+ F(ReturnCallRef, const Value& func_ref, const FunctionSig* sig, \
+ uint32_t sig_index, const Value args[]) \
+ F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[]) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
+ F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ const base::Vector<Value> inputs, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd128Immediate<validate>& imm, \
+ const Value& input0, const Value& input1, Value* result) \
+ F(Throw, const ExceptionIndexImmediate<validate>& imm, \
+ const base::Vector<Value>& args) \
+ F(Rethrow, Control* block) \
+ F(CatchException, const ExceptionIndexImmediate<validate>& imm, \
+ Control* block, base::Vector<Value> caught_values) \
+ F(Delegate, uint32_t depth, Control* block) \
+ F(CatchAll, Control* block) \
+ F(AtomicOp, WasmOpcode opcode, base::Vector<Value> args, \
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(DataDrop, const IndexImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, \
+ base::Vector<Value> args) \
+ F(ElemDrop, const IndexImmediate<validate>& imm) \
+ F(TableCopy, const TableCopyImmediate<validate>& imm, \
+ base::Vector<Value> args) \
+ F(TableGrow, const IndexImmediate<validate>& imm, const Value& value, \
+ const Value& delta, Value* result) \
+ F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
+ F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
+ const Value& value, const Value& count) \
+ F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
+ const Value& rtt, Value* result) \
+ F(StructGet, const Value& struct_object, \
+ const FieldImmediate<validate>& field, bool is_signed, Value* result) \
+ F(StructSet, const Value& struct_object, \
+ const FieldImmediate<validate>& field, const Value& field_value) \
+ F(ArrayNewWithRtt, const ArrayIndexImmediate<validate>& imm, \
+ const Value& length, const Value& initial_value, const Value& rtt, \
+ Value* result) \
+ F(ArrayNewDefault, const ArrayIndexImmediate<validate>& imm, \
+ const Value& length, const Value& rtt, Value* result) \
+ F(ArrayGet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ bool is_signed, Value* result) \
+ F(ArraySet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ const Value& value) \
+ F(ArrayLen, const Value& array_obj, Value* result) \
+ F(ArrayCopy, const Value& src, const Value& src_index, const Value& dst, \
+ const Value& dst_index, const Value& length) \
+ F(I31New, const Value& input, Value* result) \
+ F(I31GetS, const Value& input, Value* result) \
+ F(I31GetU, const Value& input, Value* result) \
+ F(RefTest, const Value& obj, const Value& rtt, Value* result) \
+ F(RefCast, const Value& obj, const Value& rtt, Value* result) \
+ F(AssertNull, const Value& obj, Value* result) \
+ F(BrOnCast, const Value& obj, const Value& rtt, Value* result_on_branch, \
+ uint32_t depth) \
+ F(BrOnCastFail, const Value& obj, const Value& rtt, \
+ Value* result_on_fallthrough, uint32_t depth) \
+ F(RefIsFunc, const Value& object, Value* result) \
+ F(RefIsData, const Value& object, Value* result) \
+ F(RefIsI31, const Value& object, Value* result) \
+ F(RefAsFunc, const Value& object, Value* result) \
+ F(RefAsData, const Value& object, Value* result) \
+ F(RefAsI31, const Value& object, Value* result) \
+ F(BrOnFunc, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnData, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnI31, const Value& object, Value* value_on_branch, uint32_t br_depth) \
+ F(BrOnNonFunc, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth) \
+ F(BrOnNonData, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth) \
+ F(BrOnNonI31, const Value& object, Value* value_on_fallthrough, \
+ uint32_t br_depth)
+
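For orientation (illustrative, not part of the patch): each F(Name, args...)
entry above corresponds to one callback on the interface object that
instantiates the full decoder. A hypothetical fragment, where {FullDecoder}
and {Value} are the instantiating decoder's types:

    class MyInterface {
     public:
      void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
        // e.g. record or constant-fold the value
      }
      void DoReturn(FullDecoder* decoder, uint32_t drop_values) {
        // e.g. emit a return, dropping {drop_values} values from the stack
      }
    };
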
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
-template <Decoder::ValidateFlag validate>
+template <Decoder::ValidateFlag validate, DecodingMode decoding_mode>
class WasmDecoder : public Decoder {
public:
WasmDecoder(Zone* zone, const WasmModule* module, const WasmFeatures& enabled,
@@ -1166,6 +1103,8 @@ class WasmDecoder : public Decoder {
const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, end, buffer_offset),
local_types_(zone),
+ initialized_locals_(zone),
+ locals_initializers_stack_(zone),
module_(module),
enabled_(enabled),
detected_(detected),
@@ -1300,7 +1239,7 @@ class WasmDecoder : public Decoder {
}
case kExprLocalSet:
case kExprLocalTee: {
- LocalIndexImmediate<validate> imm(decoder, pc + 1);
+ IndexImmediate<validate> imm(decoder, pc + 1, "local index");
// Unverified code might have an out-of-bounds index.
if (imm.index >= local_offsets[depth] &&
imm.index - local_offsets[depth] < locals_count) {
@@ -1327,83 +1266,70 @@ class WasmDecoder : public Decoder {
return VALIDATE(decoder->ok()) ? assigned : nullptr;
}
- inline bool Validate(const byte* pc, LocalIndexImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < num_locals())) {
- DecodeError(pc, "invalid local index: %u", imm.index);
- return false;
- }
- return true;
- }
-
- inline bool Complete(ExceptionIndexImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->exceptions.size())) return false;
- imm.exception = &module_->exceptions[imm.index];
- return true;
- }
-
- inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
- if (!Complete(imm)) {
+ bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->exceptions.size())) {
DecodeError(pc, "Invalid exception index: %u", imm.index);
return false;
}
+ imm.exception = &module_->exceptions[imm.index];
return true;
}
- inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
+ bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->globals.size())) {
- DecodeError(pc, "invalid global index: %u", imm.index);
+ DecodeError(pc, "Invalid global index: %u", imm.index);
return false;
}
imm.global = &module_->globals[imm.index];
- imm.type = imm.global->type;
- return true;
- }
- inline bool Complete(StructIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_->has_struct(imm.index))) return false;
- imm.struct_type = module_->struct_type(imm.index);
- return true;
- }
+ if (decoding_mode == kInitExpression) {
+ if (!VALIDATE(!imm.global->mutability)) {
+ this->DecodeError(pc,
+ "mutable globals cannot be used in initializer "
+ "expressions");
+ return false;
+ }
+ if (!VALIDATE(imm.global->imported || this->enabled_.has_gc())) {
+ this->DecodeError(
+ pc,
+ "non-imported globals cannot be used in initializer expressions");
+ return false;
+ }
+ }
- inline bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
- if (Complete(imm)) return true;
- DecodeError(pc, "invalid struct index: %u", imm.index);
- return false;
+ return true;
}
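
Concretely: under the kInitExpression rules just added, a global.get in an
initializer expression may only name an immutable global, and (unless the GC
feature is enabled) only an imported one, since the values of module-defined
globals are themselves produced by initializer expressions and would introduce
evaluation-order dependencies.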
- inline bool Validate(const byte* pc, FieldIndexImmediate<validate>& imm) {
- if (!Validate(pc, imm.struct_index)) return false;
- if (!VALIDATE(imm.index < imm.struct_index.struct_type->field_count())) {
- DecodeError(pc + imm.struct_index.length, "invalid field index: %u",
- imm.index);
+ bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_->has_struct(imm.index))) {
+ DecodeError(pc, "invalid struct index: %u", imm.index);
return false;
}
+ imm.struct_type = module_->struct_type(imm.index);
return true;
}
- inline bool Validate(const byte* pc, TypeIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_->has_type(imm.index))) {
- DecodeError(pc, "invalid type index: %u", imm.index);
+ bool Validate(const byte* pc, FieldImmediate<validate>& imm) {
+ if (!Validate(pc, imm.struct_imm)) return false;
+ if (!VALIDATE(imm.field_imm.index <
+ imm.struct_imm.struct_type->field_count())) {
+ DecodeError(pc + imm.struct_imm.length, "invalid field index: %u",
+ imm.field_imm.index);
return false;
}
return true;
}
- inline bool Complete(ArrayIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_->has_array(imm.index))) return false;
- imm.array_type = module_->array_type(imm.index);
- return true;
- }
-
- inline bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
- if (!Complete(imm)) {
+ bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_->has_array(imm.index))) {
DecodeError(pc, "invalid array index: %u", imm.index);
return false;
}
+ imm.array_type = module_->array_type(imm.index);
return true;
}
- inline bool CanReturnCall(const FunctionSig* target_sig) {
+ bool CanReturnCall(const FunctionSig* target_sig) {
if (target_sig == nullptr) return false;
size_t num_returns = sig_->return_count();
if (num_returns != target_sig->return_count()) return false;
@@ -1413,61 +1339,52 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Complete(CallFunctionImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->functions.size())) return false;
- imm.sig = module_->functions[imm.index].sig;
- return true;
- }
-
- inline bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
- if (!Complete(imm)) {
- DecodeError(pc, "invalid function index: %u", imm.index);
+ bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->functions.size())) {
+ DecodeError(pc, "function index #%u is out of bounds", imm.index);
return false;
}
+ imm.sig = module_->functions[imm.index].sig;
return true;
}
- inline bool Complete(CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
- imm.sig = module_->signature(imm.sig_index);
- return true;
- }
-
- inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
- // Validate immediate table index.
- if (!VALIDATE(imm.table_index < module_->tables.size())) {
- DecodeError(pc, "call_indirect: table index immediate out of bounds");
+ bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
+ if (!ValidateSignature(pc, imm.sig_imm)) return false;
+ // call_indirect is not behind the reftypes feature, so we have to
+ // enforce the older format (single-byte table index 0) when reftypes is
+ // not enabled.
+ if (!VALIDATE((imm.table_imm.index == 0 && imm.table_imm.length == 1) ||
+ this->enabled_.has_reftypes())) {
+ DecodeError(pc + imm.sig_imm.length, "expected table index 0, found %u",
+ imm.table_imm.index);
+ return false;
+ }
+ if (!ValidateTable(pc + imm.sig_imm.length, imm.table_imm)) {
return false;
}
- ValueType table_type = module_->tables[imm.table_index].type;
+ ValueType table_type = module_->tables[imm.table_imm.index].type;
if (!VALIDATE(IsSubtypeOf(table_type, kWasmFuncRef, module_))) {
DecodeError(
pc, "call_indirect: immediate table #%u is not of a function type",
- imm.table_index);
- return false;
- }
-
- // Validate immediate signature index.
- if (!Complete(imm)) {
- DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
+ imm.table_imm.index);
return false;
}
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
- ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
+ ValueType immediate_type = ValueType::Ref(imm.sig_imm.index, kNonNullable);
if (!VALIDATE(IsSubtypeOf(immediate_type, table_type, module_))) {
DecodeError(pc,
"call_indirect: Immediate signature #%u is not a subtype of "
"immediate table #%u",
- imm.sig_index, imm.table_index);
+ imm.sig_imm.index, imm.table_imm.index);
return false;
}
+
+ imm.sig = module_->signature(imm.sig_imm.index);
return true;
}
- inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
- size_t control_depth) {
+ bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
+ size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
DecodeError(pc, "invalid branch depth: %u", imm.depth);
return false;
@@ -1475,8 +1392,8 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
- size_t block_depth) {
+ bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
+ size_t block_depth) {
if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
DecodeError(pc, "invalid table count (> max br_table size): %u",
imm.table_count);
@@ -1485,8 +1402,8 @@ class WasmDecoder : public Decoder {
return checkAvailable(imm.table_count);
}
- inline bool Validate(const byte* pc, WasmOpcode opcode,
- SimdLaneImmediate<validate>& imm) {
+ bool Validate(const byte* pc, WasmOpcode opcode,
+ SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
switch (opcode) {
case kExprF64x2ExtractLane:
@@ -1531,7 +1448,7 @@ class WasmDecoder : public Decoder {
}
}
- inline bool Validate(const byte* pc, Simd128Immediate<validate>& imm) {
+ bool Validate(const byte* pc, Simd128Immediate<validate>& imm) {
uint8_t max_lane = 0;
for (uint32_t i = 0; i < kSimd128Size; ++i) {
max_lane = std::max(max_lane, imm.value[i]);
@@ -1544,67 +1461,79 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Complete(BlockTypeImmediate<validate>& imm) {
+ bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
if (imm.type != kWasmBottom) return true;
- if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
- imm.sig = module_->signature(imm.sig_index);
- return true;
- }
-
- inline bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
- if (!Complete(imm)) {
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) {
DecodeError(pc, "block type index %u is not a signature definition",
imm.sig_index);
return false;
}
+ imm.sig = module_->signature(imm.sig_index);
return true;
}
- inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->functions.size())) {
- DecodeError(pc, "invalid function index: %u", imm.index);
+ bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
+ if (!VALIDATE(this->module_->has_memory)) {
+ this->DecodeError(pc, "memory instruction with no memory");
return false;
}
- if (!VALIDATE(module_->functions[imm.index].declared)) {
- DecodeError(pc, "undeclared reference to function #%u", imm.index);
+ if (!VALIDATE(imm.index == uint8_t{0})) {
+ DecodeError(pc, "expected memory index 0, found %u", imm.index);
return false;
}
return true;
}
- inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_->has_memory)) {
- DecodeError(pc, "memory instruction with no memory");
+ bool Validate(const byte* pc, MemoryAccessImmediate<validate>& imm) {
+ if (!VALIDATE(this->module_->has_memory)) {
+ this->DecodeError(pc, "memory instruction with no memory");
return false;
}
return true;
}
- inline bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
- if (!VALIDATE(imm.data_segment_index <
- module_->num_declared_data_segments)) {
- DecodeError(pc, "invalid data segment index: %u", imm.data_segment_index);
+ bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
+ return ValidateDataSegment(pc, imm.data_segment) &&
+ Validate(pc + imm.data_segment.length, imm.memory);
+ }
+
+ bool Validate(const byte* pc, MemoryCopyImmediate<validate>& imm) {
+ return Validate(pc, imm.memory_src) &&
+ Validate(pc + imm.memory_src.length, imm.memory_dst);
+ }
+
+ bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
+ if (!ValidateElementSegment(pc, imm.element_segment)) return false;
+ if (!ValidateTable(pc + imm.element_segment.length, imm.table)) {
return false;
}
- if (!Validate(pc + imm.length - imm.memory.length, imm.memory))
+ ValueType elem_type =
+ module_->elem_segments[imm.element_segment.index].type;
+ if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
+ module_))) {
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table.index,
+ elem_type.name().c_str());
return false;
+ }
return true;
}
- inline bool Validate(const byte* pc, DataDropImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
- DecodeError(pc, "invalid data segment index: %u", imm.index);
+ bool Validate(const byte* pc, TableCopyImmediate<validate>& imm) {
+ if (!ValidateTable(pc, imm.table_src)) return false;
+ if (!ValidateTable(pc + imm.table_src.length, imm.table_dst)) return false;
+ ValueType src_type = module_->tables[imm.table_src.index].type;
+ if (!VALIDATE(IsSubtypeOf(
+ src_type, module_->tables[imm.table_dst.index].type, module_))) {
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table_dst.index,
+ src_type.name().c_str());
return false;
}
return true;
}
- inline bool Validate(const byte* pc, MemoryCopyImmediate<validate>& imm) {
- return Validate(pc, imm.memory_src) &&
- Validate(pc + imm.memory_src.length, imm.memory_dst);
- }
-
- inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
+ // The following Validate* functions all validate an IndexImmediate, albeit
+ // differently according to context.
+ bool ValidateTable(const byte* pc, IndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->tables.size())) {
DecodeError(pc, "invalid table index: %u", imm.index);
return false;
@@ -1612,41 +1541,54 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
- if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
- DecodeError(pc, "invalid element segment index: %u",
- imm.elem_segment_index);
+ bool ValidateElementSegment(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->elem_segments.size())) {
+ DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
}
- if (!Validate(pc + imm.length - imm.table.length, imm.table)) {
+ return true;
+ }
+
+ bool ValidateLocal(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < num_locals())) {
+ DecodeError(pc, "invalid local index: %u", imm.index);
return false;
}
- ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
- if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
- module_))) {
- DecodeError(pc, "table %u is not a super-type of %s", imm.table.index,
- elem_type.name().c_str());
+ return true;
+ }
+
+ bool ValidateType(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_->has_type(imm.index))) {
+ DecodeError(pc, "invalid type index: %u", imm.index);
return false;
}
return true;
}
- inline bool Validate(const byte* pc, ElemDropImmediate<validate>& imm) {
- if (!VALIDATE(imm.index < module_->elem_segments.size())) {
- DecodeError(pc, "invalid element segment index: %u", imm.index);
+ bool ValidateSignature(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_->has_signature(imm.index))) {
+ DecodeError(pc, "invalid signature index: %u", imm.index);
return false;
}
return true;
}
- inline bool Validate(const byte* pc, TableCopyImmediate<validate>& imm) {
- if (!Validate(pc, imm.table_src)) return false;
- if (!Validate(pc + imm.table_src.length, imm.table_dst)) return false;
- ValueType src_type = module_->tables[imm.table_src.index].type;
- if (!VALIDATE(IsSubtypeOf(
- src_type, module_->tables[imm.table_dst.index].type, module_))) {
- DecodeError(pc, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.name().c_str());
+ bool ValidateFunction(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->functions.size())) {
+ DecodeError(pc, "function index #%u is out of bounds", imm.index);
+ return false;
+ }
+ if (decoding_mode == kFunctionBody &&
+ !VALIDATE(module_->functions[imm.index].declared)) {
+ DecodeError(pc, "undeclared reference to function #%u", imm.index);
+ return false;
+ }
+ return true;
+ }
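
// A minimal, self-contained sketch of the pattern shared by the Validate*
// helpers above: a bounds (or existence) check on a plain index plus a
// context-specific error message. Names here are illustrative, not V8's.
#include <cstddef>
#include <cstdint>
#include <cstdio>

bool ValidateIndex(uint32_t index, size_t bound, const char* what) {
  if (index < bound) return true;  // In range: the immediate is valid.
  std::printf("invalid %s index: %u\n", what, index);
  return false;
}
// Usage: ValidateIndex(imm.index, module->tables.size(), "table");
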
+
+ bool ValidateDataSegment(const byte* pc, IndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
+ DecodeError(pc, "invalid data segment index: %u", imm.index);
return false;
}
return true;
@@ -1713,8 +1655,7 @@ class WasmDecoder : public Decoder {
}
case kExprCallIndirect:
case kExprReturnCallIndirect: {
- CallIndirectImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + 1);
+ CallIndirectImmediate<validate> imm(decoder, pc + 1);
return 1 + imm.length;
}
case kExprCallRef:
@@ -1722,7 +1663,6 @@ class WasmDecoder : public Decoder {
case kExprDrop:
case kExprSelect:
case kExprCatchAll:
- case kExprUnwind:
return 1;
case kExprSelectWithType: {
SelectTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc + 1,
@@ -1733,7 +1673,7 @@ class WasmDecoder : public Decoder {
case kExprLocalGet:
case kExprLocalSet:
case kExprLocalTee: {
- LocalIndexImmediate<validate> imm(decoder, pc + 1);
+ IndexImmediate<validate> imm(decoder, pc + 1, "local index");
return 1 + imm.length;
}
case kExprGlobalGet:
@@ -1743,7 +1683,7 @@ class WasmDecoder : public Decoder {
}
case kExprTableGet:
case kExprTableSet: {
- TableIndexImmediate<validate> imm(decoder, pc + 1);
+ IndexImmediate<validate> imm(decoder, pc + 1, "table index");
return 1 + imm.length;
}
case kExprI32Const: {
@@ -1767,7 +1707,7 @@ class WasmDecoder : public Decoder {
return 1;
}
case kExprRefFunc: {
- FunctionIndexImmediate<validate> imm(decoder, pc + 1);
+ IndexImmediate<validate> imm(decoder, pc + 1, "function index");
return 1 + imm.length;
}
case kExprRefAsNonNull:
@@ -1811,7 +1751,8 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(decoder, pc + length);
+ IndexImmediate<validate> imm(decoder, pc + length,
+ "data segment index");
return length + imm.length;
}
case kExprMemoryCopy: {
@@ -1827,7 +1768,8 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(decoder, pc + length);
+ IndexImmediate<validate> imm(decoder, pc + length,
+ "element segment index");
return length + imm.length;
}
case kExprTableCopy: {
@@ -1837,7 +1779,7 @@ class WasmDecoder : public Decoder {
case kExprTableGrow:
case kExprTableSize:
case kExprTableFill: {
- TableIndexImmediate<validate> imm(decoder, pc + length);
+ IndexImmediate<validate> imm(decoder, pc + length, "table index");
return length + imm.length;
}
default:
@@ -1916,7 +1858,7 @@ class WasmDecoder : public Decoder {
case kExprStructGetS:
case kExprStructGetU:
case kExprStructSet: {
- FieldIndexImmediate<validate> imm(decoder, pc + length);
+ FieldImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
case kExprArrayNewWithRtt:
@@ -1929,6 +1871,12 @@ class WasmDecoder : public Decoder {
ArrayIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayCopy: {
+ ArrayIndexImmediate<validate> dst_imm(decoder, pc + length);
+ ArrayIndexImmediate<validate> src_imm(decoder,
+ pc + length + dst_imm.length);
+ return length + dst_imm.length + src_imm.length;
+ }
case kExprBrOnCast:
case kExprBrOnCastFail:
case kExprBrOnData:
@@ -1938,8 +1886,9 @@ class WasmDecoder : public Decoder {
return length + imm.length;
}
case kExprRttCanon:
- case kExprRttSub: {
- TypeIndexImmediate<validate> imm(decoder, pc + length);
+ case kExprRttSub:
+ case kExprRttFreshSub: {
+ IndexImmediate<validate> imm(decoder, pc + length, "type index");
return length + imm.length;
}
case kExprI31New:
@@ -2031,19 +1980,19 @@ class WasmDecoder : public Decoder {
return {0, 1};
case kExprCallFunction: {
CallFunctionImmediate<validate> imm(this, pc + 1);
- CHECK(Complete(imm));
+ CHECK(Validate(pc + 1, imm));
return {imm.sig->parameter_count(), imm.sig->return_count()};
}
case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this->enabled_, this, pc + 1);
- CHECK(Complete(imm));
+ CallIndirectImmediate<validate> imm(this, pc + 1);
+ CHECK(Validate(pc + 1, imm));
// Indirect calls pop an additional argument for the table index.
return {imm.sig->parameter_count() + 1,
imm.sig->return_count()};
}
case kExprThrow: {
ExceptionIndexImmediate<validate> imm(this, pc + 1);
- CHECK(Complete(imm));
+ CHECK(Validate(pc + 1, imm));
DCHECK_EQ(0, imm.exception->sig->return_count());
return {imm.exception->sig->parameter_count(), 0};
}
@@ -2056,7 +2005,6 @@ class WasmDecoder : public Decoder {
case kExprCatch:
case kExprCatchAll:
case kExprDelegate:
- case kExprUnwind:
case kExprRethrow:
case kExprNop:
case kExprNopForTestingUnsupportedInLiftoff:
@@ -2102,6 +2050,7 @@ class WasmDecoder : public Decoder {
case kExprI31GetU:
case kExprArrayLen:
case kExprRttSub:
+ case kExprRttFreshSub:
return {1, 1};
case kExprStructSet:
return {2, 0};
@@ -2116,13 +2065,15 @@ class WasmDecoder : public Decoder {
return {2, 1};
case kExprArraySet:
return {3, 0};
+ case kExprArrayCopy:
+ return {5, 0};
case kExprRttCanon:
return {0, 1};
case kExprArrayNewWithRtt:
return {3, 1};
case kExprStructNewWithRtt: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- this->Complete(imm);
+ StructIndexImmediate<validate> imm(this, pc + 2);
+ CHECK(Validate(pc + 2, imm));
return {imm.struct_type->field_count() + 1, 1};
}
default:
@@ -2138,6 +2089,47 @@ class WasmDecoder : public Decoder {
// clang-format on
}
+ bool is_local_initialized(uint32_t local_index) {
+ return initialized_locals_[local_index];
+ }
+
+ void set_local_initialized(uint32_t local_index) {
+ if (!enabled_.has_nn_locals()) return;
+ // This implicitly covers defaultable locals too (which are always
+ // initialized).
+ if (is_local_initialized(local_index)) return;
+ initialized_locals_[local_index] = true;
+ locals_initializers_stack_.push_back(local_index);
+ }
+
+ uint32_t locals_initialization_stack_depth() const {
+ return static_cast<uint32_t>(locals_initializers_stack_.size());
+ }
+
+ void RollbackLocalsInitialization(uint32_t previous_stack_height) {
+ if (!enabled_.has_nn_locals()) return;
+ while (locals_initializers_stack_.size() > previous_stack_height) {
+ uint32_t local_index = locals_initializers_stack_.back();
+ locals_initializers_stack_.pop_back();
+ initialized_locals_[local_index] = false;
+ }
+ }
+
+ void InitializeInitializedLocalsTracking(int non_defaultable_locals) {
+ initialized_locals_.assign(num_locals_, false);
+ // Parameters count as initialized...
+ const uint32_t num_params = static_cast<uint32_t>(sig_->parameter_count());
+ for (uint32_t i = 0; i < num_params; i++) {
+ initialized_locals_[i] = true;
+ }
+ // ...and so do defaultable locals.
+ for (uint32_t i = num_params; i < num_locals_; i++) {
+ if (local_types_[i].is_defaultable()) initialized_locals_[i] = true;
+ }
+ if (non_defaultable_locals == 0) return;
+ locals_initializers_stack_.reserve(non_defaultable_locals);
+ }
+
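
// A standalone model of the initialization tracking implemented above,
// assuming only the standard library: one bit per local plus a stack of
// first-initialization events. Each block snapshots the stack depth on entry;
// leaving the block rolls back to that depth, un-initializing exactly the
// locals first assigned inside it. Illustrative sketch, not V8 code.
#include <cstdint>
#include <vector>

class LocalsInitTracker {
 public:
  explicit LocalsInitTracker(size_t num_locals)
      : initialized_(num_locals, false) {}

  uint32_t Depth() const {  // Snapshot when entering a block.
    return static_cast<uint32_t>(stack_.size());
  }
  bool IsInitialized(uint32_t i) const { return initialized_[i]; }

  void Set(uint32_t i) {
    if (initialized_[i]) return;  // Record each local at most once.
    initialized_[i] = true;
    stack_.push_back(i);
  }

  void Rollback(uint32_t depth) {  // Restore the snapshot on block exit.
    while (stack_.size() > depth) {
      initialized_[stack_.back()] = false;
      stack_.pop_back();
    }
  }

 private:
  std::vector<bool> initialized_;  // One bit per local, as in the patch.
  std::vector<uint32_t> stack_;    // No duplicates: bounded by local count.
};
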
// The {Zone} is implicitly stored in the {ZoneAllocator} which is part of
// this {ZoneVector}. Hence save one field and just get it from there if
// needed (see {zone()} accessor below).
@@ -2147,18 +2139,23 @@ class WasmDecoder : public Decoder {
// than to load the start and end pointer from a vector, subtract and shift).
uint32_t num_locals_ = 0;
+ // Indicates whether the local with the given index is currently initialized.
+ // Entries for defaultable locals are meaningless; we have a bit for each
+ // local because we expect that the effort required to densify this bit
+ // vector would more than offset the memory savings.
+ ZoneVector<bool> initialized_locals_;
+ // Keeps track of initializing assignments to non-defaultable locals, so
+ // that they can be discarded at the end of the current block. Contains no
+ // duplicates, so the size of this stack is bounded by (and pre-allocated
+ // to) the number of non-defaultable locals in the function.
+ ZoneVector<uint32_t> locals_initializers_stack_;
+
const WasmModule* module_;
const WasmFeatures enabled_;
WasmFeatures* detected_;
const FunctionSig* sig_;
};
-template <Decoder::ValidateFlag validate>
-MemoryAccessImmediate<validate>::MemoryAccessImmediate(
- WasmDecoder<validate>* decoder, const byte* pc, uint32_t max_alignment)
- : MemoryAccessImmediate(decoder, pc, max_alignment,
- decoder->module_->is_memory64) {}
-
// Only call this in contexts where {current_code_reachable_and_ok_} is known to
// hold.
#define CALL_INTERFACE(name, ...) \
@@ -2187,11 +2184,12 @@ MemoryAccessImmediate<validate>::MemoryAccessImmediate(
} \
} while (false)
-template <Decoder::ValidateFlag validate, typename Interface>
-class WasmFullDecoder : public WasmDecoder<validate> {
+template <Decoder::ValidateFlag validate, typename Interface,
+ DecodingMode decoding_mode = kFunctionBody>
+class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
- using ArgVector = Vector<Value>;
+ using ArgVector = base::Vector<Value>;
using ReturnVector = base::SmallVector<Value, 2>;
// All Value types should be trivially copyable for performance. We push, pop,
@@ -2203,8 +2201,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
WasmFullDecoder(Zone* zone, const WasmModule* module,
const WasmFeatures& enabled, WasmFeatures* detected,
const FunctionBody& body, InterfaceArgs&&... interface_args)
- : WasmDecoder<validate>(zone, module, enabled, detected, body.sig,
- body.start, body.end, body.offset),
+ : WasmDecoder<validate, decoding_mode>(zone, module, enabled, detected,
+ body.sig, body.start, body.end,
+ body.offset),
interface_(std::forward<InterfaceArgs>(interface_args)...),
control_(zone) {}
@@ -2222,14 +2221,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeLocals(this->pc(), &locals_length, params_count);
if (this->failed()) return TraceFailed();
this->consume_bytes(locals_length);
+ int non_defaultable = 0;
for (uint32_t index = params_count; index < this->num_locals(); index++) {
- if (!VALIDATE(this->local_type(index).is_defaultable())) {
+ if (!VALIDATE(this->enabled_.has_nn_locals() ||
+ this->local_type(index).is_defaultable())) {
this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
this->local_type(index).name().c_str());
return this->TraceFailed();
}
+ if (!this->local_type(index).is_defaultable()) non_defaultable++;
}
+ this->InitializeInitializedLocalsTracking(non_defaultable);
// Cannot use CALL_INTERFACE_* macros because control is empty.
interface().StartFunction(this);
@@ -2316,10 +2319,73 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- inline uint32_t pc_relative_offset() const {
+ uint32_t pc_relative_offset() const {
return this->pc_offset() - first_instruction_offset;
}
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
+ this->end(), this->pc_offset(),
+ static_cast<int>(this->end() - this->start()));
+
+ // Set up initial function block.
+ {
+ DCHECK(control_.empty());
+ constexpr uint32_t kLocalsCount = 0;
+ constexpr uint32_t kStackDepth = 0;
+ constexpr uint32_t kInitStackDepth = 0;
+ control_.emplace_back(kControlBlock, kLocalsCount, kStackDepth,
+ kInitStackDepth, this->pc_, kReachable);
+ Control* c = &control_.back();
+ if (decoding_mode == kFunctionBody) {
+ InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
+ InitMerge(&c->end_merge,
+ static_cast<uint32_t>(this->sig_->return_count()),
+ [&](uint32_t i) {
+ return Value{this->pc_, this->sig_->GetReturn(i)};
+ });
+ } else {
+ DCHECK_EQ(this->sig_->parameter_count(), 0);
+ DCHECK_EQ(this->sig_->return_count(), 1);
+ c->start_merge.arity = 0;
+ c->end_merge.arity = 1;
+ c->end_merge.vals.first = Value{this->pc_, this->sig_->GetReturn(0)};
+ }
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
+ }
+
+ first_instruction_offset = this->pc_offset();
+ // Decode the function body.
+ while (this->pc_ < this->end_) {
+ // Most operations grow the stack by at most one element (unary and
+ // binary operations, local.get, constants, ...). Check centrally that
+ // there is space for one more element, so those operations can avoid
+ // any bounds checks of their own.
+ EnsureStackSpace(1);
+ uint8_t first_byte = *this->pc_;
+ WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
+ int len;
+ // Allowing two of the most common decoding functions to get inlined
+ // appears to be the sweet spot.
+ // Handling _all_ opcodes via a giant switch-statement has been tried
+ // and found to be slower than calling through the handler table.
+ if (opcode == kExprLocalGet) {
+ len = WasmFullDecoder::DecodeLocalGet(this, opcode);
+ } else if (opcode == kExprI32Const) {
+ len = WasmFullDecoder::DecodeI32Const(this, opcode);
+ } else {
+ OpcodeHandler handler = GetOpcodeHandler(first_byte);
+ len = (*handler)(this, opcode);
+ }
+ this->pc_ += len;
+ }
+
+ if (!VALIDATE(this->pc_ == this->end_)) {
+ this->DecodeError("Beyond end of code");
+ }
+ }
+
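
// The dispatch shape used by DecodeFunctionBody above, reduced to a toy
// interpreter: a compile-time table of handler function pointers, with an
// explicit fast path for the hottest opcode so the compiler can inline it.
// Opcode values and handlers are hypothetical; illustrative sketch only.
#include <array>
#include <cstdint>
#include <utility>

struct ToyDecoder {
  using Handler = int (*)(ToyDecoder*, uint8_t);  // Returns bytes consumed.

  static int Nop(ToyDecoder*, uint8_t) { return 1; }
  static int Const(ToyDecoder* d, uint8_t) {
    d->acc++;   // Pretend to push a constant.
    return 2;   // Opcode byte plus a one-byte immediate.
  }
  static constexpr Handler HandlerFor(size_t idx) {
    return idx == 0x41 ? &ToyDecoder::Const : &ToyDecoder::Nop;
  }
  template <size_t... I>
  static constexpr std::array<Handler, 256> MakeTable(
      std::index_sequence<I...>) {
    return {{HandlerFor(I)...}};
  }

  int Run(const uint8_t* pc, const uint8_t* end) {
    static constexpr std::array<Handler, 256> kTable =
        MakeTable(std::make_index_sequence<256>{});
    while (pc < end) {
      uint8_t b = *pc;
      // Inline the hot opcode; everything else goes through the table.
      pc += (b == 0x41) ? Const(this, b) : (*kTable[b])(this, b);
    }
    return acc;
  }

  int acc = 0;
};
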
private:
uint32_t first_instruction_offset = 0;
Interface interface_;
@@ -2344,14 +2410,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return Value{pc, kWasmBottom};
}
- bool CheckHasMemory() {
- if (!VALIDATE(this->module_->has_memory)) {
- this->DecodeError(this->pc_ - 1, "memory instruction with no memory");
- return false;
- }
- return true;
- }
-
bool CheckSimdFeatureFlagOpcode(WasmOpcode opcode) {
if (!FLAG_experimental_wasm_relaxed_simd &&
WasmOpcodes::IsRelaxedSimdOpcode(opcode)) {
@@ -2363,6 +2421,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return true;
}
+ MemoryAccessImmediate<validate> MakeMemoryAccessImmediate(
+ uint32_t pc_offset, uint32_t max_alignment) {
+ return MemoryAccessImmediate<validate>(
+ this, this->pc_ + pc_offset, max_alignment, this->module_->is_memory64);
+ }
+
#ifdef DEBUG
class TraceLine {
public:
@@ -2390,8 +2454,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
va_list va_args;
va_start(va_args, format);
size_t remaining_len = kMaxLen - len_;
- Vector<char> remaining_msg_space(buffer_ + len_, remaining_len);
- int len = VSNPrintF(remaining_msg_space, format, va_args);
+ base::Vector<char> remaining_msg_space(buffer_ + len_, remaining_len);
+ int len = base::VSNPrintF(remaining_msg_space, format, va_args);
va_end(va_args);
len_ += len < 0 ? remaining_len : len;
}
@@ -2417,7 +2481,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kControlIfElse:
case kControlTryCatch:
case kControlTryCatchAll:
- case kControlTryUnwind:
case kControlLet: // TODO(7748): Implement
break;
}
@@ -2505,7 +2568,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ExceptionIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
ArgVector args = PeekArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Throw, imm, VectorOf(args));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Throw, imm, base::VectorOf(args));
DropArgs(imm.exception->ToFunctionSig());
EndControl();
return 1 + imm.length;
@@ -2541,10 +2604,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("catch after catch-all for try");
return 0;
}
- if (!VALIDATE(!c->is_try_unwind())) {
- this->DecodeError("catch after unwind for try");
- return 0;
- }
FallThrough();
c->kind = kControlTryCatch;
// TODO(jkummerow): Consider moving the stack manipulation after the
@@ -2557,7 +2616,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
Push(CreateValue(sig->GetParam(i)));
}
- Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
+ base::Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
current_code_reachable_and_ok_ = this->ok() && c->reachable();
@@ -2581,8 +2640,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"delegate target must be a try block or the function block");
return 0;
}
- if (target->is_try_catch() || target->is_try_catchall() ||
- target->is_try_unwind()) {
+ if (target->is_try_catch() || target->is_try_catchall()) {
this->DecodeError(
"cannot delegate inside the catch handler of the target");
return 0;
@@ -2607,10 +2665,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("catch-all already present for try");
return 0;
}
- if (!VALIDATE(!c->is_try_unwind())) {
- this->error("cannot have catch-all after unwind");
- return 0;
- }
FallThrough();
c->kind = kControlTryCatchAll;
c->reachability = control_at(1)->innerReachability();
@@ -2621,29 +2675,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 1;
}
- DECODE(Unwind) {
- CHECK_PROTOTYPE_OPCODE(eh);
- DCHECK(!control_.empty());
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->DecodeError("unwind does not match a try");
- return 0;
- }
- if (!VALIDATE(!c->is_try_catch() && !c->is_try_catchall() &&
- !c->is_try_unwind())) {
- this->error("catch, catch-all or unwind already present for try");
- return 0;
- }
- FallThrough();
- c->kind = kControlTryUnwind;
- c->reachability = control_at(1)->innerReachability();
- current_catch_ = c->previous_catch; // Pop try scope.
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
- stack_end_ = stack_ + c->stack_depth;
- current_code_reachable_and_ok_ = this->ok() && c->reachable();
- return 1;
- }
-
DECODE(BrOnNull) {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
@@ -2740,14 +2771,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
ArgVector let_local_values =
PeekArgs(static_cast<uint32_t>(imm.in_arity()),
- VectorOf(this->local_types_.data(), new_locals_count));
+ base::VectorOf(this->local_types_.data(), new_locals_count));
ArgVector args = PeekArgs(imm.sig, new_locals_count);
Control* let_block = PushControl(kControlLet, new_locals_count,
let_local_values.length() + args.length());
SetBlockType(let_block, imm, args.begin());
CALL_INTERFACE_IF_OK_AND_REACHABLE(Block, let_block);
CALL_INTERFACE_IF_OK_AND_REACHABLE(AllocateLocals,
- VectorOf(let_local_values));
+ base::VectorOf(let_local_values));
Drop(new_locals_count); // Drop {let_local_values}.
DropArgs(imm.sig); // Drop {args}.
PushMergeValues(let_block, &let_block->start_merge);
@@ -2806,40 +2837,43 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
DCHECK(!control_.empty());
- Control* c = &control_.back();
- if (c->is_try_catch()) {
- // Emulate catch-all + re-throw.
- FallThrough();
- c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
- current_code_reachable_and_ok_ =
- this->ok() && control_.back().reachable();
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
- EndControl();
- PopControl();
- return 1;
- }
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->DecodeError("missing catch or catch-all in try");
- return 0;
- }
- if (c->is_onearmed_if()) {
- if (!VALIDATE(TypeCheckOneArmedIf(c))) return 0;
- }
- if (c->is_try_unwind()) {
- // Unwind implicitly rethrows at the end.
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
- EndControl();
- }
+ if (decoding_mode == kFunctionBody) {
+ Control* c = &control_.back();
+ if (c->is_incomplete_try()) {
+ // Catch-less try, fall through to the implicit catch-all.
+ c->kind = kControlTryCatch;
+ current_catch_ = c->previous_catch; // Pop try scope.
+ }
+ if (c->is_try_catch()) {
+ // Emulate catch-all + re-throw.
+ FallThrough();
+ c->reachability = control_at(1)->innerReachability();
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
+ current_code_reachable_and_ok_ =
+ this->ok() && control_.back().reachable();
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
+ EndControl();
+ PopControl();
+ return 1;
+ }
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(TypeCheckOneArmedIf(c))) return 0;
+ }
- if (c->is_let()) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(DeallocateLocals, c->locals_count);
- this->local_types_.erase(this->local_types_.begin(),
- this->local_types_.begin() + c->locals_count);
- this->num_locals_ -= c->locals_count;
+ if (c->is_let()) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DeallocateLocals, c->locals_count);
+ this->local_types_.erase(this->local_types_.begin(),
+ this->local_types_.begin() + c->locals_count);
+ this->num_locals_ -= c->locals_count;
+ }
}
if (control_.size() == 1) {
+ // We need to call this first because the interface might set
+ // {this->end_}, making the next check pass.
+ DoReturn<kStrictCounting, decoding_mode == kFunctionBody
+ ? kFallthroughMerge
+ : kInitExprMerge>();
// If at the last (implicit) control, check we are at end.
if (!VALIDATE(this->pc_ + 1 == this->end_)) {
this->DecodeError(this->pc_ + 1, "trailing code after function end");
@@ -2848,7 +2882,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// The result of the block is the return value.
trace_msg->Append("\n" TRACE_INST_FORMAT, startrel(this->pc_),
"(implicit) return");
- DoReturn<kStrictCounting, kFallthroughMerge>();
control_.clear();
return 1;
}
@@ -3052,8 +3085,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(RefFunc) {
CHECK_PROTOTYPE_OPCODE(reftypes);
- FunctionIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "function index");
+ if (!this->ValidateFunction(this->pc_ + 1, imm)) return 0;
HeapType heap_type(this->enabled_.has_typed_funcref()
? this->module_->functions[imm.index].sig_index
: HeapType::kFunc);
@@ -3088,9 +3121,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- DECODE(LocalGet) {
- LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ V8_INLINE DECODE(LocalGet) {
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
+ if (!VALIDATE(!this->enabled_.has_nn_locals() ||
+ this->is_local_initialized(imm.index))) {
+ this->DecodeError(this->pc_, "uninitialized non-defaultable local: %u",
+ imm.index);
+ return 0;
+ }
Value value = CreateValue(this->local_type(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalGet, &value, imm);
Push(value);
@@ -3098,23 +3137,25 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(LocalSet) {
- LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
Value value = Peek(0, 0, this->local_type(imm.index));
CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalSet, value, imm);
Drop(value);
+ this->set_local_initialized(imm.index);
return 1 + imm.length;
}
DECODE(LocalTee) {
- LocalIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "local index");
+ if (!this->ValidateLocal(this->pc_ + 1, imm)) return 0;
ValueType local_type = this->local_type(imm.index);
Value value = Peek(0, 0, local_type);
Value result = CreateValue(local_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(LocalTee, value, &result, imm);
Drop(value);
Push(result);
+ this->set_local_initialized(imm.index);
return 1 + imm.length;
}
@@ -3128,7 +3169,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(GlobalGet) {
GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value result = CreateValue(imm.type);
+ Value result = CreateValue(imm.global->type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(GlobalGet, &result, imm);
Push(result);
return 1 + imm.length;
@@ -3141,7 +3182,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("immutable global #%u cannot be assigned", imm.index);
return 0;
}
- Value value = Peek(0, 0, imm.type);
+ Value value = Peek(0, 0, imm.global->type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(GlobalSet, value, imm);
Drop(value);
return 1 + imm.length;
@@ -3149,8 +3190,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(TableGet) {
CHECK_PROTOTYPE_OPCODE(reftypes);
- TableIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
+ if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value index = Peek(0, 0, kWasmI32);
Value result = CreateValue(this->module_->tables[imm.index].type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableGet, index, &result, imm);
@@ -3161,8 +3202,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(TableSet) {
CHECK_PROTOTYPE_OPCODE(reftypes);
- TableIndexImmediate<validate> imm(this, this->pc_ + 1);
- if (!this->Validate(this->pc_ + 1, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + 1, "table index");
+ if (!this->ValidateTable(this->pc_ + 1, imm)) return 0;
Value value = Peek(0, 1, this->module_->tables[imm.index].type);
Value index = Peek(1, 0, kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableSet, index, value, imm);
@@ -3205,8 +3246,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(MemoryGrow) {
- if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_ + 1, imm)) return 0;
// This opcode will not be emitted by the asm translator.
DCHECK_EQ(kWasmOrigin, this->module_->origin);
ValueType mem_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
@@ -3219,8 +3260,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(MemorySize) {
- if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_ + 1, imm)) return 0;
ValueType result_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value result = CreateValue(result_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(CurrentMemoryPages, &result);
@@ -3241,9 +3282,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(CallIndirect) {
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
+ CallIndirectImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
- Value index = Peek(0, 0, kWasmI32);
+ Value index =
+ Peek(0, static_cast<int>(imm.sig->parameter_count()), kWasmI32);
ArgVector args = PeekArgs(imm.sig, 1);
ReturnVector returns = CreateReturnValues(imm.sig);
CALL_INTERFACE_IF_OK_AND_REACHABLE(CallIndirect, index, imm, args.begin(),
@@ -3272,7 +3314,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(ReturnCallIndirect) {
CHECK_PROTOTYPE_OPCODE(return_call);
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
+ CallIndirectImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s",
@@ -3353,7 +3395,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Simd) {
CHECK_PROTOTYPE_OPCODE(simd);
- if (!CheckHardwareSupportsSimd() && !FLAG_wasm_simd_ssse3_codegen) {
+ if (!CheckHardwareSupportsSimd()) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on missing Wasm SIMD support");
}
@@ -3407,6 +3449,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#undef DECODE
+ static int NonConstError(WasmFullDecoder* decoder, WasmOpcode opcode) {
+ decoder->DecodeError("opcode %s is not allowed in init. expressions",
+ WasmOpcodes::OpcodeName(opcode));
+ return 0;
+ }
+
using OpcodeHandler = int (*)(WasmFullDecoder*, WasmOpcode);
// Ideally we would use template specialization for the different opcodes, but
@@ -3416,7 +3464,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Hence just list all implementations explicitly here, which also gives more
// freedom to use the same implementation for different opcodes.
#define DECODE_IMPL(opcode) DECODE_IMPL2(kExpr##opcode, opcode)
-#define DECODE_IMPL2(opcode, name) \
+#define DECODE_IMPL2(opcode, name) \
+ if (idx == opcode) { \
+ if (decoding_mode == kInitExpression) { \
+ return &WasmFullDecoder::NonConstError; \
+ } else { \
+ return &WasmFullDecoder::Decode##name; \
+ } \
+ }
+#define DECODE_IMPL_CONST(opcode) DECODE_IMPL_CONST2(kExpr##opcode, opcode)
+#define DECODE_IMPL_CONST2(opcode, name) \
if (idx == opcode) return &WasmFullDecoder::Decode##name
static constexpr OpcodeHandler GetOpcodeHandlerTableEntry(size_t idx) {
@@ -3431,14 +3488,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(Catch);
DECODE_IMPL(Delegate);
DECODE_IMPL(CatchAll);
- DECODE_IMPL(Unwind);
DECODE_IMPL(BrOnNull);
DECODE_IMPL(BrOnNonNull);
DECODE_IMPL(Let);
DECODE_IMPL(Loop);
DECODE_IMPL(If);
DECODE_IMPL(Else);
- DECODE_IMPL(End);
+ DECODE_IMPL_CONST(End);
DECODE_IMPL(Select);
DECODE_IMPL(SelectWithType);
DECODE_IMPL(Br);
@@ -3447,19 +3503,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(Return);
DECODE_IMPL(Unreachable);
DECODE_IMPL(NopForTestingUnsupportedInLiftoff);
- DECODE_IMPL(I32Const);
- DECODE_IMPL(I64Const);
- DECODE_IMPL(F32Const);
- DECODE_IMPL(F64Const);
- DECODE_IMPL(RefNull);
+ DECODE_IMPL_CONST(I32Const);
+ DECODE_IMPL_CONST(I64Const);
+ DECODE_IMPL_CONST(F32Const);
+ DECODE_IMPL_CONST(F64Const);
+ DECODE_IMPL_CONST(RefNull);
DECODE_IMPL(RefIsNull);
- DECODE_IMPL(RefFunc);
+ DECODE_IMPL_CONST(RefFunc);
DECODE_IMPL(RefAsNonNull);
DECODE_IMPL(LocalGet);
DECODE_IMPL(LocalSet);
DECODE_IMPL(LocalTee);
DECODE_IMPL(Drop);
- DECODE_IMPL(GlobalGet);
+ DECODE_IMPL_CONST(GlobalGet);
DECODE_IMPL(GlobalSet);
DECODE_IMPL(TableGet);
DECODE_IMPL(TableSet);
@@ -3478,9 +3534,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(CallRef);
DECODE_IMPL(ReturnCallRef);
DECODE_IMPL2(kNumericPrefix, Numeric);
- DECODE_IMPL2(kSimdPrefix, Simd);
+ DECODE_IMPL_CONST2(kSimdPrefix, Simd);
DECODE_IMPL2(kAtomicPrefix, Atomic);
- DECODE_IMPL2(kGCPrefix, GC);
+ DECODE_IMPL_CONST2(kGCPrefix, GC);
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) DECODE_IMPL(name);
FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
@@ -3496,57 +3552,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return kOpcodeHandlers[opcode];
}
- void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
- this->end(), this->pc_offset(),
- static_cast<int>(this->end() - this->start()));
-
- // Set up initial function block.
- {
- DCHECK(control_.empty());
- control_.emplace_back(kControlBlock, 0, 0, this->pc_, kReachable);
- Control* c = &control_.back();
- InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
- InitMerge(&c->end_merge,
- static_cast<uint32_t>(this->sig_->return_count()),
- [&](uint32_t i) {
- return Value{this->pc_, this->sig_->GetReturn(i)};
- });
- CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
- }
-
- first_instruction_offset = this->pc_offset();
- // Decode the function body.
- while (this->pc_ < this->end_) {
- // Most operations only grow the stack by at least one element (unary and
- // binary operations, local.get, constants, ...). Thus check that there is
- // enough space for those operations centrally, and avoid any bounds
- // checks in those operations.
- EnsureStackSpace(1);
- uint8_t first_byte = *this->pc_;
- WasmOpcode opcode = static_cast<WasmOpcode>(first_byte);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(NextInstruction, opcode);
- int len;
- // Allowing two of the most common decoding functions to get inlined
- // appears to be the sweet spot.
- // Handling _all_ opcodes via a giant switch-statement has been tried
- // and found to be slower than calling through the handler table.
- if (opcode == kExprLocalGet) {
- len = WasmFullDecoder::DecodeLocalGet(this, opcode);
- } else if (opcode == kExprI32Const) {
- len = WasmFullDecoder::DecodeI32Const(this, opcode);
- } else {
- OpcodeHandler handler = GetOpcodeHandler(first_byte);
- len = (*handler)(this, opcode);
- }
- this->pc_ += len;
- }
-
- if (!VALIDATE(this->pc_ == this->end_)) {
- this->DecodeError("Beyond end of code");
- }
- }
-
void EndControl() {
DCHECK(!control_.empty());
Control* current = &control_.back();
@@ -3578,30 +3583,60 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return Value{pc, imm.out_type(i)};
});
InitMerge(&c->start_merge, imm.in_arity(),
- [args](uint32_t i) { return args[i]; });
- }
-
- V8_INLINE void EnsureStackArguments(int count) {
+#ifdef DEBUG
+ [this, pc, &imm, args](uint32_t i) {
+#else
+ [pc, &imm, args](uint32_t i) {
+#endif
+ // The merge needs to be instantiated with Values of the correct
+ // type even in the presence of bottom values (i.e. in
+ // unreachable code). Since bottom Values will never be used for
+ // code generation, we can safely instantiate new ones in that
+ // case.
+ DCHECK_IMPLIES(current_code_reachable_and_ok_,
+ args[i].type != kWasmBottom);
+ // Warning: Do not use a ternary operator here, as gcc bugs out
+ // (as of version 10.2.1).
+ if (args[i].type != kWasmBottom) {
+ return args[i];
+ } else {
+ return Value{pc, imm.in_type(i)};
+ }
+ });
+ }
+
+ // In reachable code, check that there are at least {count} values above the
+ // current block's stack base. In unreachable code, if fewer than {count}
+ // values are on the stack, insert unreachable values underneath the current
+ // values to make up the difference, and return how many were inserted.
+ V8_INLINE int EnsureStackArguments(int count) {
uint32_t limit = control_.back().stack_depth;
- if (stack_size() >= count + limit) return;
- EnsureStackArguments_Slow(count, limit);
+ if (V8_LIKELY(stack_size() >= count + limit)) return 0;
+ return EnsureStackArguments_Slow(count, limit);
}
- V8_NOINLINE void EnsureStackArguments_Slow(int count, uint32_t limit) {
+ V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
int index = count - stack_size() - 1;
NotEnoughArgumentsError(index);
}
- // Silently create unreachable values out of thin air. Since we push them
- // onto the stack, while conceptually we should be inserting them under
- // any existing elements, we have to avoid validation failures that would
- // be caused by finding non-unreachable values in the wrong slot, so we
- // replace the entire current scope's values.
- Drop(static_cast<int>(stack_size() - limit));
- EnsureStackSpace(count + limit - stack_size());
- while (stack_size() < count + limit) {
- Push(UnreachableValue(this->pc_));
+ // Silently create unreachable values out of thin air underneath the
+ // existing stack values. To do so, we have to move existing stack values
+ // upwards in the stack, then instantiate the new Values as
+ // {UnreachableValue}.
+ int current_values = stack_size() - limit;
+ int additional_values = count - current_values;
+ DCHECK_GT(additional_values, 0);
+ EnsureStackSpace(additional_values);
+ stack_end_ += additional_values;
+ Value* stack_base = stack_value(current_values + additional_values);
+ for (int i = current_values - 1; i >= 0; i--) {
+ stack_base[additional_values + i] = stack_base[i];
}
+ for (int i = 0; i < additional_values; i++) {
+ stack_base[i] = UnreachableValue(this->pc_);
+ }
+ return additional_values;
}
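
// The effect of EnsureStackArguments_Slow in unreachable code, modelled on a
// std::vector: placeholder values are inserted *underneath* the existing
// values of the current block, which therefore shift upwards. Here -1 stands
// in for UnreachableValue. Illustrative sketch, not V8 code.
#include <vector>

// Ensures at least {count} values above {limit}; returns how many
// placeholders were inserted.
int EnsureArgs(std::vector<int>* stack, int limit, int count) {
  int current = static_cast<int>(stack->size()) - limit;
  if (current >= count) return 0;
  int additional = count - current;
  stack->insert(stack->begin() + limit, additional, /*placeholder=*/-1);
  return additional;
}
// E.g. with limit 1 and count 3, {7, 42} becomes {7, -1, -1, 42}.
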
// Peeks arguments as required by signature.
@@ -3615,6 +3650,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
return args;
}
+ // Drops a number of stack elements equal to the {sig}'s parameter count (0
+ // if {sig} is null), or all of them if fewer are present.
V8_INLINE void DropArgs(const FunctionSig* sig) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
Drop(count);
@@ -3630,12 +3667,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
return args;
}
+ // Drops a number of stack elements equal to the struct's field count, or
+ // all of them if fewer are present.
V8_INLINE void DropArgs(const StructType* type) {
Drop(static_cast<int>(type->field_count()));
}
V8_INLINE ArgVector PeekArgs(uint32_t base_index,
- Vector<ValueType> arg_types) {
+ base::Vector<ValueType> arg_types) {
int size = static_cast<int>(arg_types.size());
EnsureStackArguments(size);
ArgVector args(stack_value(size), arg_types.size());
@@ -3661,8 +3700,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t stack_depth =
stack_size() >= drop_values ? stack_size() - drop_values : 0;
stack_depth = std::max(stack_depth, control_.back().stack_depth);
- control_.emplace_back(kind, locals_count, stack_depth, this->pc_,
- reachability);
+ uint32_t init_stack_depth = this->locals_initialization_stack_depth();
+ control_.emplace_back(kind, locals_count, stack_depth, init_stack_depth,
+ this->pc_, reachability);
current_code_reachable_and_ok_ = this->ok() && reachability == kReachable;
return &control_.back();
}
@@ -3675,8 +3715,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(PopControl, c);
- // A loop just leaves the values on the stack.
- if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
+ // - In reachable code, a loop just leaves the values on the stack.
+ // - In unreachable code, it is not guaranteed that we have Values of the
+ // correct types on the stack, so we have to make sure we do. Their values
+ // do not matter, so we might as well push the (uninitialized) values of
+ // the loop's end merge.
+ if (!c->is_loop() || c->unreachable()) {
+ PushMergeValues(c, &c->end_merge);
+ }
+ this->RollbackLocalsInitialization(c->init_stack_depth);
bool parent_reached =
c->reachable() || c->end_merge.reached || c->is_onearmed_if();
@@ -3688,9 +3735,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
int DecodeLoadMem(LoadType type, int prefix_len = 1) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
- type.size_log_2());
+ MemoryAccessImmediate<validate> imm =
+ MakeMemoryAccessImmediate(prefix_len, type.size_log_2());
+ if (!this->Validate(this->pc_ + prefix_len, imm)) return 0;
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(0, 0, index_type);
Value result = CreateValue(type.value_type());
@@ -3702,12 +3749,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int DecodeLoadTransformMem(LoadType type, LoadTransformationKind transform,
uint32_t opcode_length) {
- if (!CheckHasMemory()) return 0;
// Load-extend operations always load 64 bits.
uint32_t max_alignment =
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
- MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
- max_alignment);
+ MemoryAccessImmediate<validate> imm =
+ MakeMemoryAccessImmediate(opcode_length, max_alignment);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(0, 0, index_type);
Value result = CreateValue(kWasmS128);
@@ -3719,9 +3766,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
int DecodeLoadLane(WasmOpcode opcode, LoadType type, uint32_t opcode_length) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
- type.size_log_2());
+ MemoryAccessImmediate<validate> mem_imm =
+ MakeMemoryAccessImmediate(opcode_length, type.size_log_2());
+ if (!this->Validate(this->pc_ + opcode_length, mem_imm)) return 0;
SimdLaneImmediate<validate> lane_imm(
this, this->pc_ + opcode_length + mem_imm.length);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
@@ -3738,9 +3785,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int DecodeStoreLane(WasmOpcode opcode, StoreType type,
uint32_t opcode_length) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
- type.size_log_2());
+ MemoryAccessImmediate<validate> mem_imm =
+ MakeMemoryAccessImmediate(opcode_length, type.size_log_2());
+ if (!this->Validate(this->pc_ + opcode_length, mem_imm)) return 0;
SimdLaneImmediate<validate> lane_imm(
this, this->pc_ + opcode_length + mem_imm.length);
if (!this->Validate(this->pc_ + opcode_length, opcode, lane_imm)) return 0;
@@ -3754,9 +3801,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
- if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
- store.size_log_2());
+ MemoryAccessImmediate<validate> imm =
+ MakeMemoryAccessImmediate(prefix_len, store.size_log_2());
+ if (!this->Validate(this->pc_ + prefix_len, imm)) return 0;
Value value = Peek(0, 1, store.value_type());
ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
Value index = Peek(1, 0, index_type);
@@ -3780,7 +3827,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value inputs[] = {Peek(0, 0, kWasmS128)};
Value result = CreateValue(type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdLaneOp, opcode, imm,
- ArrayVector(inputs), &result);
+ base::ArrayVector(inputs), &result);
Drop(1);
Push(result);
}
@@ -3794,7 +3841,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value inputs[2] = {Peek(1, 0, kWasmS128), Peek(0, 1, type)};
Value result = CreateValue(kWasmS128);
CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdLaneOp, opcode, imm,
- ArrayVector(inputs), &result);
+ base::ArrayVector(inputs), &result);
Drop(2);
Push(result);
}
@@ -3816,6 +3863,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
uint32_t DecodeSimdOpcode(WasmOpcode opcode, uint32_t opcode_length) {
+ if (decoding_mode == kInitExpression) {
+ // Currently, only s128.const is allowed in initializer expressions.
+ if (opcode != kExprS128Const) {
+ this->DecodeError("opcode %s is not allowed in init. expressions",
+ this->SafeOpcodeNameAt(this->pc()));
+ return 0;
+ }
+ return SimdConstOp(opcode_length);
+ }
// opcode_length is the number of bytes that this SIMD-specific opcode takes
// up in the LEB128 encoded form.
switch (opcode) {
@@ -3927,13 +3983,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
ArgVector args = PeekArgs(sig);
if (sig->return_count() == 0) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdOp, opcode, VectorOf(args),
- nullptr);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdOp, opcode,
+ base::VectorOf(args), nullptr);
DropArgs(sig);
} else {
ReturnVector results = CreateReturnValues(sig);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(SimdOp, opcode, VectorOf(args),
- results.begin());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(
+ SimdOp, opcode, base::VectorOf(args), results.begin());
DropArgs(sig);
PushReturns(results);
}
@@ -3950,6 +4006,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->module_);
}
+#define NON_CONST_ONLY \
+ if (decoding_mode == kInitExpression) { \
+ this->DecodeError("opcode %s is not allowed in init. expressions", \
+ this->SafeOpcodeNameAt(this->pc())); \
+ return 0; \
+ }
+
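
// Why NON_CONST_ONLY costs nothing in the function-body decoder:
// {decoding_mode} is a template parameter, so the guard is a compile-time
// constant and the rejected branch is dead code in the kFunctionBody
// instantiation. A minimal sketch; names below are illustrative, not V8's.
#include <cstdio>

enum DecodingMode { kFunctionBody, kInitExpression };

template <DecodingMode mode>
int DecodeStructSet() {
  if (mode == kInitExpression) {  // Constant-folded per instantiation.
    std::printf("opcode struct.set is not allowed in init. expressions\n");
    return 0;
  }
  // ... normal decoding would go here ...
  return 1;
}
// DecodeStructSet<kFunctionBody>() compiles down to just the normal path.
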
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
case kExprStructNewWithRtt: {
@@ -3966,7 +4029,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
rtt.type.is_bottom() ||
(rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
PopTypeError(imm.struct_type->field_count(), rtt,
- "rtt for type " + std::to_string(imm.index));
+ "rtt with depth for type " + std::to_string(imm.index));
return 0;
}
ArgVector args = PeekArgs(imm.struct_type, 1);
@@ -3979,6 +4042,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprStructNewDefault: {
+ NON_CONST_ONLY
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
@@ -4003,7 +4067,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(
rtt.type.is_bottom() ||
(rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(0, rtt, "rtt for type " + std::to_string(imm.index));
+ PopTypeError(0, rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
return 0;
}
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4013,19 +4078,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprStructGet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ NON_CONST_ONLY
+ FieldImmediate<validate> field(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
- field.struct_index.struct_type->field(field.index);
+ field.struct_imm.struct_type->field(field.field_imm.index);
if (!VALIDATE(!field_type.is_packed())) {
this->DecodeError(
"struct.get: Immediate field %d of type %d has packed type %s. "
"Use struct.get_s or struct.get_u instead.",
- field.index, field.struct_index.index, field_type.name().c_str());
+ field.field_imm.index, field.struct_imm.index,
+ field_type.name().c_str());
return 0;
}
Value struct_obj =
- Peek(0, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ Peek(0, 0, ValueType::Ref(field.struct_imm.index, kNullable));
Value value = CreateValue(field_type);
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructGet, struct_obj, field, true,
&value);
@@ -4035,20 +4102,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprStructGetU:
case kExprStructGetS: {
- FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ NON_CONST_ONLY
+ FieldImmediate<validate> field(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
- field.struct_index.struct_type->field(field.index);
+ field.struct_imm.struct_type->field(field.field_imm.index);
if (!VALIDATE(field_type.is_packed())) {
this->DecodeError(
"%s: Immediate field %d of type %d has non-packed type %s. Use "
"struct.get instead.",
- WasmOpcodes::OpcodeName(opcode), field.index,
- field.struct_index.index, field_type.name().c_str());
+ WasmOpcodes::OpcodeName(opcode), field.field_imm.index,
+ field.struct_imm.index, field_type.name().c_str());
return 0;
}
Value struct_obj =
- Peek(0, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ Peek(0, 0, ValueType::Ref(field.struct_imm.index, kNullable));
Value value = CreateValue(field_type.Unpacked());
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructGet, struct_obj, field,
opcode == kExprStructGetS, &value);
@@ -4057,24 +4125,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + field.length;
}
case kExprStructSet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ NON_CONST_ONLY
+ FieldImmediate<validate> field(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
- const StructType* struct_type = field.struct_index.struct_type;
- if (!VALIDATE(struct_type->mutability(field.index))) {
+ const StructType* struct_type = field.struct_imm.struct_type;
+ if (!VALIDATE(struct_type->mutability(field.field_imm.index))) {
this->DecodeError("struct.set: Field %d of type %d is immutable.",
- field.index, field.struct_index.index);
+ field.field_imm.index, field.struct_imm.index);
return 0;
}
Value field_value =
- Peek(0, 1, struct_type->field(field.index).Unpacked());
+ Peek(0, 1, struct_type->field(field.field_imm.index).Unpacked());
Value struct_obj =
- Peek(1, 0, ValueType::Ref(field.struct_index.index, kNullable));
+ Peek(1, 0, ValueType::Ref(field.struct_imm.index, kNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructSet, struct_obj, field,
field_value);
Drop(2);
return opcode_length + field.length;
}
case kExprArrayNewWithRtt: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Peek(0, 2);
@@ -4087,7 +4157,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(
rtt.type.is_bottom() ||
(rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(2, rtt, "rtt for type " + std::to_string(imm.index));
+ PopTypeError(2, rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
return 0;
}
Value length = Peek(1, 1, kWasmI32);
@@ -4101,6 +4172,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
@@ -4120,7 +4192,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(
rtt.type.is_bottom() ||
(rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(1, rtt, "rtt for type " + std::to_string(imm.index));
+ PopTypeError(1, rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
return 0;
}
Value length = Peek(1, 0, kWasmI32);
@@ -4133,6 +4206,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprArrayGetS:
case kExprArrayGetU: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_packed())) {
@@ -4153,6 +4227,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprArrayGet: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(!imm.array_type->element_type().is_packed())) {
@@ -4172,6 +4247,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprArraySet: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->mutability())) {
@@ -4188,6 +4264,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprArrayLen: {
+ NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value array_obj = Peek(0, 0, ValueType::Ref(imm.index, kNullable));
@@ -4197,7 +4274,77 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Push(value);
return opcode_length + imm.length;
}
+ case kExprArrayCopy: {
+ NON_CONST_ONLY
+ CHECK_PROTOTYPE_OPCODE(gc_experiments);
+ ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
+ if (!VALIDATE(dst_imm.array_type->mutability())) {
+ this->DecodeError(
+ "array.copy: immediate destination array type #%d is immutable",
+ dst_imm.index);
+ return 0;
+ }
+ ArrayIndexImmediate<validate> src_imm(
+ this, this->pc_ + opcode_length + dst_imm.length);
+ if (!this->Validate(this->pc_ + opcode_length + dst_imm.length,
+ src_imm)) {
+ return 0;
+ }
+ if (!IsSubtypeOf(src_imm.array_type->element_type(),
+ dst_imm.array_type->element_type(), this->module_)) {
+ this->DecodeError(
+ "array.copy: source array's #%d element type is not a subtype of "
+ "destination array's #%d element type",
+ src_imm.index, dst_imm.index);
+ return 0;
+ }
+ // [dst, dst_index, src, src_index, length]
+ Value dst = Peek(4, 0, ValueType::Ref(dst_imm.index, kNullable));
+ Value dst_index = Peek(3, 1, kWasmI32);
+ Value src = Peek(2, 2, ValueType::Ref(src_imm.index, kNullable));
+ Value src_index = Peek(1, 3, kWasmI32);
+ Value length = Peek(0, 4, kWasmI32);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayCopy, dst, dst_index, src,
+ src_index, length);
+ Drop(5);
+ return opcode_length + dst_imm.length + src_imm.length;
+ }
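
// The runtime behaviour implied by the operand order above, modelled on
// std::vector<int32_t>. The decoder has already checked that the source
// element type is a subtype of the destination's; out-of-bounds accesses
// trap (modelled here as returning false). Illustrative sketch only.
#include <algorithm>
#include <cstdint>
#include <vector>

bool ArrayCopy(std::vector<int32_t>* dst, uint32_t dst_index,
               const std::vector<int32_t>& src, uint32_t src_index,
               uint32_t length) {
  if (uint64_t{dst_index} + length > dst->size()) return false;  // trap
  if (uint64_t{src_index} + length > src.size()) return false;   // trap
  // Copy via a temporary so overlapping self-copies behave like memmove.
  std::vector<int32_t> tmp(src.begin() + src_index,
                           src.begin() + src_index + length);
  std::copy(tmp.begin(), tmp.end(), dst->begin() + dst_index);
  return true;
}
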
+ case kExprArrayInit: {
+ CHECK_PROTOTYPE_OPCODE(gc_experiments);
+ if (decoding_mode != kInitExpression) {
+ this->DecodeError("array.init is only allowed in init. expressions");
+ return 0;
+ }
+ ArrayIndexImmediate<validate> array_imm(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
+ IndexImmediate<validate> length_imm(
+ this, this->pc_ + opcode_length + array_imm.length,
+ "array.init length");
+ uint32_t elem_count = length_imm.index;
+ if (!VALIDATE(elem_count <= kV8MaxWasmArrayInitLength)) {
+ this->DecodeError(
+ "Requested length %u for array.init too large, maximum is %zu",
+ length_imm.index, kV8MaxWasmArrayInitLength);
+ return 0;
+ }
+ ValueType element_type = array_imm.array_type->element_type();
+ std::vector<ValueType> element_types(elem_count,
+ element_type.Unpacked());
+ FunctionSig element_sig(0, elem_count, element_types.data());
+ ArgVector elements = PeekArgs(&element_sig, 1);
+ Value rtt = Peek(0, elem_count, ValueType::Rtt(array_imm.index));
+ Value result =
+ CreateValue(ValueType::Ref(array_imm.index, kNonNullable));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayInit, array_imm, elements, rtt,
+ &result);
+ Drop(elem_count + 1);
+ Push(result);
+ return opcode_length + array_imm.length + length_imm.length;
+ }
case kExprI31New: {
+ NON_CONST_ONLY
Value input = Peek(0, 0, kWasmI32);
Value value = CreateValue(kWasmI31Ref);
CALL_INTERFACE_IF_OK_AND_REACHABLE(I31New, input, &value);
@@ -4206,6 +4353,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length;
}
case kExprI31GetS: {
+ NON_CONST_ONLY
Value i31 = Peek(0, 0, kWasmI31Ref);
Value value = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(I31GetS, i31, &value);
@@ -4214,6 +4362,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length;
}
case kExprI31GetU: {
+ NON_CONST_ONLY
Value i31 = Peek(0, 0, kWasmI31Ref);
Value value = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(I31GetU, i31, &value);
@@ -4222,16 +4371,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length;
}
case kExprRttCanon: {
- TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
Value value = CreateValue(ValueType::Rtt(imm.index, 0));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
Push(value);
return opcode_length + imm.length;
}
+ case kExprRttFreshSub:
+ CHECK_PROTOTYPE_OPCODE(gc_experiments);
+ V8_FALLTHROUGH;
case kExprRttSub: {
- TypeIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
Value parent = Peek(0, 0);
if (parent.type.is_bottom()) {
DCHECK(!current_code_reachable_and_ok_);
@@ -4250,13 +4404,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
imm.index, parent.type.depth() + 1))
: CreateValue(ValueType::Rtt(imm.index));
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RttSub, imm.index, parent, &value);
+ WasmRttSubMode mode = opcode == kExprRttSub
+ ? WasmRttSubMode::kCanonicalize
+ : WasmRttSubMode::kFresh;
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttSub, imm.index, parent, &value,
+ mode);
Drop(parent);
Push(value);
}
return opcode_length + imm.length;
}
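
// The observable difference between the two modes threaded through above:
// canonicalized rtt.sub returns the same RTT for the same (parent, type)
// pair, while rtt.fresh_sub allocates a distinct RTT on every evaluation.
// Data structures here are illustrative, not V8's internal representation.
#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>

struct Rtt {
  uint32_t type_index;
  const Rtt* parent;
};
enum class SubMode { kCanonicalize, kFresh };

const Rtt* RttSub(uint32_t type_index, const Rtt* parent, SubMode mode) {
  static std::map<std::pair<const Rtt*, uint32_t>, std::unique_ptr<Rtt>> cache;
  static std::vector<std::unique_ptr<Rtt>> fresh;
  if (mode == SubMode::kCanonicalize) {
    auto& slot = cache[{parent, type_index}];
    if (!slot) slot = std::make_unique<Rtt>(Rtt{type_index, parent});
    return slot.get();  // Same pointer for repeated (parent, type) pairs.
  }
  fresh.push_back(std::make_unique<Rtt>(Rtt{type_index, parent}));
  return fresh.back().get();  // A new RTT every time.
}
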
case kExprRefTest: {
+ NON_CONST_ONLY
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
Value rtt = Peek(0, 1);
Value obj = Peek(1, 0);
@@ -4273,14 +4432,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
- if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
+ if (current_code_reachable_and_ok_) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RefTest, obj, rtt, &value);
+ CALL_INTERFACE(RefTest, obj, rtt, &value);
} else {
// Unrelated types. Will always fail.
- CALL_INTERFACE_IF_OK_AND_REACHABLE(I32Const, &value, 0);
+ CALL_INTERFACE(I32Const, &value, 0);
}
}
Drop(2);
@@ -4288,6 +4447,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length;
}
case kExprRefCast: {
+ NON_CONST_ONLY
Value rtt = Peek(0, 1);
Value obj = Peek(1, 0);
if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
@@ -4302,32 +4462,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, obj, "subtype of (ref null func) or (ref null data)");
return 0;
}
- if (!obj.type.is_bottom() && !rtt.type.is_bottom()) {
- Value value = CreateValue(
- ValueType::Ref(rtt.type.ref_index(), obj.type.nullability()));
+ // If either value is bottom, we emit the most specific type possible.
+ Value value =
+ CreateValue(rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.ref_index(),
+ obj.type.is_bottom()
+ ? kNonNullable
+ : obj.type.nullability()));
+ if (current_code_reachable_and_ok_) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RefCast, obj, rtt, &value);
+ CALL_INTERFACE(RefCast, obj, rtt, &value);
} else {
// Unrelated types. The only way this will not trap is if the object
// is null.
if (obj.type.is_nullable()) {
// Drop rtt from the stack, then assert that obj is null.
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(AssertNull, obj, &value);
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(AssertNull, obj, &value);
} else {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap,
- TrapReason::kTrapIllegalCast);
+ CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
EndControl();
}
}
- Drop(2);
- Push(value);
}
+ Drop(2);
+ Push(value);
return opcode_length;
}
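        // Illustrative summary (not part of the patch) of the lowering above:
        //   obj related to rtt          -> RefCast (runtime type check)
        //   unrelated, obj nullable     -> AssertNull (only null passes)
        //   unrelated, obj non-nullable -> Trap (kTrapIllegalCast), and the
        //                                  current control ends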
case kExprBrOnCast: {
+ NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
@@ -4385,6 +4551,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + branch_depth.length;
}
case kExprBrOnCastFail: {
+ NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
@@ -4447,6 +4614,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
#define ABSTRACT_TYPE_CHECK(heap_type) \
case kExprRefIs##heap_type: { \
+ NON_CONST_ONLY \
Value arg = Peek(0, 0, kWasmAnyRef); \
Value result = CreateValue(kWasmI32); \
CALL_INTERFACE_IF_OK_AND_REACHABLE(RefIs##heap_type, arg, &result); \
@@ -4460,17 +4628,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ABSTRACT_TYPE_CHECK(I31)
#undef ABSTRACT_TYPE_CHECK
-#define ABSTRACT_TYPE_CAST(heap_type) \
- case kExprRefAs##heap_type: { \
- Value arg = Peek(0, 0, kWasmAnyRef); \
- if (!arg.type.is_bottom()) { \
- Value result = \
- CreateValue(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
- CALL_INTERFACE_IF_OK_AND_REACHABLE(RefAs##heap_type, arg, &result); \
- Drop(arg); \
- Push(result); \
- } \
- return opcode_length; \
+#define ABSTRACT_TYPE_CAST(heap_type) \
+ case kExprRefAs##heap_type: { \
+ NON_CONST_ONLY \
+ Value arg = Peek(0, 0, kWasmAnyRef); \
+ Value result = \
+ CreateValue(ValueType::Ref(HeapType::k##heap_type, kNonNullable)); \
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RefAs##heap_type, arg, &result); \
+ Drop(arg); \
+ Push(result); \
+ return opcode_length; \
}
ABSTRACT_TYPE_CAST(Data)
@@ -4481,6 +4648,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrOnData:
case kExprBrOnFunc:
case kExprBrOnI31: {
+ NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, branch_depth,
@@ -4488,23 +4656,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return 0;
}
- Value obj = Peek(0, 0, kWasmAnyRef);
Control* c = control_at(branch_depth.depth);
- HeapType::Representation heap_type =
- opcode == kExprBrOnFunc
- ? HeapType::kFunc
- : opcode == kExprBrOnData ? HeapType::kData : HeapType::kI31;
if (c->br_merge()->arity == 0) {
this->DecodeError("%s must target a branch of arity at least 1",
SafeOpcodeNameAt(this->pc_));
return 0;
}
+
// Attention: contrary to most other instructions, we modify the
// stack before calling the interface function. This makes it
// significantly more convenient to pass around the values that
// will be on the stack when the branch is taken.
// TODO(jkummerow): Reconsider this choice.
+ Value obj = Peek(0, 0, kWasmAnyRef);
Drop(obj);
+ HeapType::Representation heap_type =
+ opcode == kExprBrOnFunc
+ ? HeapType::kFunc
+ : opcode == kExprBrOnData ? HeapType::kData : HeapType::kI31;
Value result_on_branch =
CreateValue(ValueType::Ref(heap_type, kNonNullable));
Push(result_on_branch);
@@ -4527,11 +4696,56 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
+ case kExprBrOnNonData:
+ case kExprBrOnNonFunc:
+ case kExprBrOnNonI31: {
+ NON_CONST_ONLY
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
+ return 0;
+ }
+
+ Control* c = control_at(branch_depth.depth);
+ if (c->br_merge()->arity == 0) {
+ this->DecodeError("%s must target a branch of arity at least 1",
+ SafeOpcodeNameAt(this->pc_));
+ return 0;
+ }
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+
+ Value obj = Peek(0, 0, kWasmAnyRef);
+ HeapType::Representation heap_type =
+ opcode == kExprBrOnNonFunc
+ ? HeapType::kFunc
+ : opcode == kExprBrOnNonData ? HeapType::kData : HeapType::kI31;
+ Value value_on_fallthrough =
+ CreateValue(ValueType::Ref(heap_type, kNonNullable));
+
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ if (opcode == kExprBrOnNonFunc) {
+ CALL_INTERFACE(BrOnNonFunc, obj, &value_on_fallthrough,
+ branch_depth.depth);
+ } else if (opcode == kExprBrOnNonData) {
+ CALL_INTERFACE(BrOnNonData, obj, &value_on_fallthrough,
+ branch_depth.depth);
+ } else {
+ CALL_INTERFACE(BrOnNonI31, obj, &value_on_fallthrough,
+ branch_depth.depth);
+ }
+ c->br_merge()->reached = true;
+ }
+ Drop(obj);
+ Push(value_on_fallthrough);
+ return opcode_length + branch_depth.length;
+ }
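        // Illustrative sketch (not part of the patch): br_on_non_data is the
        // complement of br_on_data. It branches when the operand is *not* a
        // data object and refines the fallthrough type instead of the branch
        // type, as in this hypothetical wat fragment:
        //
        //   (block $not_data (result anyref)
        //     (br_on_non_data $not_data (local.get $x))  ;; taken: $x unchanged
        //     ;; fallthrough: the stack now holds $x as (ref data)
        //     ...)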
default:
this->DecodeError("invalid gc opcode");
return 0;
}
}
+#undef NON_CONST_ONLY
uint32_t DecodeAtomicOpcode(WasmOpcode opcode, uint32_t opcode_length) {
ValueType ret_type;
@@ -4573,22 +4787,23 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("invalid atomic opcode");
return 0;
}
- if (!CheckHasMemory()) return 0;
- MemoryAccessImmediate<validate> imm(
- this, this->pc_ + opcode_length,
- ElementSizeLog2Of(memtype.representation()));
+
+ MemoryAccessImmediate<validate> imm = MakeMemoryAccessImmediate(
+ opcode_length, ElementSizeLog2Of(memtype.representation()));
+    if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+
// TODO(10949): Fix this for memory64 (index type should be kWasmI64
// then).
CHECK(!this->module_->is_memory64);
ArgVector args = PeekArgs(sig);
if (ret_type == kWasmVoid) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
- nullptr);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
+ imm, nullptr);
DropArgs(sig);
} else {
Value result = CreateValue(GetReturnType(sig));
- CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
- &result);
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(AtomicOp, opcode, base::VectorOf(args),
+ imm, &result);
DropArgs(sig);
Push(result);
}
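    // Illustrative note (not part of the patch): the maximum alignment passed
    // above is the natural alignment of the access, e.g.
    // ElementSizeLog2Of(MachineRepresentation::kWord64) == 3, so a 64-bit
    // atomic access may declare at most 8-byte (2^3) alignment.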
@@ -4624,8 +4839,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "data segment index");
+ if (!this->ValidateDataSegment(this->pc_ + opcode_length, imm)) {
+ return 0;
+ }
CALL_INTERFACE_IF_OK_AND_REACHABLE(DataDrop, imm);
return opcode_length + imm.length;
}
@@ -4653,13 +4871,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PeekArgs(sig);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(TableInit, imm, VectorOf(args));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableInit, imm,
+ base::VectorOf(args));
DropArgs(sig);
return opcode_length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "element segment index");
+ if (!this->ValidateElementSegment(this->pc_ + opcode_length, imm)) {
+ return 0;
+ }
CALL_INTERFACE_IF_OK_AND_REACHABLE(ElemDrop, imm);
return opcode_length + imm.length;
}
@@ -4667,13 +4889,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PeekArgs(sig);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(TableCopy, imm, VectorOf(args));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(TableCopy, imm,
+ base::VectorOf(args));
DropArgs(sig);
return opcode_length + imm.length;
}
case kExprTableGrow: {
- TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "table index");
+ if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value delta = Peek(0, 1, sig->GetParam(1));
Value value = Peek(1, 0, this->module_->tables[imm.index].type);
Value result = CreateValue(kWasmI32);
@@ -4684,16 +4908,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return opcode_length + imm.length;
}
case kExprTableSize: {
- TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "table index");
+ if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value result = CreateValue(kWasmI32);
CALL_INTERFACE_IF_OK_AND_REACHABLE(TableSize, imm, &result);
Push(result);
return opcode_length + imm.length;
}
case kExprTableFill: {
- TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
- if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "table index");
+ if (!this->ValidateTable(this->pc_ + opcode_length, imm)) return 0;
Value count = Peek(0, 2, sig->GetParam(2));
Value value = Peek(1, 1, this->module_->tables[imm.index].type);
Value start = Peek(2, 0, sig->GetParam(0));
@@ -4738,6 +4964,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
+ if (decoding_mode == kInitExpression) return;
DCHECK_EQ(c, &control_.back());
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_LE(stack_ + c->stack_depth, stack_end_);
@@ -4806,7 +5033,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t limit = control_.back().stack_depth;
if (V8_UNLIKELY(stack_size() <= limit + depth)) {
// Peeking past the current control start in reachable code.
- if (!VALIDATE(control_.back().unreachable())) {
+ if (!VALIDATE(decoding_mode == kFunctionBody &&
+ control_.back().unreachable())) {
NotEnoughArgumentsError(index);
}
return UnreachableValue(this->pc_);
@@ -4824,22 +5052,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
+  // Drop the top {count} stack elements, or all of them if fewer than {count}
+ // are present.
V8_INLINE void Drop(int count = 1) {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
- // TODO(wasm): This check is often redundant.
if (V8_UNLIKELY(stack_size() < limit + count)) {
- // Popping past the current control start in reachable code.
- if (!VALIDATE(!current_code_reachable_and_ok_)) {
- NotEnoughArgumentsError(0);
- }
// Pop what we can.
count = std::min(count, static_cast<int>(stack_size() - limit));
}
DCHECK_LE(stack_, stack_end_ - count);
stack_end_ -= count;
}
- // For more descriptive call sites:
+ // Drop the top stack element if present. Takes a Value input for more
+ // descriptive call sites.
V8_INLINE void Drop(const Value& /* unused */) { Drop(1); }
enum StackElementsCountMode : bool {
@@ -4847,7 +5073,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
kStrictCounting = true
};
- enum MergeType { kBranchMerge, kReturnMerge, kFallthroughMerge };
+ enum MergeType {
+ kBranchMerge,
+ kReturnMerge,
+ kFallthroughMerge,
+ kInitExprMerge
+ };
// - If the current code is reachable, check if the current stack values are
// compatible with {merge} based on their number and types. Disregard the
@@ -4869,10 +5100,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
constexpr const char* merge_description =
merge_type == kBranchMerge
? "branch"
- : merge_type == kReturnMerge ? "return" : "fallthru";
+ : merge_type == kReturnMerge
+ ? "return"
+ : merge_type == kInitExprMerge ? "init. expression"
+ : "fallthru";
uint32_t arity = merge->arity;
uint32_t actual = stack_size() - control_.back().stack_depth;
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ // Here we have to check for !unreachable(), because we need to typecheck as
+ // if the current code is reachable even if it is spec-only reachable.
+ if (V8_LIKELY(decoding_mode == kInitExpression ||
+ !control_.back().unreachable())) {
if (V8_UNLIKELY(strict_count ? actual != drop_values + arity
: actual < drop_values + arity)) {
this->DecodeError("expected %u elements on the stack for %s, found %u",
@@ -4906,19 +5143,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Peek(depth, i, (*merge)[i].type);
}
if (push_branch_values) {
- Drop(drop_values);
- Drop(arity);
- // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
- // than requested. So ensuring stack space here is not redundant.
- EnsureStackSpace(drop_values + arity);
- // Push values of the correct type onto the stack.
- for (int i = 0; i < static_cast<int>(arity); i++) {
- Push(CreateValue((*merge)[i].type));
- }
- // {drop_values} are about to be dropped anyway, so we can forget their
- // previous types, but we do have to maintain the correct stack height.
- for (uint32_t i = 0; i < drop_values; i++) {
- Push(UnreachableValue(this->pc_));
+ uint32_t inserted_value_count =
+ static_cast<uint32_t>(EnsureStackArguments(drop_values + arity));
+ if (inserted_value_count > 0) {
+        // EnsureStackArguments may have inserted unreachable values into the
+        // bottom
+ // of the stack. If so, mark them with the correct type. If drop values
+ // were also inserted, disregard them, as they will be dropped anyway.
+ Value* stack_base = stack_value(drop_values + arity);
+ for (uint32_t i = 0; i < std::min(arity, inserted_value_count); i++) {
+ if (stack_base[i].type == kWasmBottom) {
+ stack_base[i].type = (*merge)[i].type;
+ }
+ }
}
}
return this->ok();
@@ -5060,6 +5296,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ static constexpr DecodingMode decoding_mode = kFunctionBody;
using Value = ValueBase<validate>;
using Control = ControlBase<Value, validate>;
using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 0733359055..19a862d0d4 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -191,7 +191,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
if (line_numbers) line_numbers->push_back(i.position());
if (opcode == kExprElse || opcode == kExprCatch ||
- opcode == kExprCatchAll || opcode == kExprUnwind) {
+ opcode == kExprCatchAll) {
control_depth--;
}
@@ -241,7 +241,6 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprElse:
case kExprCatch:
case kExprCatchAll:
- case kExprUnwind:
os << " @" << i.pc_offset();
control_depth++;
break;
@@ -253,10 +252,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
i.pc() + 1, module);
os << " @" << i.pc_offset();
- if (decoder.Complete(imm)) {
- for (uint32_t i = 0; i < imm.out_arity(); i++) {
- os << " " << imm.out_type(i).name();
- }
+ CHECK(decoder.Validate(i.pc() + 1, imm));
+ for (uint32_t i = 0; i < imm.out_arity(); i++) {
+ os << " " << imm.out_type(i).name();
}
control_depth++;
break;
@@ -281,20 +279,17 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
- &i, i.pc() + 1);
- os << " sig #" << imm.sig_index;
- if (decoder.Complete(imm)) {
- os << ": " << *imm.sig;
- }
+ CallIndirectImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ os << " sig #" << imm.sig_imm.index;
+ CHECK(decoder.Validate(i.pc() + 1, imm));
+ os << ": " << *imm.sig;
break;
}
case kExprCallFunction: {
CallFunctionImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " function #" << imm.index;
- if (decoder.Complete(imm)) {
- os << ": " << *imm.sig;
- }
+ CHECK(decoder.Validate(i.pc() + 1, imm));
+ os << ": " << *imm.sig;
break;
}
default:
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index b1e1cebe2f..f0451a3cf2 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -98,15 +98,15 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
// Base class for both iterators defined below.
class iterator_base {
public:
- inline iterator_base& operator++() {
+ iterator_base& operator++() {
DCHECK_LT(ptr_, end_);
ptr_ += OpcodeLength(ptr_, end_);
return *this;
}
- inline bool operator==(const iterator_base& that) {
+ bool operator==(const iterator_base& that) {
return this->ptr_ == that.ptr_;
}
- inline bool operator!=(const iterator_base& that) {
+ bool operator!=(const iterator_base& that) {
return this->ptr_ != that.ptr_;
}
@@ -122,7 +122,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
: public iterator_base,
public base::iterator<std::input_iterator_tag, WasmOpcode> {
public:
- inline WasmOpcode operator*() {
+ WasmOpcode operator*() {
DCHECK_LT(ptr_, end_);
return static_cast<WasmOpcode>(*ptr_);
}
@@ -138,7 +138,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
: public iterator_base,
public base::iterator<std::input_iterator_tag, uint32_t> {
public:
- inline uint32_t operator*() {
+ uint32_t operator*() {
DCHECK_LT(ptr_, end_);
return static_cast<uint32_t>(ptr_ - start_);
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 4a2db3d496..ae7962f86f 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,6 +4,8 @@
#include "src/wasm/function-compiler.h"
+#include "src/base/platform/time.h"
+#include "src/base/strings.h"
#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -21,96 +23,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-namespace {
-
-class WasmInstructionBufferImpl {
- public:
- class View : public AssemblerBuffer {
- public:
- View(Vector<uint8_t> buffer, WasmInstructionBufferImpl* holder)
- : buffer_(buffer), holder_(holder) {}
-
- ~View() override {
- if (buffer_.begin() == holder_->old_buffer_.start()) {
- DCHECK_EQ(buffer_.size(), holder_->old_buffer_.size());
- holder_->old_buffer_ = {};
- }
- }
-
- byte* start() const override { return buffer_.begin(); }
-
- int size() const override { return static_cast<int>(buffer_.size()); }
-
- std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
- // If we grow, we must be the current buffer of {holder_}.
- DCHECK_EQ(buffer_.begin(), holder_->buffer_.start());
- DCHECK_EQ(buffer_.size(), holder_->buffer_.size());
- DCHECK_NULL(holder_->old_buffer_);
-
- DCHECK_LT(size(), new_size);
-
- holder_->old_buffer_ = std::move(holder_->buffer_);
- holder_->buffer_ = OwnedVector<uint8_t>::NewForOverwrite(new_size);
- return std::make_unique<View>(holder_->buffer_.as_vector(), holder_);
- }
-
- private:
- const Vector<uint8_t> buffer_;
- WasmInstructionBufferImpl* const holder_;
- };
-
- explicit WasmInstructionBufferImpl(size_t size)
- : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {}
-
- std::unique_ptr<AssemblerBuffer> CreateView() {
- DCHECK_NOT_NULL(buffer_);
- return std::make_unique<View>(buffer_.as_vector(), this);
- }
-
- std::unique_ptr<uint8_t[]> ReleaseBuffer() {
- DCHECK_NULL(old_buffer_);
- DCHECK_NOT_NULL(buffer_);
- return buffer_.ReleaseData();
- }
-
- bool released() const { return buffer_ == nullptr; }
-
- private:
- // The current buffer used to emit code.
- OwnedVector<uint8_t> buffer_;
-
- // While the buffer is grown, we need to temporarily also keep the old buffer
- // alive.
- OwnedVector<uint8_t> old_buffer_;
-};
-
-WasmInstructionBufferImpl* Impl(WasmInstructionBuffer* buf) {
- return reinterpret_cast<WasmInstructionBufferImpl*>(buf);
-}
-
-} // namespace
-
-// PIMPL interface WasmInstructionBuffer for WasmInstBufferImpl
-WasmInstructionBuffer::~WasmInstructionBuffer() {
- Impl(this)->~WasmInstructionBufferImpl();
-}
-
-std::unique_ptr<AssemblerBuffer> WasmInstructionBuffer::CreateView() {
- return Impl(this)->CreateView();
-}
-
-std::unique_ptr<uint8_t[]> WasmInstructionBuffer::ReleaseBuffer() {
- return Impl(this)->ReleaseBuffer();
-}
-
-// static
-std::unique_ptr<WasmInstructionBuffer> WasmInstructionBuffer::New(size_t size) {
- return std::unique_ptr<WasmInstructionBuffer>{
- reinterpret_cast<WasmInstructionBuffer*>(new WasmInstructionBufferImpl(
- std::max(size_t{AssemblerBase::kMinimalBufferSize}, size)))};
-}
-// End of PIMPL interface WasmInstructionBuffer for WasmInstBufferImpl
-
// static
ExecutionTier WasmCompilationUnit::GetBaselineExecutionTier(
const WasmModule* module) {
@@ -121,15 +33,14 @@ ExecutionTier WasmCompilationUnit::GetBaselineExecutionTier(
}
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
- WasmEngine* engine, CompilationEnv* env,
- const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ CompilationEnv* env, const WireBytesStorage* wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
WasmCompilationResult result;
if (func_index_ < static_cast<int>(env->module->num_imported_functions)) {
- result = ExecuteImportWrapperCompilation(engine, env);
+ result = ExecuteImportWrapperCompilation(env);
} else {
- result = ExecuteFunctionCompilation(engine, env, wire_bytes_storage,
- counters, detected);
+ result =
+ ExecuteFunctionCompilation(env, wire_bytes_storage, counters, detected);
}
if (result.succeeded() && counters) {
@@ -145,33 +56,34 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
}
WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
- WasmEngine* engine, CompilationEnv* env) {
+ CompilationEnv* env) {
const FunctionSig* sig = env->module->functions[func_index_].sig;
// Assume the wrapper is going to be a JS function with matching arity at
// instantiation time.
auto kind = compiler::kDefaultImportCallKind;
bool source_positions = is_asmjs_module(env->module);
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- engine, env, kind, sig, source_positions,
+ env, kind, sig, source_positions,
static_cast<int>(sig->parameter_count()));
return result;
}
WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
- WasmEngine* wasm_engine, CompilationEnv* env,
- const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ CompilationEnv* env, const WireBytesStorage* wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
auto* func = &env->module->functions[func_index_];
- Vector<const uint8_t> code = wire_bytes_storage->GetCode(func->code);
+ base::Vector<const uint8_t> code = wire_bytes_storage->GetCode(func->code);
wasm::FunctionBody func_body{func->sig, func->code.offset(), code.begin(),
code.end()};
base::Optional<TimedHistogramScope> wasm_compile_function_time_scope;
if (counters) {
- auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
- wasm, function_size_bytes);
- size_histogram->AddSample(
- static_cast<int>(func_body.end - func_body.start));
+ if ((func_body.end - func_body.start) >= 100 * KB) {
+ auto huge_size_histogram = SELECT_WASM_COUNTER(
+ counters, env->module->origin, wasm, huge_function_size_bytes);
+ huge_size_histogram->AddSample(
+ static_cast<int>(func_body.end - func_body.start));
+ }
auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
wasm_compile, function_time);
wasm_compile_function_time_scope.emplace(timed_histogram);
@@ -198,15 +110,14 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
if (V8_LIKELY(func_index_ >= 32 || (FLAG_wasm_debug_mask_for_testing &
(1 << func_index_)) == 0)) {
result = ExecuteLiftoffCompilation(
- wasm_engine->allocator(), env, func_body, func_index_,
- for_debugging_, counters, detected);
+ env, func_body, func_index_, for_debugging_, counters, detected);
} else {
        // We don't use the debug side table; we only pass it to cover
// different code paths in Liftoff for testing.
std::unique_ptr<DebugSideTable> debug_sidetable;
- result = ExecuteLiftoffCompilation(
- wasm_engine->allocator(), env, func_body, func_index_,
- kForDebugging, counters, detected, {}, &debug_sidetable);
+ result = ExecuteLiftoffCompilation(env, func_body, func_index_,
+ kForDebugging, counters, detected,
+ {}, &debug_sidetable);
}
if (result.succeeded()) break;
}
@@ -218,7 +129,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
- wasm_engine, env, func_body, func_index_, counters, detected);
+ env, func_body, func_index_, counters, detected);
result.for_debugging = for_debugging_;
break;
}
@@ -237,10 +148,10 @@ void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
const char* format, ...) {
DCHECK(must_record_function_compilation(isolate));
- ScopedVector<char> buffer(128);
+ base::ScopedVector<char> buffer(128);
va_list arguments;
va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
+ int len = base::VSNPrintF(buffer, format, arguments);
CHECK_LT(0, len);
va_end(arguments);
Handle<String> name_str =
@@ -266,8 +177,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
WasmCompilationUnit unit(function->func_index, tier, kNoDebugging);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
- isolate->wasm_engine(), &env,
- native_module->compilation_state()->GetWireBytesStorage(),
+ &env, native_module->compilation_state()->GetWireBytesStorage().get(),
isolate->counters(), detected);
if (result.succeeded()) {
WasmCodeRefScope code_ref_scope;
@@ -303,18 +213,18 @@ bool UseGenericWrapper(const FunctionSig* sig) {
} // namespace
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
- Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
- const WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features, AllowGeneric allow_generic)
+ Isolate* isolate, const FunctionSig* sig, const WasmModule* module,
+ bool is_import, const WasmFeatures& enabled_features,
+ AllowGeneric allow_generic)
: isolate_(isolate),
is_import_(is_import),
sig_(sig),
use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
!is_import),
- job_(use_generic_wrapper_ ? nullptr
- : compiler::NewJSToWasmCompilationJob(
- isolate, wasm_engine, sig, module,
- is_import, enabled_features)) {}
+ job_(use_generic_wrapper_
+ ? nullptr
+ : compiler::NewJSToWasmCompilationJob(
+ isolate, sig, module, is_import, enabled_features)) {}
JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
@@ -330,8 +240,7 @@ void JSToWasmWrapperCompilationUnit::Execute() {
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
Handle<Code> code;
if (use_generic_wrapper_) {
- code =
- isolate_->builtins()->builtin_handle(Builtins::kGenericJSToWasmWrapper);
+ code = isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
} else {
CompilationJob::Status status = job_->FinalizeJob(isolate_);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
@@ -350,9 +259,8 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
bool is_import) {
// Run the compilation unit synchronously.
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
- JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
- module, is_import, enabled_features,
- kAllowGeneric);
+ JSToWasmWrapperCompilationUnit unit(isolate, sig, module, is_import,
+ enabled_features, kAllowGeneric);
unit.Execute();
return unit.Finalize();
}
@@ -363,9 +271,8 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
// Run the compilation unit synchronously.
const bool is_import = false;
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
- JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
- module, is_import, enabled_features,
- kDontAllowGeneric);
+ JSToWasmWrapperCompilationUnit unit(isolate, sig, module, is_import,
+ enabled_features, kDontAllowGeneric);
unit.Execute();
return unit.Finalize();
}
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 80cd1a7b67..3f719fae67 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -11,6 +11,7 @@
#include <memory>
+#include "src/codegen/assembler.h"
#include "src/codegen/code-desc.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
@@ -22,7 +23,6 @@
namespace v8 {
namespace internal {
-class AssemblerBuffer;
class Counters;
class OptimizedCompilationJob;
@@ -33,24 +33,6 @@ class WasmCode;
class WasmEngine;
struct WasmFunction;
-class WasmInstructionBuffer final {
- public:
- WasmInstructionBuffer() = delete;
- WasmInstructionBuffer(const WasmInstructionBuffer&) = delete;
- WasmInstructionBuffer& operator=(const WasmInstructionBuffer&) = delete;
- ~WasmInstructionBuffer();
- std::unique_ptr<AssemblerBuffer> CreateView();
- std::unique_ptr<uint8_t[]> ReleaseBuffer();
-
- // Allocate a new {WasmInstructionBuffer}. The size is the maximum of {size}
- // and {AssemblerBase::kMinimalSize}.
- static std::unique_ptr<WasmInstructionBuffer> New(size_t size = 0);
-
- // Override {operator delete} to avoid implicit instantiation of {operator
- // delete} with {size_t} argument. The {size_t} argument would be incorrect.
- void operator delete(void* ptr) { ::operator delete(ptr); }
-};
-
struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
@@ -65,11 +47,11 @@ struct WasmCompilationResult {
operator bool() const { return succeeded(); }
CodeDesc code_desc;
- std::unique_ptr<uint8_t[]> instr_buffer;
+ std::unique_ptr<AssemblerBuffer> instr_buffer;
uint32_t frame_slot_count = 0;
uint32_t tagged_parameter_slots = 0;
- OwnedVector<byte> source_positions;
- OwnedVector<byte> protected_instructions_data;
+ base::OwnedVector<byte> source_positions;
+ base::OwnedVector<byte> protected_instructions_data;
int func_index = kAnonymousFuncIndex;
ExecutionTier requested_tier;
ExecutionTier result_tier;
@@ -84,11 +66,12 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
: func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
- WasmCompilationResult ExecuteCompilation(
- WasmEngine*, CompilationEnv*, const std::shared_ptr<WireBytesStorage>&,
- Counters*, WasmFeatures* detected);
+ WasmCompilationResult ExecuteCompilation(CompilationEnv*,
+ const WireBytesStorage*, Counters*,
+ WasmFeatures* detected);
ExecutionTier tier() const { return tier_; }
+ ForDebugging for_debugging() const { return for_debugging_; }
int func_index() const { return func_index_; }
static void CompileWasmFunction(Isolate*, NativeModule*,
@@ -96,13 +79,12 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ExecutionTier);
private:
- WasmCompilationResult ExecuteFunctionCompilation(
- WasmEngine* wasm_engine, CompilationEnv* env,
- const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
- Counters* counters, WasmFeatures* detected);
+ WasmCompilationResult ExecuteFunctionCompilation(CompilationEnv*,
+ const WireBytesStorage*,
+ Counters*,
+ WasmFeatures* detected);
- WasmCompilationResult ExecuteImportWrapperCompilation(WasmEngine* engine,
- CompilationEnv* env);
+ WasmCompilationResult ExecuteImportWrapperCompilation(CompilationEnv*);
int func_index_;
ExecutionTier tier_;
@@ -120,8 +102,7 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
// and return the builtin (generic) wrapper, when available.
enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };
- JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
- const FunctionSig* sig,
+ JSToWasmWrapperCompilationUnit(Isolate* isolate, const FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
const WasmFeatures& enabled_features,
AllowGeneric allow_generic);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index a81457faa7..e53269d72e 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -137,7 +137,15 @@ class WasmGraphBuildingInterface {
}
while (index < num_locals) {
ValueType type = decoder->local_type(index);
- TFNode* node = DefaultValue(type);
+ TFNode* node;
+ if (decoder->enabled_.has_nn_locals() && !type.is_defaultable()) {
+ DCHECK(type.is_reference());
+ // TODO(jkummerow): Consider using "the hole" instead, to make any
+ // illegal uses more obvious.
+ node = builder_->RefNull();
+ } else {
+ node = DefaultValue(type);
+ }
while (index < num_locals && decoder->local_type(index) == type) {
// Do a whole run of like-typed locals at a time.
ssa_env->locals[index++] = node;
@@ -364,22 +372,22 @@ class WasmGraphBuildingInterface {
void Drop(FullDecoder* decoder) {}
void LocalGet(FullDecoder* decoder, Value* result,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
result->node = ssa_env_->locals[imm.index];
}
void LocalSet(FullDecoder* decoder, const Value& value,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
ssa_env_->locals[imm.index] = value.node;
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
- const LocalIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
result->node = value.node;
ssa_env_->locals[imm.index] = value.node;
}
- void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ void AllocateLocals(FullDecoder* decoder, base::Vector<Value> local_values) {
ZoneVector<TFNode*>* locals = &ssa_env_->locals;
locals->insert(locals->begin(), local_values.size(), nullptr);
for (uint32_t i = 0; i < local_values.size(); i++) {
@@ -403,13 +411,13 @@ class WasmGraphBuildingInterface {
}
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
- const TableIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
result->node =
builder_->TableGet(imm.index, index.node, decoder->position());
}
void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
- const TableIndexImmediate<validate>& imm) {
+ const IndexImmediate<validate>& imm) {
builder_->TableSet(imm.index, index.node, value.node, decoder->position());
}
@@ -460,7 +468,7 @@ class WasmGraphBuildingInterface {
auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
stack_values);
- GetNodes(values.begin(), VectorOf(stack_values));
+ GetNodes(values.begin(), base::VectorOf(stack_values));
} else {
Value* stack_base = ret_count == 0
? nullptr
@@ -468,9 +476,9 @@ class WasmGraphBuildingInterface {
GetNodes(values.begin(), stack_base, ret_count);
}
if (FLAG_trace_wasm) {
- builder_->TraceFunctionExit(VectorOf(values), decoder->position());
+ builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
- builder_->Return(VectorOf(values));
+ builder_->Return(base::VectorOf(values));
SetEnv(internal_env);
}
@@ -625,17 +633,17 @@ class WasmGraphBuildingInterface {
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallIndirect, imm.table_index,
- CheckForNull::kWithoutNullCheck, index.node, imm.sig, imm.sig_index,
- args, returns);
+ DoCall(decoder, kCallIndirect, imm.table_imm.index,
+ CheckForNull::kWithoutNullCheck, index.node, imm.sig,
+ imm.sig_imm.index, args, returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallIndirect, imm.table_index,
- CheckForNull::kWithoutNullCheck, index, imm.sig, imm.sig_index,
- args);
+ DoReturnCall(decoder, kCallIndirect, imm.table_imm.index,
+ CheckForNull::kWithoutNullCheck, index, imm.sig,
+ imm.sig_imm.index, args);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
@@ -683,7 +691,7 @@ class WasmGraphBuildingInterface {
SetEnv(false_env);
}
- void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector<Value> args,
Value* result) {
NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
@@ -692,8 +700,8 @@ class WasmGraphBuildingInterface {
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
- const SimdLaneImmediate<validate>& imm, Vector<Value> inputs,
- Value* result) {
+ const SimdLaneImmediate<validate>& imm,
+ base::Vector<Value> inputs, Value* result) {
NodeVector nodes(inputs.size());
GetNodes(nodes.begin(), inputs);
result->node = builder_->SimdLaneOp(opcode, imm.lane, nodes.begin());
@@ -708,21 +716,20 @@ class WasmGraphBuildingInterface {
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
- const Vector<Value>& value_args) {
+ const base::Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
for (int i = 0; i < count; ++i) {
args[i] = value_args[i].node;
}
- CheckForException(decoder,
- builder_->Throw(imm.index, imm.exception, VectorOf(args),
- decoder->position()));
+ CheckForException(
+ decoder, builder_->Throw(imm.index, imm.exception, base::VectorOf(args),
+ decoder->position()));
TerminateThrow(decoder);
}
void Rethrow(FullDecoder* decoder, Control* block) {
- DCHECK(block->is_try_catchall() || block->is_try_catch() ||
- block->is_try_unwind());
+ DCHECK(block->is_try_catchall() || block->is_try_catch());
TFNode* exception = block->try_info->exception;
DCHECK_NOT_NULL(exception);
CheckForException(decoder, builder_->Rethrow(exception));
@@ -731,7 +738,7 @@ class WasmGraphBuildingInterface {
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> values) {
+ Control* block, base::Vector<Value> values) {
DCHECK(block->is_try_catch());
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
@@ -765,7 +772,7 @@ class WasmGraphBuildingInterface {
// push them onto the operand stack using the passed {values} vector.
SetEnv(if_catch_env);
NodeVector caught_values(values.size());
- Vector<TFNode*> caught_vector = VectorOf(caught_values);
+ base::Vector<TFNode*> caught_vector = base::VectorOf(caught_values);
builder_->GetExceptionValues(exception, imm.exception, caught_vector);
for (size_t i = 0, e = values.size(); i < e; ++i) {
values[i].node = caught_values[i];
@@ -808,8 +815,7 @@ class WasmGraphBuildingInterface {
}
void CatchAll(FullDecoder* decoder, Control* block) {
- DCHECK(block->is_try_catchall() || block->is_try_catch() ||
- block->is_try_unwind());
+ DCHECK(block->is_try_catchall() || block->is_try_catch());
DCHECK_EQ(decoder->control_at(0), block);
// The catch block is unreachable if no possible throws in the try block
@@ -823,7 +829,8 @@ class WasmGraphBuildingInterface {
SetEnv(block->try_info->catch_env);
}
- void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode,
+ base::Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
NodeVector inputs(args.size());
GetNodes(inputs.begin(), args);
@@ -837,11 +844,11 @@ class WasmGraphBuildingInterface {
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- builder_->MemoryInit(imm.data_segment_index, dst.node, src.node, size.node,
+ builder_->MemoryInit(imm.data_segment.index, dst.node, src.node, size.node,
decoder->position());
}
- void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
+ void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
builder_->DataDrop(imm.index, decoder->position());
}
@@ -858,32 +865,33 @@ class WasmGraphBuildingInterface {
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
- Vector<Value> args) {
- builder_->TableInit(imm.table.index, imm.elem_segment_index, args[0].node,
- args[1].node, args[2].node, decoder->position());
+ base::Vector<Value> args) {
+ builder_->TableInit(imm.table.index, imm.element_segment.index,
+ args[0].node, args[1].node, args[2].node,
+ decoder->position());
}
- void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
+ void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
builder_->ElemDrop(imm.index, decoder->position());
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
- Vector<Value> args) {
+ base::Vector<Value> args) {
builder_->TableCopy(imm.table_dst.index, imm.table_src.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
- void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm,
const Value& value, const Value& delta, Value* result) {
result->node = builder_->TableGrow(imm.index, value.node, delta.node);
}
- void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm,
Value* result) {
result->node = builder_->TableSize(imm.index);
}
- void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
builder_->TableFill(imm.index, start.node, value.node, count.node);
}
@@ -896,8 +904,8 @@ class WasmGraphBuildingInterface {
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = args[i].node;
}
- result->node = builder_->StructNewWithRtt(imm.index, imm.struct_type,
- rtt.node, VectorOf(arg_nodes));
+ result->node = builder_->StructNewWithRtt(
+ imm.index, imm.struct_type, rtt.node, base::VectorOf(arg_nodes));
}
void StructNewDefault(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
@@ -907,29 +915,29 @@ class WasmGraphBuildingInterface {
for (uint32_t i = 0; i < field_count; i++) {
arg_nodes[i] = DefaultValue(imm.struct_type->field(i));
}
- result->node = builder_->StructNewWithRtt(imm.index, imm.struct_type,
- rtt.node, VectorOf(arg_nodes));
+ result->node = builder_->StructNewWithRtt(
+ imm.index, imm.struct_type, rtt.node, base::VectorOf(arg_nodes));
}
void StructGet(FullDecoder* decoder, const Value& struct_object,
- const FieldIndexImmediate<validate>& field, bool is_signed,
+ const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
CheckForNull null_check = struct_object.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
result->node = builder_->StructGet(
- struct_object.node, field.struct_index.struct_type, field.index,
+ struct_object.node, field.struct_imm.struct_type, field.field_imm.index,
null_check, is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
- const FieldIndexImmediate<validate>& field,
+ const FieldImmediate<validate>& field,
const Value& field_value) {
CheckForNull null_check = struct_object.type.is_nullable()
? CheckForNull::kWithNullCheck
: CheckForNull::kWithoutNullCheck;
- builder_->StructSet(struct_object.node, field.struct_index.struct_type,
- field.index, field_value.node, null_check,
+ builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
+ field.field_imm.index, field_value.node, null_check,
decoder->position());
}
@@ -945,7 +953,9 @@ class WasmGraphBuildingInterface {
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
- TFNode* initial_value = DefaultValue(imm.array_type->element_type());
+    // Passing a null initial value causes the builder to choose the default
+    // value automatically, based on the element type.
+ TFNode* initial_value = nullptr;
result->node =
builder_->ArrayNewWithRtt(imm.index, imm.array_type, length.node,
initial_value, rtt.node, decoder->position());
@@ -980,6 +990,19 @@ class WasmGraphBuildingInterface {
builder_->ArrayLen(array_obj.node, null_check, decoder->position());
}
+ void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
+ const Value& src, const Value& src_index,
+ const Value& length) {
+ builder_->ArrayCopy(dst.node, dst_index.node, src.node, src_index.node,
+ length.node, decoder->position());
+ }
+
+ void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
+ const base::Vector<Value>& elements, const Value& rtt,
+ Value* result) {
+ UNREACHABLE();
+ }
+
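  // Note (not part of the patch): ArrayInit is unreachable in the graph
  // builder because array.init is currently restricted to initializer
  // expressions, which are decoded with the InitExprInterface below instead
  // of with this interface.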
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
result->node = builder_->I31New(input.node);
}
@@ -997,8 +1020,8 @@ class WasmGraphBuildingInterface {
}
void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
- Value* result) {
- result->node = builder_->RttSub(type_index, parent.node);
+ Value* result, WasmRttSubMode mode) {
+ result->node = builder_->RttSub(type_index, parent.node, mode);
}
using StaticKnowledge = compiler::WasmGraphBuilder::ObjectReferenceKnowledge;
@@ -1086,6 +1109,13 @@ class WasmGraphBuildingInterface {
true);
}
+ void BrOnNonData(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnData>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
+ br_depth, false);
+ }
+
void RefIsFunc(FullDecoder* decoder, const Value& object, Value* result) {
result->node = builder_->RefIsFunc(object.node, object.type.is_nullable());
}
@@ -1102,6 +1132,13 @@ class WasmGraphBuildingInterface {
true);
}
+ void BrOnNonFunc(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnFunc>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
+ br_depth, false);
+ }
+
void RefIsI31(FullDecoder* decoder, const Value& object, Value* result) {
result->node = builder_->RefIsI31(object.node);
}
@@ -1117,6 +1154,13 @@ class WasmGraphBuildingInterface {
true);
}
+ void BrOnNonI31(FullDecoder* decoder, const Value& object,
+ Value* value_on_fallthrough, uint32_t br_depth) {
+ BrOnCastAbs<&compiler::WasmGraphBuilder::BrOnI31>(
+ decoder, object, Value{nullptr, kWasmBottom}, value_on_fallthrough,
+ br_depth, false);
+ }
+
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
to->node = from.node;
}
@@ -1147,7 +1191,7 @@ class WasmGraphBuildingInterface {
}
}
- void GetNodes(TFNode** nodes, Vector<Value> values) {
+ void GetNodes(TFNode** nodes, base::Vector<Value> values) {
GetNodes(nodes, values.begin(), values.size());
}
@@ -1412,22 +1456,22 @@ class WasmGraphBuildingInterface {
}
switch (call_mode) {
case kCallIndirect:
- CheckForException(decoder,
- builder_->CallIndirect(
- table_index, sig_index, VectorOf(arg_nodes),
- VectorOf(return_nodes), decoder->position()));
+ CheckForException(
+ decoder, builder_->CallIndirect(
+ table_index, sig_index, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes), decoder->position()));
break;
case kCallDirect:
CheckForException(
- decoder,
- builder_->CallDirect(sig_index, VectorOf(arg_nodes),
- VectorOf(return_nodes), decoder->position()));
+ decoder, builder_->CallDirect(sig_index, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes),
+ decoder->position()));
break;
case kCallRef:
- CheckForException(decoder,
- builder_->CallRef(sig_index, VectorOf(arg_nodes),
- VectorOf(return_nodes), null_check,
- decoder->position()));
+ CheckForException(
+ decoder, builder_->CallRef(sig_index, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes), null_check,
+ decoder->position()));
break;
}
for (size_t i = 0; i < return_count; ++i) {
@@ -1455,24 +1499,24 @@ class WasmGraphBuildingInterface {
}
NodeVector arg_nodes(arg_count + 1);
- GetNodes(arg_nodes.data(), VectorOf(arg_values));
+ GetNodes(arg_nodes.data(), base::VectorOf(arg_values));
switch (call_mode) {
case kCallIndirect:
- CheckForException(decoder,
- builder_->ReturnCallIndirect(table_index, sig_index,
- VectorOf(arg_nodes),
- decoder->position()));
+ CheckForException(
+ decoder, builder_->ReturnCallIndirect(table_index, sig_index,
+ base::VectorOf(arg_nodes),
+ decoder->position()));
break;
case kCallDirect:
- CheckForException(decoder,
- builder_->ReturnCall(sig_index, VectorOf(arg_nodes),
- decoder->position()));
+ CheckForException(
+ decoder, builder_->ReturnCall(sig_index, base::VectorOf(arg_nodes),
+ decoder->position()));
break;
case kCallRef:
- CheckForException(
- decoder, builder_->ReturnCallRef(sig_index, VectorOf(arg_nodes),
- null_check, decoder->position()));
+ CheckForException(decoder, builder_->ReturnCallRef(
+ sig_index, base::VectorOf(arg_nodes),
+ null_check, decoder->position()));
break;
}
}
diff --git a/deps/v8/src/wasm/init-expr-interface.cc b/deps/v8/src/wasm/init-expr-interface.cc
new file mode 100644
index 0000000000..52c45bd18b
--- /dev/null
+++ b/deps/v8/src/wasm/init-expr-interface.cc
@@ -0,0 +1,143 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/init-expr-interface.h"
+
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/oddball.h"
+#include "src/roots/roots-inl.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void InitExprInterface::I32Const(FullDecoder* decoder, Value* result,
+ int32_t value) {
+ if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+}
+
+void InitExprInterface::I64Const(FullDecoder* decoder, Value* result,
+ int64_t value) {
+ if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+}
+
+void InitExprInterface::F32Const(FullDecoder* decoder, Value* result,
+ float value) {
+ if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+}
+
+void InitExprInterface::F64Const(FullDecoder* decoder, Value* result,
+ double value) {
+ if (isolate_ != nullptr) result->runtime_value = WasmValue(value);
+}
+
+void InitExprInterface::S128Const(FullDecoder* decoder,
+ Simd128Immediate<validate>& imm,
+ Value* result) {
+ if (isolate_ == nullptr) return;
+ result->runtime_value = WasmValue(imm.value, kWasmS128);
+}
+
+void InitExprInterface::RefNull(FullDecoder* decoder, ValueType type,
+ Value* result) {
+ if (isolate_ == nullptr) return;
+ result->runtime_value = WasmValue(isolate_->factory()->null_value(), type);
+}
+
+void InitExprInterface::RefFunc(FullDecoder* decoder, uint32_t function_index,
+ Value* result) {
+ if (isolate_ != nullptr) {
+ auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
+ isolate_, instance_, function_index);
+ result->runtime_value = WasmValue(
+ function, ValueType::Ref(module_->functions[function_index].sig_index,
+ kNonNullable));
+ } else {
+ outer_module_->functions[function_index].declared = true;
+ }
+}
+
+void InitExprInterface::GlobalGet(FullDecoder* decoder, Value* result,
+ const GlobalIndexImmediate<validate>& imm) {
+ if (isolate_ == nullptr) return;
+ const WasmGlobal& global = module_->globals[imm.index];
+ result->runtime_value =
+ global.type.is_numeric()
+ ? WasmValue(GetRawUntaggedGlobalPtr(global), global.type)
+ : WasmValue(handle(tagged_globals_->get(global.offset), isolate_),
+ global.type);
+}
+
+void InitExprInterface::StructNewWithRtt(
+ FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
+ const Value& rtt, const Value args[], Value* result) {
+ if (isolate_ == nullptr) return;
+ std::vector<WasmValue> field_values(imm.struct_type->field_count());
+ for (size_t i = 0; i < field_values.size(); i++) {
+ field_values[i] = args[i].runtime_value;
+ }
+ result->runtime_value =
+ WasmValue(isolate_->factory()->NewWasmStruct(
+ imm.struct_type, field_values.data(),
+ Handle<Map>::cast(rtt.runtime_value.to_ref())),
+ ValueType::Ref(HeapType(imm.index), kNonNullable));
+}
+
+void InitExprInterface::ArrayInit(FullDecoder* decoder,
+ const ArrayIndexImmediate<validate>& imm,
+ const base::Vector<Value>& elements,
+ const Value& rtt, Value* result) {
+ if (isolate_ == nullptr) return;
+ std::vector<WasmValue> element_values;
+ for (Value elem : elements) element_values.push_back(elem.runtime_value);
+ result->runtime_value =
+ WasmValue(isolate_->factory()->NewWasmArray(
+ imm.array_type, element_values,
+ Handle<Map>::cast(rtt.runtime_value.to_ref())),
+ ValueType::Ref(HeapType(imm.index), kNonNullable));
+}
+
+void InitExprInterface::RttCanon(FullDecoder* decoder, uint32_t type_index,
+ Value* result) {
+ if (isolate_ == nullptr) return;
+ result->runtime_value = WasmValue(
+ handle(instance_->managed_object_maps().get(type_index), isolate_),
+ ValueType::Rtt(type_index, 0));
+}
+
+void InitExprInterface::RttSub(FullDecoder* decoder, uint32_t type_index,
+ const Value& parent, Value* result,
+ WasmRttSubMode mode) {
+ if (isolate_ == nullptr) return;
+ ValueType type = parent.type.has_depth()
+ ? ValueType::Rtt(type_index, parent.type.depth() + 1)
+ : ValueType::Rtt(type_index);
+ result->runtime_value =
+ WasmValue(Handle<Object>::cast(AllocateSubRtt(
+ isolate_, instance_, type_index,
+ Handle<Map>::cast(parent.runtime_value.to_ref()), mode)),
+ type);
+}
+
+void InitExprInterface::DoReturn(FullDecoder* decoder,
+ uint32_t /*drop_values*/) {
+ end_found_ = true;
+ // End decoding on "end".
+ decoder->set_end(decoder->pc() + 1);
+ if (isolate_ != nullptr) result_ = decoder->stack_value(1)->runtime_value;
+}
+
+byte* InitExprInterface::GetRawUntaggedGlobalPtr(const WasmGlobal& global) {
+ return reinterpret_cast<byte*>(untagged_globals_->backing_store()) +
+ global.offset;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/init-expr-interface.h b/deps/v8/src/wasm/init-expr-interface.h
new file mode 100644
index 0000000000..535d2286c6
--- /dev/null
+++ b/deps/v8/src/wasm/init-expr-interface.h
@@ -0,0 +1,97 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_INIT_EXPR_INTERFACE_H_
+#define V8_WASM_INIT_EXPR_INTERFACE_H_
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-value.h"
+
+namespace v8 {
+namespace internal {
+
+class WasmInstanceObject;
+class JSArrayBuffer;
+
+namespace wasm {
+
+// An interface for WasmFullDecoder used to decode initializer expressions.
+// This interface has two modes: validation only (when {isolate_ == nullptr}),
+// used in module-decoder, and evaluation (when {isolate_ != nullptr}), used in
+// module-instantiate. We merge these two distinct functionalities into one
+// class to reduce the number of WasmFullDecoder instantiations, and thus V8
+// binary code size.
+class InitExprInterface {
+ public:
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ static constexpr DecodingMode decoding_mode = kInitExpression;
+
+ struct Value : public ValueBase<validate> {
+ WasmValue runtime_value;
+
+ template <typename... Args>
+ explicit Value(Args&&... args) V8_NOEXCEPT
+ : ValueBase(std::forward<Args>(args)...) {}
+ };
+
+ using Control = ControlBase<Value, validate>;
+ using FullDecoder =
+ WasmFullDecoder<validate, InitExprInterface, decoding_mode>;
+
+ InitExprInterface(const WasmModule* module, Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> tagged_globals,
+ Handle<JSArrayBuffer> untagged_globals)
+ : module_(module),
+ outer_module_(nullptr),
+ isolate_(isolate),
+ instance_(instance),
+ tagged_globals_(tagged_globals),
+ untagged_globals_(untagged_globals) {
+ DCHECK_NOT_NULL(isolate);
+ }
+
+ explicit InitExprInterface(WasmModule* outer_module)
+ : module_(nullptr), outer_module_(outer_module), isolate_(nullptr) {}
+
+#define EMPTY_INTERFACE_FUNCTION(name, ...) \
+ V8_INLINE void name(FullDecoder* decoder, ##__VA_ARGS__) {}
+ INTERFACE_META_FUNCTIONS(EMPTY_INTERFACE_FUNCTION)
+ INTERFACE_NON_CONSTANT_FUNCTIONS(EMPTY_INTERFACE_FUNCTION)
+#undef EMPTY_INTERFACE_FUNCTION
+
+#define DECLARE_INTERFACE_FUNCTION(name, ...) \
+ void name(FullDecoder* decoder, ##__VA_ARGS__);
+ INTERFACE_CONSTANT_FUNCTIONS(DECLARE_INTERFACE_FUNCTION)
+#undef DECLARE_INTERFACE_FUNCTION
+
+ WasmValue result() {
+ DCHECK_NOT_NULL(isolate_);
+ return result_;
+ }
+ bool end_found() { return end_found_; }
+
+ private:
+ byte* GetRawUntaggedGlobalPtr(const WasmGlobal& global);
+
+ bool end_found_ = false;
+ WasmValue result_;
+ const WasmModule* module_;
+ WasmModule* outer_module_;
+ Isolate* isolate_;
+ Handle<WasmInstanceObject> instance_;
+ Handle<FixedArray> tagged_globals_;
+ Handle<JSArrayBuffer> untagged_globals_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_INIT_EXPR_INTERFACE_H_
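A minimal usage sketch of the two modes; the call sites are hypothetical and the variable names are assumptions, not part of this patch:

    // Validation-only mode, as used from module-decoder: no isolate,
    // no results are materialized.
    InitExprInterface validation_interface(outer_module);

    // Evaluation mode, as used from module-instantiate: needs an isolate,
    // the instance, and the global backing stores. Once {end_found()} is
    // true, {result()} returns the computed WasmValue.
    InitExprInterface eval_interface(module, isolate, instance,
                                     tagged_globals, untagged_globals);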
diff --git a/deps/v8/src/wasm/leb-helper.h b/deps/v8/src/wasm/leb-helper.h
index f3737600df..5327b1e32d 100644
--- a/deps/v8/src/wasm/leb-helper.h
+++ b/deps/v8/src/wasm/leb-helper.h
@@ -81,7 +81,7 @@ class LEBHelper {
// TODO(titzer): move core logic for decoding LEBs from decoder.h to here.
   // Compute the size of {val} if emitted as an unsigned LEB32.
- static inline size_t sizeof_u32v(size_t val) {
+ static size_t sizeof_u32v(size_t val) {
size_t size = 0;
do {
size++;
@@ -91,7 +91,7 @@ class LEBHelper {
}
   // Compute the size of {val} if emitted as a signed LEB32.
- static inline size_t sizeof_i32v(int32_t val) {
+ static size_t sizeof_i32v(int32_t val) {
size_t size = 1;
if (val >= 0) {
while (val >= 0x40) { // prevent sign extension.
@@ -108,7 +108,7 @@ class LEBHelper {
}
// Compute the size of {val} if emitted as an unsigned LEB64.
- static inline size_t sizeof_u64v(uint64_t val) {
+ static size_t sizeof_u64v(uint64_t val) {
size_t size = 0;
do {
size++;
@@ -118,7 +118,7 @@ class LEBHelper {
}
// Compute the size of {val} if emitted as a signed LEB64.
- static inline size_t sizeof_i64v(int64_t val) {
+ static size_t sizeof_i64v(int64_t val) {
size_t size = 1;
if (val >= 0) {
while (val >= 0x40) { // prevent sign extension.
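For reference, the unsigned variants count one output byte per started 7-bit group. A standalone sketch of the same computation (not part of the header):

    #include <cstddef>
    #include <cstdint>

    size_t sizeof_u32v(uint32_t val) {
      size_t size = 0;
      do {
        size++;       // one output byte per 7 bits of payload
        val >>= 7;
      } while (val != 0);
      return size;
    }
    // sizeof_u32v(127) == 1, sizeof_u32v(128) == 2, sizeof_u32v(624485) == 3.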
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index e7d1c5f21b..a26e306b83 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -21,7 +21,7 @@ void LocalDeclEncoder::Prepend(Zone* zone, const byte** start,
byte* buffer = zone->NewArray<byte, LocalDeclEncoderBuffer>(Size() + size);
size_t pos = Emit(buffer);
if (size > 0) {
- base::Memcpy(buffer + pos, *start, size);
+ memcpy(buffer + pos, *start, size);
}
pos += size;
*start = buffer;
diff --git a/deps/v8/src/wasm/memory-protection-key.cc b/deps/v8/src/wasm/memory-protection-key.cc
index e8252cd9ce..441826e707 100644
--- a/deps/v8/src/wasm/memory-protection-key.cc
+++ b/deps/v8/src/wasm/memory-protection-key.cc
@@ -164,9 +164,9 @@ bool SetPermissionsAndMemoryProtectionKey(
}
DISABLE_CFI_ICALL
-bool SetPermissionsForMemoryProtectionKey(
+void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions) {
- if (key == kNoMemoryProtectionKey) return false;
+ CHECK_NE(kNoMemoryProtectionKey, key);
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
typedef int (*pkey_set_t)(int, unsigned int);
@@ -175,11 +175,10 @@ bool SetPermissionsForMemoryProtectionKey(
DCHECK_NOT_NULL(pkey_set);
int ret = pkey_set(key, permissions);
-
- return ret == /* success */ 0;
+ CHECK_EQ(0 /* success */, ret);
#else
- // On platforms without PKU support, we should have already returned because
- // the key must be {kNoMemoryProtectionKey}.
+ // On platforms without PKU support, we should have failed the CHECK above
+ // because the key must be {kNoMemoryProtectionKey}.
UNREACHABLE();
#endif
}
diff --git a/deps/v8/src/wasm/memory-protection-key.h b/deps/v8/src/wasm/memory-protection-key.h
index 9f9a200cdf..c435357567 100644
--- a/deps/v8/src/wasm/memory-protection-key.h
+++ b/deps/v8/src/wasm/memory-protection-key.h
@@ -77,10 +77,9 @@ bool SetPermissionsAndMemoryProtectionKey(
PageAllocator* page_allocator, base::AddressRegion region,
PageAllocator::Permission page_permissions, int key);
-// Set the key's permissions and return whether this was successful.
-// Returns false on platforms without PKU support or when the operation failed,
-// e.g., because the key was invalid.
-bool SetPermissionsForMemoryProtectionKey(
+// Set the key's permissions. {key} must be valid, i.e. not
+// {kNoMemoryProtectionKey}.
+void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions);
} // namespace wasm
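With the new contract, the validity check moves to the caller. A hypothetical call site (the {pkey} variable and the chosen permission are illustrative):

    if (pkey != kNoMemoryProtectionKey) {
      SetPermissionsForMemoryProtectionKey(pkey, kDisableWrite);
    }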
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 68310a03f3..ecf344add2 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -7,8 +7,8 @@
#include <cinttypes>
#include "src/base/memory.h"
-#include "src/utils/utils.h"
-#include "src/utils/vector.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
namespace v8 {
namespace internal {
@@ -17,15 +17,15 @@ namespace wasm {
void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
const MemoryTracingInfo* info, int func_index,
int position, uint8_t* mem_start) {
- EmbeddedVector<char, 91> value;
+ base::EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
Address address = reinterpret_cast<Address>(mem_start) + info->offset;
switch (mem_rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- base::ReadLittleEndianValue<ctype1>(address), \
- base::ReadLittleEndianValue<ctype2>(address)); \
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ base::SNPrintF(value, str ":" format, \
+ base::ReadLittleEndianValue<ctype1>(address), \
+ base::ReadLittleEndianValue<ctype2>(address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
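In isolation, the updated pattern is a fixed-size stack buffer plus bounds-checked formatting; a sketch using only the two {v8::base} helpers touched here (format string and values are illustrative):

    base::EmbeddedVector<char, 91> value;
    base::SNPrintF(value, "i32:%d / %08x", 42, 42);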
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 4742a85070..2b3ad8fd8b 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -24,6 +24,7 @@
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/identity-map.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -56,8 +57,6 @@ namespace wasm {
namespace {
-enum class CompileMode : uint8_t { kRegular, kTiering };
-
enum class CompileStrategy : uint8_t {
// Compiles functions on first use. In this case, execution will block until
// the function's baseline is reached and top tier compilation starts in
@@ -79,6 +78,7 @@ enum class CompileStrategy : uint8_t {
};
class CompilationStateImpl;
+class CompilationUnitBuilder;
class V8_NODISCARD BackgroundCompileScope {
public:
@@ -186,8 +186,8 @@ class CompilationUnitQueues {
return {};
}
- void AddUnits(Vector<WasmCompilationUnit> baseline_units,
- Vector<WasmCompilationUnit> top_tier_units,
+ void AddUnits(base::Vector<WasmCompilationUnit> baseline_units,
+ base::Vector<WasmCompilationUnit> top_tier_units,
const WasmModule* module) {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
@@ -209,7 +209,7 @@ class CompilationUnitQueues {
for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
std::make_pair(int{kTopTier}, top_tier_units)}) {
int tier = pair.first;
- Vector<WasmCompilationUnit> units = pair.second;
+ base::Vector<WasmCompilationUnit> units = pair.second;
if (units.empty()) continue;
num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
for (WasmCompilationUnit unit : units) {
@@ -535,7 +535,7 @@ class CompilationStateImpl {
// Call right after the constructor, after the {compilation_state_} field in
// the {NativeModule} has been initialized.
- void InitCompileJob(WasmEngine*);
+ void InitCompileJob();
// {kCancelUnconditionally}: Cancel all compilation.
// {kCancelInitialCompilation}: Cancel all compilation if initial (baseline)
@@ -546,8 +546,8 @@ class CompilationStateImpl {
bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
- // baseline and top tier compilation. Must be set before {AddCompilationUnits}
- // is invoked which triggers background compilation.
+ // baseline and top tier compilation. Must be set before
+  // {CommitCompilationUnits} is invoked, which triggers background compilation.
void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers,
int num_export_wrappers);
@@ -555,6 +555,16 @@ class CompilationStateImpl {
// for recompilation (e.g. for tier down) to work later.
void InitializeCompilationProgressAfterDeserialization();
+ // Initializes compilation units based on the information encoded in the
+ // {compilation_progress_}.
+ void InitializeCompilationUnits(
+ std::unique_ptr<CompilationUnitBuilder> builder);
+
+ // Adds compilation units for another function to the
+ // {CompilationUnitBuilder}. This function is the streaming compilation
+ // equivalent to {InitializeCompilationUnits}.
+ void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index);
+
// Initialize recompilation of the whole module: Setup compilation progress
// for recompilation and add the respective compilation units. The callback is
// called immediately if no recompilation is needed, or called later
@@ -564,17 +574,17 @@ class CompilationStateImpl {
CompilationState::callback_t recompilation_finished_callback);
// Add the callback function to be called on compilation events. Needs to be
- // set before {AddCompilationUnits} is run to ensure that it receives all
+ // set before {CommitCompilationUnits} is run to ensure that it receives all
// events. The callback object must support being deleted from any thread.
void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
- void AddCompilationUnits(
- Vector<WasmCompilationUnit> baseline_units,
- Vector<WasmCompilationUnit> top_tier_units,
- Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+ void CommitCompilationUnits(
+ base::Vector<WasmCompilationUnit> baseline_units,
+ base::Vector<WasmCompilationUnit> top_tier_units,
+ base::Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units);
- void AddTopTierCompilationUnit(WasmCompilationUnit);
+ void CommitTopTierCompilationUnit(WasmCompilationUnit);
void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
@@ -587,7 +597,7 @@ class CompilationStateImpl {
void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray>* export_wrappers_out);
- void OnFinishedUnits(Vector<WasmCode*>);
+ void OnFinishedUnits(base::Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
void OnCompilationStopped(WasmFeatures detected);
@@ -626,13 +636,12 @@ class CompilationStateImpl {
return outstanding_recompilation_functions_ == 0;
}
- CompileMode compile_mode() const { return compile_mode_; }
Counters* counters() const { return async_counters_.get(); }
void SetWireBytesStorage(
std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
base::MutexGuard guard(&mutex_);
- wire_bytes_storage_ = wire_bytes_storage;
+ wire_bytes_storage_ = std::move(wire_bytes_storage);
}
std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
@@ -651,6 +660,10 @@ class CompilationStateImpl {
}
private:
+ void AddCompilationUnitInternal(CompilationUnitBuilder* builder,
+ int function_index,
+ uint8_t function_progress);
+
// Trigger callbacks according to the internal counters below
// (outstanding_...), plus the given events.
// Hold the {callbacks_mutex_} when calling this method.
@@ -658,11 +671,10 @@ class CompilationStateImpl {
void PublishCompilationResults(
std::vector<std::unique_ptr<WasmCode>> unpublished_code);
- void PublishCode(Vector<std::unique_ptr<WasmCode>> codes);
+ void PublishCode(base::Vector<std::unique_ptr<WasmCode>> codes);
NativeModule* const native_module_;
std::weak_ptr<NativeModule> const native_module_weak_;
- const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
// Compilation error, atomically updated. This flag can be updated and read
@@ -779,17 +791,12 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
} // namespace
-// static
-constexpr uint32_t CompilationEnv::kMaxMemoryPagesAtRuntime;
-
//////////////////////////////////////////////////////
// PIMPL implementation of {CompilationState}.
CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
-void CompilationState::InitCompileJob(WasmEngine* engine) {
- Impl(this)->InitCompileJob(engine);
-}
+void CompilationState::InitCompileJob() { Impl(this)->InitCompileJob(); }
void CompilationState::CancelCompilation() {
Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
@@ -911,42 +918,38 @@ struct ExecutionTierPair {
};
ExecutionTierPair GetRequestedExecutionTiers(
- const WasmModule* module, CompileMode compile_mode,
- const WasmFeatures& enabled_features, uint32_t func_index) {
+ const WasmModule* module, const WasmFeatures& enabled_features,
+ uint32_t func_index) {
ExecutionTierPair result;
result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module);
- switch (compile_mode) {
- case CompileMode::kRegular:
- result.top_tier = result.baseline_tier;
- return result;
-
- case CompileMode::kTiering:
-
- // Default tiering behaviour.
- result.top_tier = ExecutionTier::kTurbofan;
-
- // Check if compilation hints override default tiering behaviour.
- if (enabled_features.has_compilation_hints()) {
- const WasmCompilationHint* hint =
- GetCompilationHint(module, func_index);
- if (hint != nullptr) {
- result.baseline_tier = ApplyHintToExecutionTier(hint->baseline_tier,
- result.baseline_tier);
- result.top_tier =
- ApplyHintToExecutionTier(hint->top_tier, result.top_tier);
- }
- }
- // Correct top tier if necessary.
- static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
- "Assume an order on execution tiers");
- if (result.baseline_tier > result.top_tier) {
- result.top_tier = result.baseline_tier;
- }
- return result;
+ if (module->origin != kWasmOrigin || !FLAG_wasm_tier_up) {
+ result.top_tier = result.baseline_tier;
+ return result;
}
- UNREACHABLE();
+
+ // Default tiering behaviour.
+ result.top_tier = ExecutionTier::kTurbofan;
+
+ // Check if compilation hints override default tiering behaviour.
+ if (enabled_features.has_compilation_hints()) {
+ const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
+ if (hint != nullptr) {
+ result.baseline_tier =
+ ApplyHintToExecutionTier(hint->baseline_tier, result.baseline_tier);
+ result.top_tier =
+ ApplyHintToExecutionTier(hint->top_tier, result.top_tier);
+ }
+ }
+
+ // Correct top tier if necessary.
+ static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ if (result.baseline_tier > result.top_tier) {
+ result.top_tier = result.baseline_tier;
+ }
+ return result;
}
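A usage sketch of the simplified helper, matching the call-site shape used throughout this file (the DCHECK is illustrative, not part of the patch):

    ExecutionTierPair tiers =
        GetRequestedExecutionTiers(module, enabled_features, func_index);
    // The final clamp guarantees baseline <= top, even when a compilation
    // hint requests a baseline above the default top tier.
    DCHECK_LE(tiers.baseline_tier, tiers.top_tier);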
// The {CompilationUnitBuilder} builds compilation units and stores them in an
@@ -964,8 +967,8 @@ class CompilationUnitBuilder {
return;
}
ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module_->module(), compilation_state()->compile_mode(),
- native_module_->enabled_features(), func_index);
+ native_module_->module(), native_module_->enabled_features(),
+ func_index);
// Compile everything for non-debugging initially. If needed, we will tier
// down when the module is fully compiled. Synchronization would be pretty
// difficult otherwise.
@@ -980,21 +983,17 @@ class CompilationUnitBuilder {
js_to_wasm_wrapper_units_.emplace_back(std::move(unit));
}
- void AddTopTierUnit(int func_index) {
- ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module_->module(), compilation_state()->compile_mode(),
- native_module_->enabled_features(), func_index);
- // In this case, the baseline is lazily compiled, if at all. The compilation
- // unit is added even if the baseline tier is the same.
-#ifdef DEBUG
- auto* module = native_module_->module();
- DCHECK_EQ(kWasmOrigin, module->origin);
- const bool lazy_module = false;
- DCHECK_EQ(CompileStrategy::kLazyBaselineEagerTopTier,
- GetCompileStrategy(module, native_module_->enabled_features(),
- func_index, lazy_module));
-#endif
- tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
+ void AddBaselineUnit(int func_index, ExecutionTier tier) {
+ baseline_units_.emplace_back(func_index, tier, kNoDebugging);
+ }
+
+ void AddTopTierUnit(int func_index, ExecutionTier tier) {
+ tiering_units_.emplace_back(func_index, tier, kNoDebugging);
+ }
+
+ void AddDebugUnit(int func_index) {
+ baseline_units_.emplace_back(func_index, ExecutionTier::kLiftoff,
+ kForDebugging);
}
void AddRecompilationUnit(int func_index, ExecutionTier tier) {
@@ -1009,9 +1008,9 @@ class CompilationUnitBuilder {
js_to_wasm_wrapper_units_.empty()) {
return false;
}
- compilation_state()->AddCompilationUnits(
- VectorOf(baseline_units_), VectorOf(tiering_units_),
- VectorOf(js_to_wasm_wrapper_units_));
+ compilation_state()->CommitCompilationUnits(
+ base::VectorOf(baseline_units_), base::VectorOf(tiering_units_),
+ base::VectorOf(js_to_wasm_wrapper_units_));
Clear();
return true;
}
@@ -1052,7 +1051,7 @@ void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
}
DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
- Vector<const uint8_t> code,
+ base::Vector<const uint8_t> code,
Counters* counters,
AccountingAllocator* allocator,
WasmFeatures enabled_features) {
@@ -1090,7 +1089,7 @@ void ValidateSequentially(
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
const WasmFunction* func = &module->functions[func_index];
- Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
+ base::Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
DecodeResult result = ValidateSingleFunction(
module, func_index, code, counters, allocator, enabled_features);
if (result.failed()) {
@@ -1113,26 +1112,44 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
auto enabled_features = native_module->enabled_features();
Counters* counters = isolate->counters();
+ // Put the timer scope around everything, including the {CodeSpaceWriteScope}
+ // and its destruction, to measure complete overhead (apart from the runtime
+ // function itself, which has constant overhead).
+ base::Optional<TimedHistogramScope> lazy_compile_time_scope;
+ if (base::TimeTicks::IsHighResolution()) {
+ lazy_compile_time_scope.emplace(counters->wasm_lazy_compile_time());
+ }
+
DCHECK(!native_module->lazy_compile_frozen());
- NativeModuleModificationScope native_module_modification_scope(native_module);
+ CodeSpaceWriteScope code_space_write_scope(native_module);
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
+ base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
+ ? base::ThreadTicks::Now()
+ : base::ThreadTicks();
+
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
- ExecutionTierPair tiers = GetRequestedExecutionTiers(
- module, compilation_state->compile_mode(), enabled_features, func_index);
+ ExecutionTierPair tiers =
+ GetRequestedExecutionTiers(module, enabled_features, func_index);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
kNoDebugging};
CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmEngine* engine = GetWasmEngine();
WasmFeatures detected_features;
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
- isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
- counters, &detected_features);
+ &env, compilation_state->GetWireBytesStorage().get(), counters,
+ &detected_features);
compilation_state->OnCompilationStopped(detected_features);
+ if (!thread_ticks.IsNull()) {
+ native_module->UpdateCPUDuration(
+ (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(),
+ tiers.baseline_tier);
+ }
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
@@ -1141,11 +1158,11 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
const WasmFunction* func = &module->functions[func_index];
if (result.failed()) {
ErrorThrower thrower(isolate, nullptr);
- Vector<const uint8_t> code =
+ base::Vector<const uint8_t> code =
compilation_state->GetWireBytesStorage()->GetCode(func->code);
- DecodeResult decode_result = ValidateSingleFunction(
- module, func_index, code, counters, isolate->wasm_engine()->allocator(),
- enabled_features);
+ DecodeResult decode_result =
+ ValidateSingleFunction(module, func_index, code, counters,
+ engine->allocator(), enabled_features);
CHECK(decode_result.failed());
SetCompileError(&thrower, ModuleWireBytes(native_module->wire_bytes()),
func, module, decode_result.error());
@@ -1173,7 +1190,7 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
CompileStrategy::kLazy &&
tiers.baseline_tier < tiers.top_tier) {
WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
- compilation_state->AddTopTierCompilationUnit(tiering_unit);
+ compilation_state->CommitTopTierCompilationUnit(tiering_unit);
}
return true;
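The CPU-time accounting added in this function and in the background path below follows one pattern; a condensed sketch with the surrounding details elided:

    base::ThreadTicks start = base::ThreadTicks::IsSupported()
                                  ? base::ThreadTicks::Now()
                                  : base::ThreadTicks();
    // ... compile ...
    if (!start.IsNull()) {
      native_module->UpdateCPUDuration(
          (base::ThreadTicks::Now() - start).InMicroseconds(),
          tiers.baseline_tier);
    }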
@@ -1220,8 +1237,7 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
if (!wrapper_unit) return kNoMoreUnits;
isolate = wrapper_unit->isolate();
wrapper_compilation_token =
- compile_scope.native_module()->engine()->StartWrapperCompilation(
- isolate);
+ wasm::GetWasmEngine()->StartWrapperCompilation(isolate);
if (!wrapper_compilation_token) return kNoMoreUnits;
}
@@ -1279,7 +1295,6 @@ CompilationExecutionResult ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
- WasmEngine* engine;
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
@@ -1293,12 +1308,15 @@ CompilationExecutionResult ExecuteCompilationUnits(
WasmFeatures detected_features = WasmFeatures::None();
+ base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
+ ? base::ThreadTicks::Now()
+ : base::ThreadTicks();
+
// Preparation (synchronized): Initialize the fields above and get the first
// compilation unit.
{
BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kYield;
- engine = compile_scope.native_module()->engine();
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
@@ -1317,7 +1335,7 @@ CompilationExecutionResult ExecuteCompilationUnits(
while (unit->tier() == current_tier) {
// (asynchronous): Execute the compilation.
WasmCompilationResult result = unit->ExecuteCompilation(
- engine, &env.value(), wire_bytes, counters, &detected_features);
+ &env.value(), wire_bytes.get(), counters, &detected_features);
results_to_publish.emplace_back(std::move(result));
bool yield = delegate && delegate->ShouldYield();
@@ -1331,13 +1349,22 @@ CompilationExecutionResult ExecuteCompilationUnits(
return kNoMoreUnits;
}
+ if (!unit->for_debugging() && result.result_tier != current_tier) {
+ compile_scope.native_module()->AddLiftoffBailout();
+ }
+
// Yield or get next unit.
if (yield ||
!(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
queue, baseline_only))) {
+ if (!thread_ticks.IsNull()) {
+ compile_scope.native_module()->UpdateCPUDuration(
+ (base::ThreadTicks::Now() - thread_ticks).InMicroseconds(),
+ current_tier);
+ }
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope.native_module()->AddCompiledCode(
- VectorOf(std::move(results_to_publish)));
+ base::VectorOf(std::move(results_to_publish)));
results_to_publish.clear();
compile_scope.compilation_state()->SchedulePublishCompilationResults(
std::move(unpublished_code));
@@ -1346,17 +1373,25 @@ CompilationExecutionResult ExecuteCompilationUnits(
return yield ? kYield : kNoMoreUnits;
}
- // Before executing a TurboFan unit, ensure to publish all previous
- // units. If we compiled Liftoff before, we need to publish them anyway
- // to ensure fast completion of baseline compilation, if we compiled
- // TurboFan before, we publish to reduce peak memory consumption.
- // Also publish after finishing a certain amount of units, to avoid
- // contention when all threads publish at the end.
- if (unit->tier() == ExecutionTier::kTurbofan ||
- queue->ShouldPublish(static_cast<int>(results_to_publish.size()))) {
+      // Publish after finishing a certain number of units, to avoid contention
+ // when all threads publish at the end.
+ bool batch_full =
+ queue->ShouldPublish(static_cast<int>(results_to_publish.size()));
+ // Also publish each time the compilation tier changes from Liftoff to
+ // TurboFan, such that we immediately publish the baseline compilation
+ // results to start execution, and do not wait for a batch to fill up.
+ bool liftoff_finished = unit->tier() != current_tier &&
+ unit->tier() == ExecutionTier::kTurbofan;
+ if (batch_full || liftoff_finished) {
+ if (!thread_ticks.IsNull()) {
+ base::ThreadTicks thread_ticks_now = base::ThreadTicks::Now();
+ compile_scope.native_module()->UpdateCPUDuration(
+ (thread_ticks_now - thread_ticks).InMicroseconds(), current_tier);
+ thread_ticks = thread_ticks_now;
+ }
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope.native_module()->AddCompiledCode(
- VectorOf(std::move(results_to_publish)));
+ base::VectorOf(std::move(results_to_publish)));
results_to_publish.clear();
compile_scope.compilation_state()->SchedulePublishCompilationResults(
std::move(unpublished_code));
@@ -1369,10 +1404,8 @@ CompilationExecutionResult ExecuteCompilationUnits(
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
// Returns the number of units added.
-int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
- NativeModule* native_module,
- CompilationUnitBuilder* builder,
- const WasmFeatures& enabled_features) {
+int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module,
+ CompilationUnitBuilder* builder) {
std::unordered_set<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>> keys;
for (auto exp : native_module->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
@@ -1380,8 +1413,8 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
JSToWasmWrapperKey key(function.imported, *function.sig);
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
- isolate, wasm_engine, function.sig, native_module->module(),
- function.imported, enabled_features,
+ isolate, function.sig, native_module->module(), function.imported,
+ native_module->enabled_features(),
JSToWasmWrapperCompilationUnit::kAllowGeneric);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
@@ -1417,41 +1450,41 @@ int AddImportWrapperUnits(NativeModule* native_module,
return static_cast<int>(keys.size());
}
-void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
- CompilationStateImpl* compilation_state =
- Impl(native_module->compilation_state());
+void InitializeLazyCompilation(NativeModule* native_module) {
const bool lazy_module = IsLazyModule(native_module->module());
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
- CompilationUnitBuilder builder(native_module);
auto* module = native_module->module();
- const bool prefer_liftoff = native_module->IsTieredDown();
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
+ base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
for (uint32_t func_index = start; func_index < end; func_index++) {
- if (prefer_liftoff) {
- builder.AddRecompilationUnit(func_index, ExecutionTier::kLiftoff);
- continue;
- }
CompileStrategy strategy = GetCompileStrategy(
module, native_module->enabled_features(), func_index, lazy_module);
- if (strategy == CompileStrategy::kLazy) {
- native_module->UseLazyStub(func_index);
- } else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
- builder.AddTopTierUnit(func_index);
+ if (strategy == CompileStrategy::kLazy ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
+ // Open a single scope for all following calls to {UseLazyStub()}, instead
+ // of flipping page permissions for each {func_index} individually.
+ if (!lazy_code_space_write_scope.has_value()) {
+ lazy_code_space_write_scope.emplace(native_module);
+ }
native_module->UseLazyStub(func_index);
- } else {
- DCHECK_EQ(strategy, CompileStrategy::kEager);
- builder.AddUnits(func_index);
}
}
- int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
+}
+
+std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
+ Isolate* isolate, NativeModule* native_module) {
+ InitializeLazyCompilation(native_module);
+ CompilationStateImpl* compilation_state =
+ Impl(native_module->compilation_state());
+ const bool lazy_module = IsLazyModule(native_module->module());
+ auto builder = std::make_unique<CompilationUnitBuilder>(native_module);
+ int num_import_wrappers = AddImportWrapperUnits(native_module, builder.get());
int num_export_wrappers =
- AddExportWrapperUnits(isolate, isolate->wasm_engine(), native_module,
- &builder, WasmFeatures::FromIsolate(isolate));
+ AddExportWrapperUnits(isolate, native_module, builder.get());
compilation_state->InitializeCompilationProgress(
lazy_module, num_import_wrappers, num_export_wrappers);
- builder.Commit();
+ return builder;
}
bool MayCompriseLazyFunctions(const WasmModule* module,
@@ -1501,7 +1534,6 @@ class CompilationTimeCallback {
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
}
- // TODO(sartang@microsoft.com): Remove wall_clock_time_in_us field
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1511,8 +1543,9 @@ class CompilationTimeCallback {
true, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_duration_in_us
- };
+ duration.InMicroseconds(), // wall_clock_duration_in_us
+ static_cast<int64_t>( // cpu_time_duration_in_us
+ native_module->baseline_compilation_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
if (event == CompilationEvent::kFinishedTopTierCompilation) {
@@ -1522,8 +1555,9 @@ class CompilationTimeCallback {
v8::metrics::WasmModuleTieredUp event{
FLAG_wasm_lazy_compilation, // lazy
native_module->turbofan_code_size(), // code_size_in_bytes
- duration.InMicroseconds() // wall_clock_duration_in_us
- };
+ duration.InMicroseconds(), // wall_clock_duration_in_us
+ static_cast<int64_t>( // cpu_time_duration_in_us
+ native_module->tier_up_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
if (event == CompilationEvent::kFailedCompilation) {
@@ -1536,8 +1570,9 @@ class CompilationTimeCallback {
false, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_duration_in_us
- };
+ duration.InMicroseconds(), // wall_clock_duration_in_us
+ static_cast<int64_t>( // cpu_time_duration_in_us
+ native_module->baseline_compilation_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
}
@@ -1583,7 +1618,9 @@ void CompileNativeModule(Isolate* isolate,
}
// Initialize the compilation units and kick off background compile tasks.
- InitializeCompilationUnits(isolate, native_module.get());
+ std::unique_ptr<CompilationUnitBuilder> builder =
+ InitializeCompilation(isolate, native_module.get());
+ compilation_state->InitializeCompilationUnits(std::move(builder));
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedExportWrappers);
@@ -1622,10 +1659,9 @@ void CompileNativeModule(Isolate* isolate,
class BackgroundCompileJob final : public JobTask {
public:
explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
- WasmEngine* engine,
std::shared_ptr<Counters> async_counters)
: native_module_(std::move(native_module)),
- engine_barrier_(engine->GetBarrierForBackgroundCompile()),
+ engine_barrier_(GetWasmEngine()->GetBarrierForBackgroundCompile()),
async_counters_(std::move(async_counters)) {}
void Run(JobDelegate* delegate) override {
@@ -1659,15 +1695,15 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out, int compilation_id) {
const WasmModule* wasm_module = module.get();
- OwnedVector<uint8_t> wire_bytes_copy =
- OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
+ WasmEngine* engine = GetWasmEngine();
+ base::OwnedVector<uint8_t> wire_bytes_copy =
+ base::OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
// Prefer {wire_bytes_copy} to {wire_bytes.module_bytes()} for the temporary
// cache key. When we eventually install the module in the cache, the wire
// bytes of the temporary key and the new key have the same base pointer and
// we can skip the full bytes comparison.
- std::shared_ptr<NativeModule> native_module =
- isolate->wasm_engine()->MaybeGetNativeModule(
- wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
+ std::shared_ptr<NativeModule> native_module = engine->MaybeGetNativeModule(
+ wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
if (native_module) {
// TODO(thibaudm): Look into sharing export wrappers.
CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out);
@@ -1687,8 +1723,8 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
size_t code_size_estimate =
wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(),
uses_liftoff);
- native_module = isolate->wasm_engine()->NewNativeModule(
- isolate, enabled, module, code_size_estimate);
+ native_module =
+ engine->NewNativeModule(isolate, enabled, module, code_size_estimate);
native_module->SetWireBytes(std::move(wire_bytes_copy));
native_module->compilation_state()->set_compilation_id(compilation_id);
// Sync compilation is user blocking, so we increase the priority.
@@ -1698,8 +1734,8 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
isolate->GetOrRegisterRecorderContextId(isolate->native_context());
CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module,
export_wrappers_out);
- bool cache_hit = !isolate->wasm_engine()->UpdateNativeModuleCache(
- thrower->error(), &native_module, isolate);
+ bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),
+ &native_module, isolate);
if (thrower->error()) return {};
if (cache_hit) {
@@ -1708,7 +1744,7 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
}
// Ensure that the code objects are logged before returning.
- isolate->wasm_engine()->LogOutstandingCodesForIsolate(isolate);
+ engine->LogOutstandingCodesForIsolate(isolate);
return native_module;
}
@@ -1775,7 +1811,7 @@ void AsyncCompileJob::Start() {
void AsyncCompileJob::Abort() {
// Removing this job will trigger the destructor, which will cancel all
// compilation.
- isolate_->wasm_engine()->RemoveCompileJob(this);
+ GetWasmEngine()->RemoveCompileJob(this);
}
class AsyncStreamingProcessor final : public StreamingProcessor {
@@ -1786,10 +1822,11 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
~AsyncStreamingProcessor() override;
- bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) override;
- bool ProcessSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ bool ProcessSection(SectionCode section_code,
+ base::Vector<const uint8_t> bytes,
uint32_t offset) override;
bool ProcessCodeSectionHeader(int num_functions,
@@ -1798,19 +1835,19 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
int code_section_start,
int code_section_length) override;
- bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) override;
void OnFinishedChunk() override;
- void OnFinishedStream(OwnedVector<uint8_t> bytes) override;
+ void OnFinishedStream(base::OwnedVector<uint8_t> bytes) override;
void OnError(const WasmError&) override;
void OnAbort() override;
- bool Deserialize(Vector<const uint8_t> wire_bytes,
- Vector<const uint8_t> module_bytes) override;
+ bool Deserialize(base::Vector<const uint8_t> wire_bytes,
+ base::Vector<const uint8_t> module_bytes) override;
private:
// Finishes the AsyncCompileJob with an error.
@@ -1820,7 +1857,6 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
ModuleDecoder decoder_;
AsyncCompileJob* job_;
- WasmEngine* wasm_engine_;
std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
int num_functions_ = 0;
bool prefix_cache_hit_ = false;
@@ -1876,7 +1912,7 @@ void AsyncCompileJob::CreateNativeModule(
// Create the module object and populate with compiled functions and
// information needed at instantiation time.
- native_module_ = isolate_->wasm_engine()->NewNativeModule(
+ native_module_ = GetWasmEngine()->NewNativeModule(
isolate_, enabled_features_, std::move(module), code_size_estimate);
native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
native_module_->compilation_state()->set_compilation_id(compilation_id_);
@@ -1884,7 +1920,7 @@ void AsyncCompileJob::CreateNativeModule(
bool AsyncCompileJob::GetOrCreateNativeModule(
std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
- native_module_ = isolate_->wasm_engine()->MaybeGetNativeModule(
+ native_module_ = GetWasmEngine()->MaybeGetNativeModule(
module->origin, wire_bytes_.module_bytes(), isolate_);
if (native_module_ == nullptr) {
CreateNativeModule(std::move(module), code_size_estimate);
@@ -1897,9 +1933,9 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// Create heap objects for script and module bytes to be stored in the
// module object. Asm.js is not compiled asynchronously.
DCHECK(module_object_.is_null());
- auto source_url = stream_ ? stream_->url() : Vector<const char>();
- auto script = isolate_->wasm_engine()->GetOrCreateScript(
- isolate_, native_module_, source_url);
+ auto source_url = stream_ ? stream_->url() : base::Vector<const char>();
+ auto script =
+ GetWasmEngine()->GetOrCreateScript(isolate_, native_module_, source_url);
Handle<WasmModuleObject> module_object =
WasmModuleObject::New(isolate_, native_module_, script);
@@ -1935,10 +1971,11 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
is_after_deserialization, // deserialized
wasm_lazy_compilation_, // lazy
!compilation_state->failed(), // success
- native_module_->liftoff_code_size(), // code_size_in_bytes
+ native_module_->turbofan_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_duration_in_us
- };
+ duration.InMicroseconds(), // wall_clock_duration_in_us
+ static_cast<int64_t>( // cpu_time_duration_in_us
+ native_module_->baseline_compilation_cpu_duration())};
isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
}
}
@@ -1996,7 +2033,7 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) {
thrower.CompileFailed(error);
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_engine()->RemoveCompileJob(this);
+ GetWasmEngine()->RemoveCompileJob(this);
resolver_->OnCompilationFailed(thrower.Reify());
}
@@ -2010,7 +2047,7 @@ void AsyncCompileJob::AsyncCompileFailed() {
DCHECK(thrower.error());
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
- isolate_->wasm_engine()->RemoveCompileJob(this);
+ GetWasmEngine()->RemoveCompileJob(this);
resolver_->OnCompilationFailed(thrower.Reify());
}
@@ -2044,9 +2081,8 @@ class AsyncCompileJob::CompilationStateCallback {
// If we get a conflicting module, wait until we are back in the
// main thread to update {job_->native_module_} to avoid a data race.
std::shared_ptr<NativeModule> native_module = job_->native_module_;
- bool cache_hit =
- !job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- false, &native_module, job_->isolate_);
+ bool cache_hit = !GetWasmEngine()->UpdateNativeModuleCache(
+ false, &native_module, job_->isolate_);
DCHECK_EQ(cache_hit, native_module != job_->native_module_);
job_->DoSync<CompileFinished>(cache_hit ? std::move(native_module)
: nullptr);
@@ -2064,8 +2100,8 @@ class AsyncCompileJob::CompilationStateCallback {
// Don't update {job_->native_module_} to avoid data races with other
// compilation threads. Use a copy of the shared pointer instead.
std::shared_ptr<NativeModule> native_module = job_->native_module_;
- job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- true, &native_module, job_->isolate_);
+ GetWasmEngine()->UpdateNativeModuleCache(true, &native_module,
+ job_->isolate_);
job_->DoSync<CompileFailed>();
}
break;
@@ -2232,7 +2268,7 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
result = DecodeWasmModule(
enabled_features, job->wire_bytes_.start(), job->wire_bytes_.end(),
false, kWasmOrigin, counters_, metrics_recorder_, job->context_id(),
- DecodingMethod::kAsync, job->isolate()->wasm_engine()->allocator());
+ DecodingMethod::kAsync, GetWasmEngine()->allocator());
// Validate lazy functions here if requested.
if (!FLAG_wasm_lazy_validation && result.ok()) {
@@ -2240,13 +2276,13 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
DCHECK_EQ(module->origin, kWasmOrigin);
const bool lazy_module = job->wasm_lazy_compilation_;
if (MayCompriseLazyFunctions(module, enabled_features, lazy_module)) {
- auto allocator = job->isolate()->wasm_engine()->allocator();
+ auto allocator = GetWasmEngine()->allocator();
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
for (int func_index = start; func_index < end; func_index++) {
const WasmFunction* func = &module->functions[func_index];
- Vector<const uint8_t> code =
+ base::Vector<const uint8_t> code =
job->wire_bytes_.GetFunctionBytes(func);
CompileStrategy strategy = GetCompileStrategy(
@@ -2350,13 +2386,9 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
if (start_compilation_) {
- // TODO(ahaas): Try to remove the {start_compilation_} check when
- // streaming decoding is done in the background. If
- // InitializeCompilationUnits always returns 0 for streaming compilation,
- // then DoAsync would do the same as NextStep already.
-
- // Add compilation units and kick off compilation.
- InitializeCompilationUnits(job->isolate(), job->native_module_.get());
+ std::unique_ptr<CompilationUnitBuilder> builder =
+ InitializeCompilation(job->isolate(), job->native_module_.get());
+ compilation_state->InitializeCompilationUnits(std::move(builder));
// We are in single-threaded mode, so there are no worker tasks that will
// do the compilation. We call {WaitForCompilationEvent} here so that the
      // main thread participates and finishes the compilation.
@@ -2392,8 +2424,7 @@ class SampleTopTierCodeSizeCallback {
void operator()(CompilationEvent event) {
if (event != CompilationEvent::kFinishedTopTierCompilation) return;
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
- native_module->engine()->SampleTopTierCodeSizeInAllIsolates(
- native_module);
+ GetWasmEngine()->SampleTopTierCodeSizeInAllIsolates(native_module);
}
}
@@ -2435,7 +2466,7 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
void AsyncCompileJob::FinishModule() {
TRACE_COMPILE("(4) Finish module...\n");
AsyncCompileSucceeded(module_object_);
- isolate_->wasm_engine()->RemoveCompileJob(this);
+ GetWasmEngine()->RemoveCompileJob(this);
}
AsyncStreamingProcessor::AsyncStreamingProcessor(
@@ -2443,7 +2474,6 @@ AsyncStreamingProcessor::AsyncStreamingProcessor(
AccountingAllocator* allocator)
: decoder_(job->enabled_features_),
job_(job),
- wasm_engine_(job_->isolate_->wasm_engine()),
compilation_unit_builder_(nullptr),
async_counters_(async_counters),
allocator_(allocator) {}
@@ -2451,7 +2481,7 @@ AsyncStreamingProcessor::AsyncStreamingProcessor(
AsyncStreamingProcessor::~AsyncStreamingProcessor() {
if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
// Clean up the temporary cache entry.
- job_->isolate_->wasm_engine()->StreamingCompilationFailed(prefix_hash_);
+ GetWasmEngine()->StreamingCompilationFailed(prefix_hash_);
}
}
@@ -2491,12 +2521,12 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
}
// Process the module header.
-bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
- uint32_t offset) {
+bool AsyncStreamingProcessor::ProcessModuleHeader(
+ base::Vector<const uint8_t> bytes, uint32_t offset) {
TRACE_STREAMING("Process module header...\n");
- decoder_.StartDecoding(
- job_->isolate()->counters(), job_->isolate()->metrics_recorder(),
- job_->context_id(), job_->isolate()->wasm_engine()->allocator());
+ decoder_.StartDecoding(job_->isolate()->counters(),
+ job_->isolate()->metrics_recorder(),
+ job_->context_id(), GetWasmEngine()->allocator());
decoder_.DecodeModuleHeader(bytes, offset);
if (!decoder_.ok()) {
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
@@ -2508,7 +2538,7 @@ bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
// Process all sections except for the code section.
bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
- Vector<const uint8_t> bytes,
+ base::Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process section %d ...\n", section_code);
if (compilation_unit_builder_) {
@@ -2567,7 +2597,7 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
prefix_hash_ = base::hash_combine(prefix_hash_,
static_cast<uint32_t>(code_section_length));
- if (!wasm_engine_->GetStreamingCompilationOwnership(prefix_hash_)) {
+ if (!GetWasmEngine()->GetStreamingCompilationOwnership(prefix_hash_)) {
// Known prefix, wait until the end of the stream and check the cache.
prefix_cache_hit_ = true;
return true;
@@ -2589,29 +2619,18 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
- const bool lazy_module = job_->wasm_lazy_compilation_;
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
- compilation_unit_builder_.reset(
- new CompilationUnitBuilder(job_->native_module_.get()));
-
- NativeModule* native_module = job_->native_module_.get();
-
- int num_import_wrappers =
- AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
- int num_export_wrappers = AddExportWrapperUnits(
- job_->isolate_, wasm_engine_, native_module,
- compilation_unit_builder_.get(), job_->enabled_features_);
- compilation_state->InitializeCompilationProgress(
- lazy_module, num_import_wrappers, num_export_wrappers);
+ compilation_unit_builder_ =
+ InitializeCompilation(job_->isolate(), job_->native_module_.get());
return true;
}
// Process a function body.
-bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
- uint32_t offset) {
+bool AsyncStreamingProcessor::ProcessFunctionBody(
+ base::Vector<const uint8_t> bytes, uint32_t offset) {
TRACE_STREAMING("Process function body %d ...\n", num_functions_);
decoder_.DecodeFunctionBody(
@@ -2648,17 +2667,9 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
return true;
}
- NativeModule* native_module = job_->native_module_.get();
- if (strategy == CompileStrategy::kLazy) {
- native_module->UseLazyStub(func_index);
- } else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
- compilation_unit_builder_->AddTopTierUnit(func_index);
- native_module->UseLazyStub(func_index);
- } else {
- DCHECK_EQ(strategy, CompileStrategy::kEager);
- compilation_unit_builder_->AddUnits(func_index);
- }
-
+ auto* compilation_state = Impl(job_->native_module_->compilation_state());
+ compilation_state->AddCompilationUnit(compilation_unit_builder_.get(),
+ func_index);
++num_functions_;
return true;
@@ -2675,7 +2686,8 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
}
// Finish the processing of the stream.
-void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
+void AsyncStreamingProcessor::OnFinishedStream(
+ base::OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
DCHECK_EQ(NativeModuleCache::PrefixHash(bytes.as_vector()), prefix_hash_);
ModuleResult result = decoder_.FinishDecoding(false);
@@ -2737,7 +2749,7 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
if (needs_finish) {
const bool failed = job_->native_module_->compilation_state()->failed();
if (!cache_hit) {
- cache_hit = !job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
+ cache_hit = !GetWasmEngine()->UpdateNativeModuleCache(
failed, &job_->native_module_, job_->isolate_);
}
if (failed) {
@@ -2759,9 +2771,30 @@ void AsyncStreamingProcessor::OnAbort() {
job_->Abort();
}
-bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
- Vector<const uint8_t> wire_bytes) {
+namespace {
+class DeserializationTimeScope {
+ public:
+ explicit DeserializationTimeScope(TimedHistogram* counter)
+ : counter_(counter), start_(base::TimeTicks::Now()) {}
+
+ ~DeserializationTimeScope() {
+ base::TimeDelta duration = base::TimeTicks::Now() - start_;
+    int duration_ms = static_cast<int>(duration.InMilliseconds());
+    counter_->AddSample(duration_ms);
+ }
+
+ private:
+ TimedHistogram* counter_;
+ base::TimeTicks start_;
+};
+} // namespace
+
+bool AsyncStreamingProcessor::Deserialize(
+ base::Vector<const uint8_t> module_bytes,
+ base::Vector<const uint8_t> wire_bytes) {
TRACE_EVENT0("v8.wasm", "wasm.Deserialize");
+ DeserializationTimeScope time_scope(
+ job_->isolate()->counters()->wasm_deserialization_time());
// DeserializeNativeModule and FinishCompile assume that they are executed in
// a HandleScope, and that a context is set on the isolate.
HandleScope scope(job_->isolate_);
@@ -2785,19 +2818,14 @@ CompilationStateImpl::CompilationStateImpl(
std::shared_ptr<Counters> async_counters)
: native_module_(native_module.get()),
native_module_weak_(std::move(native_module)),
- compile_mode_(FLAG_wasm_tier_up &&
- native_module->module()->origin == kWasmOrigin
- ? CompileMode::kTiering
- : CompileMode::kRegular),
async_counters_(std::move(async_counters)),
compilation_unit_queues_(native_module->num_functions()) {}
-void CompilationStateImpl::InitCompileJob(WasmEngine* engine) {
+void CompilationStateImpl::InitCompileJob() {
DCHECK_NULL(compile_job_);
compile_job_ = V8::GetCurrentPlatform()->PostJob(
- TaskPriority::kUserVisible,
- std::make_unique<BackgroundCompileJob>(native_module_weak_, engine,
- async_counters_));
+ TaskPriority::kUserVisible, std::make_unique<BackgroundCompileJob>(
+ native_module_weak_, async_counters_));
}
void CompilationStateImpl::CancelCompilation(
@@ -2849,8 +2877,8 @@ void CompilationStateImpl::InitializeCompilationProgress(
outstanding_top_tier_functions_++;
continue;
}
- ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
- module, compile_mode(), enabled_features, func_index);
+ ExecutionTierPair requested_tiers =
+ GetRequestedExecutionTiers(module, enabled_features, func_index);
CompileStrategy strategy =
GetCompileStrategy(module, enabled_features, func_index, lazy_module);
@@ -2889,6 +2917,68 @@ void CompilationStateImpl::InitializeCompilationProgress(
TriggerCallbacks();
}
+void CompilationStateImpl::AddCompilationUnitInternal(
+ CompilationUnitBuilder* builder, int function_index,
+ uint8_t function_progress) {
+ ExecutionTier required_baseline_tier =
+ CompilationStateImpl::RequiredBaselineTierField::decode(
+ function_progress);
+ ExecutionTier required_top_tier =
+ CompilationStateImpl::RequiredTopTierField::decode(function_progress);
+ ExecutionTier reached_tier =
+ CompilationStateImpl::ReachedTierField::decode(function_progress);
+
+ if (reached_tier < required_baseline_tier) {
+ builder->AddBaselineUnit(function_index, required_baseline_tier);
+ }
+ if (reached_tier < required_top_tier &&
+ required_baseline_tier != required_top_tier) {
+ builder->AddTopTierUnit(function_index, required_top_tier);
+ }
+}
+
+void CompilationStateImpl::InitializeCompilationUnits(
+ std::unique_ptr<CompilationUnitBuilder> builder) {
+ int offset = native_module_->module()->num_imported_functions;
+ if (native_module_->IsTieredDown()) {
+ for (size_t i = 0; i < compilation_progress_.size(); ++i) {
+ int func_index = offset + static_cast<int>(i);
+ builder->AddDebugUnit(func_index);
+ }
+ } else {
+ base::MutexGuard guard(&callbacks_mutex_);
+
+ for (size_t i = 0; i < compilation_progress_.size(); ++i) {
+ uint8_t function_progress = compilation_progress_[i];
+ int func_index = offset + static_cast<int>(i);
+ AddCompilationUnitInternal(builder.get(), func_index, function_progress);
+ }
+ }
+ builder->Commit();
+}
+
+void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
+ int func_index) {
+ if (native_module_->IsTieredDown()) {
+ builder->AddDebugUnit(func_index);
+ return;
+ }
+ int offset = native_module_->module()->num_imported_functions;
+ int progress_index = func_index - offset;
+ uint8_t function_progress;
+ {
+ // TODO(ahaas): This lock may cause overhead. If so, we could get rid of the
+ // lock as follows:
+ // 1) Make compilation_progress_ an array of atomic<uint8_t>, and access it
+ // lock-free.
+ // 2) Have a copy of compilation_progress_ that we use for initialization.
+ // 3) Just re-calculate the content of compilation_progress_.
+ base::MutexGuard guard(&callbacks_mutex_);
+ function_progress = compilation_progress_[progress_index];
+ }
+ AddCompilationUnitInternal(builder, func_index, function_progress);
+}
+
void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization() {
auto* module = native_module_->module();
base::MutexGuard guard(&callbacks_mutex_);
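The {function_progress} byte decoded in {AddCompilationUnitInternal} above packs three two-bit {ExecutionTier} fields. A sketch of the assumed layout (the field declarations live in {CompilationStateImpl}; shown here for illustration only):

    using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
    using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
    using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;

    // A function still to be compiled with Liftoff and later TurboFan:
    uint8_t progress =
        RequiredBaselineTierField::encode(ExecutionTier::kLiftoff) |
        RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
        ReachedTierField::encode(ExecutionTier::kNone);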
@@ -3004,10 +3094,10 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
}
}
-void CompilationStateImpl::AddCompilationUnits(
- Vector<WasmCompilationUnit> baseline_units,
- Vector<WasmCompilationUnit> top_tier_units,
- Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
+void CompilationStateImpl::CommitCompilationUnits(
+ base::Vector<WasmCompilationUnit> baseline_units,
+ base::Vector<WasmCompilationUnit> top_tier_units,
+ base::Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units) {
if (!js_to_wasm_wrapper_units.empty()) {
// |js_to_wasm_wrapper_units_| will only be initialized once.
@@ -3027,8 +3117,9 @@ void CompilationStateImpl::AddCompilationUnits(
compile_job_->NotifyConcurrencyIncrease();
}
-void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
- AddCompilationUnits({}, {&unit, 1}, {});
+void CompilationStateImpl::CommitTopTierCompilationUnit(
+ WasmCompilationUnit unit) {
+ CommitCompilationUnits({}, {&unit, 1}, {});
}
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
@@ -3070,7 +3161,7 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
Handle<Code> code = unit->Finalize();
int wrapper_index =
GetExportWrapperIndex(module, unit->sig(), unit->is_import());
- (*export_wrappers_out)->set(wrapper_index, *code);
+ (*export_wrappers_out)->set(wrapper_index, ToCodeT(*code));
RecordStats(*code, isolate->counters());
}
}
@@ -3086,7 +3177,8 @@ CompilationStateImpl::GetNextCompilationUnit(
return compilation_unit_queues_.GetNextUnit(queue, baseline_only);
}
-void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
+void CompilationStateImpl::OnFinishedUnits(
+ base::Vector<WasmCode*> code_vector) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.OnFinishedUnits", "units", code_vector.size());
@@ -3283,19 +3375,20 @@ void CompilationStateImpl::PublishCompilationResults(
code->IncRef();
}
}
- PublishCode(VectorOf(unpublished_code));
+ PublishCode(base::VectorOf(unpublished_code));
}
-void CompilationStateImpl::PublishCode(Vector<std::unique_ptr<WasmCode>> code) {
+void CompilationStateImpl::PublishCode(
+ base::Vector<std::unique_ptr<WasmCode>> code) {
WasmCodeRefScope code_ref_scope;
std::vector<WasmCode*> published_code =
native_module_->PublishCode(std::move(code));
// Defer logging code in case wire bytes were not fully received yet.
if (native_module_->HasWireBytes()) {
- native_module_->engine()->LogCode(VectorOf(published_code));
+ GetWasmEngine()->LogCode(base::VectorOf(published_code));
}
- OnFinishedUnits(VectorOf(std::move(published_code)));
+ OnFinishedUnits(base::VectorOf(std::move(published_code)));
}
void CompilationStateImpl::SchedulePublishCompilationResults(
@@ -3450,8 +3543,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
JSToWasmWrapperKey key(function.imported, *function.sig);
if (queue.insert(key)) {
auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
- isolate, isolate->wasm_engine(), function.sig, module,
- function.imported, enabled_features,
+ isolate, function.sig, module, function.imported, enabled_features,
JSToWasmWrapperCompilationUnit::kAllowGeneric);
compilation_units.emplace(key, std::move(unit));
}
@@ -3480,13 +3572,13 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
DCHECK_EQ(isolate, unit->isolate());
Handle<Code> code = unit->Finalize();
int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
- (*export_wrappers_out)->set(wrapper_index, *code);
+ (*export_wrappers_out)->set(wrapper_index, ToCodeT(*code));
RecordStats(*code, isolate->counters());
}
}
WasmCode* CompileImportWrapper(
- WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
+ NativeModule* native_module, Counters* counters,
compiler::WasmImportCallKind kind, const FunctionSig* sig,
int expected_arity,
WasmImportWrapperCache::ModificationScope* cache_scope) {
@@ -3500,7 +3592,7 @@ WasmCode* CompileImportWrapper(
WasmCodeRefScope code_ref_scope;
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- wasm_engine, &env, kind, sig, source_positions, expected_arity);
+ &env, kind, sig, source_positions, expected_arity);
std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index c45ca2a03e..e8bd2597bc 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -23,6 +23,12 @@
#include "src/wasm/wasm-module.h"
namespace v8 {
+
+namespace base {
+template <typename T>
+class Vector;
+} // namespace base
+
namespace internal {
class JSArrayBuffer;
@@ -31,9 +37,6 @@ class Counters;
class WasmModuleObject;
class WasmInstanceObject;
-template <typename T>
-class Vector;
-
namespace wasm {
struct CompilationEnv;
@@ -61,7 +64,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
// compiled yet.
V8_EXPORT_PRIVATE
WasmCode* CompileImportWrapper(
- WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
+ NativeModule* native_module, Counters* counters,
compiler::WasmImportCallKind kind, const FunctionSig* sig,
int expected_arity, WasmImportWrapperCache::ModificationScope* cache_scope);
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index be4d8ef833..5e0c76025b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -15,10 +15,12 @@
#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/init-expr-interface.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -142,18 +144,20 @@ SectionCode IdentifyUnknownSectionInternal(Decoder* decoder) {
static_cast<int>(section_name_start - decoder->start()),
string.length() < 20 ? string.length() : 20, section_name_start);
- using SpecialSectionPair = std::pair<Vector<const char>, SectionCode>;
+ using SpecialSectionPair = std::pair<base::Vector<const char>, SectionCode>;
static constexpr SpecialSectionPair kSpecialSections[]{
- {StaticCharVector(kNameString), kNameSectionCode},
- {StaticCharVector(kSourceMappingURLString), kSourceMappingURLSectionCode},
- {StaticCharVector(kCompilationHintsString), kCompilationHintsSectionCode},
- {StaticCharVector(kBranchHintsString), kBranchHintsSectionCode},
- {StaticCharVector(kDebugInfoString), kDebugInfoSectionCode},
- {StaticCharVector(kExternalDebugInfoString),
+ {base::StaticCharVector(kNameString), kNameSectionCode},
+ {base::StaticCharVector(kSourceMappingURLString),
+ kSourceMappingURLSectionCode},
+ {base::StaticCharVector(kCompilationHintsString),
+ kCompilationHintsSectionCode},
+ {base::StaticCharVector(kBranchHintsString), kBranchHintsSectionCode},
+ {base::StaticCharVector(kDebugInfoString), kDebugInfoSectionCode},
+ {base::StaticCharVector(kExternalDebugInfoString),
kExternalDebugInfoSectionCode}};
- auto name_vec =
- Vector<const char>::cast(VectorOf(section_name_start, string.length()));
+ auto name_vec = base::Vector<const char>::cast(
+ base::VectorOf(section_name_start, string.length()));
for (auto& special_section : kSpecialSections) {
if (name_vec == special_section.first) return special_section.second;
}
@@ -174,27 +178,27 @@ class WasmSectionIterator {
next();
}
- inline bool more() const { return decoder_->ok() && decoder_->more(); }
+ bool more() const { return decoder_->ok() && decoder_->more(); }
- inline SectionCode section_code() const { return section_code_; }
+ SectionCode section_code() const { return section_code_; }
- inline const byte* section_start() const { return section_start_; }
+ const byte* section_start() const { return section_start_; }
- inline uint32_t section_length() const {
+ uint32_t section_length() const {
return static_cast<uint32_t>(section_end_ - section_start_);
}
- inline Vector<const uint8_t> payload() const {
+ base::Vector<const uint8_t> payload() const {
return {payload_start_, payload_length()};
}
- inline const byte* payload_start() const { return payload_start_; }
+ const byte* payload_start() const { return payload_start_; }
- inline uint32_t payload_length() const {
+ uint32_t payload_length() const {
return static_cast<uint32_t>(section_end_ - payload_start_);
}
- inline const byte* section_end() const { return section_end_; }
+ const byte* section_end() const { return section_end_; }
// Advances to the next section, checking that decoding the current section
// stopped at {section_end_}.
@@ -298,7 +302,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = end_; // On error, terminate section decoding loop.
}
- void DumpModule(const Vector<const byte> module_bytes) {
+ void DumpModule(const base::Vector<const byte> module_bytes) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -309,7 +313,7 @@ class ModuleDecoderImpl : public Decoder {
}
    // Files are named `HASH.{ok,failed}.wasm`.
size_t hash = base::hash_range(module_bytes.begin(), module_bytes.end());
- EmbeddedVector<char, 32> buf;
+ base::EmbeddedVector<char, 32> buf;
SNPrintF(buf, "%016zx.%s.wasm", hash, ok() ? "ok" : "failed");
path += buf.begin();
size_t rv = 0;
@@ -334,7 +338,7 @@ class ModuleDecoderImpl : public Decoder {
module_->origin = origin_;
}
- void DecodeModuleHeader(Vector<const uint8_t> bytes, uint8_t offset) {
+ void DecodeModuleHeader(base::Vector<const uint8_t> bytes, uint8_t offset) {
if (failed()) return;
Reset(bytes, offset);
@@ -385,8 +389,9 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
- void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
- uint32_t offset, bool verify_functions = true) {
+ void DecodeSection(SectionCode section_code,
+ base::Vector<const uint8_t> bytes, uint32_t offset,
+ bool verify_functions = true) {
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
@@ -657,17 +662,16 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
&module_->is_memory64);
- consume_resizable_limits("memory", "pages", max_mem_pages(),
- &module_->initial_pages,
- &module_->has_maximum_pages, max_mem_pages(),
- &module_->maximum_pages, flags);
+ consume_resizable_limits(
+ "memory", "pages", kSpecMaxMemoryPages, &module_->initial_pages,
+ &module_->has_maximum_pages, kSpecMaxMemoryPages,
+ &module_->maximum_pages, flags);
break;
}
case kExternalGlobal: {
// ===== Imported global =============================================
import->index = static_cast<uint32_t>(module_->globals.size());
- module_->globals.push_back(
- {kWasmVoid, false, WasmInitExpr(), {0}, true, false});
+ module_->globals.push_back({kWasmVoid, false, {}, {0}, true, false});
WasmGlobal* global = &module_->globals.back();
global->type = consume_value_type();
global->mutability = consume_mutability();
@@ -749,7 +753,7 @@ class ModuleDecoderImpl : public Decoder {
&table->initial_size, &table->has_maximum_size,
std::numeric_limits<uint32_t>::max(), &table->maximum_size, flags);
if (!table_type.is_defaultable()) {
- table->initial_value = consume_init_expr(module_.get(), table_type, 0);
+ table->initial_value = consume_init_expr(module_.get(), table_type);
}
}
}
@@ -761,9 +765,9 @@ class ModuleDecoderImpl : public Decoder {
if (!AddMemory(module_.get())) break;
uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
&module_->is_memory64);
- consume_resizable_limits("memory", "pages", max_mem_pages(),
+ consume_resizable_limits("memory", "pages", kSpecMaxMemoryPages,
&module_->initial_pages,
- &module_->has_maximum_pages, max_mem_pages(),
+ &module_->has_maximum_pages, kSpecMaxMemoryPages,
&module_->maximum_pages, flags);
}
}
@@ -774,14 +778,11 @@ class ModuleDecoderImpl : public Decoder {
module_->globals.reserve(imported_globals + globals_count);
for (uint32_t i = 0; ok() && i < globals_count; ++i) {
TRACE("DecodeGlobal[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
- // Add an uninitialized global and pass a pointer to it.
- module_->globals.push_back(
- {kWasmVoid, false, WasmInitExpr(), {0}, false, false});
- WasmGlobal* global = &module_->globals.back();
- global->type = consume_value_type();
- global->mutability = consume_mutability();
- global->init =
- consume_init_expr(module_.get(), global->type, imported_globals + i);
+ ValueType type = consume_value_type();
+ bool mutability = consume_mutability();
+ if (failed()) break;
+ WireBytesRef init = consume_init_expr(module_.get(), type);
+ module_->globals.push_back({type, mutability, init, {0}, false, false});
}
if (ok()) CalculateGlobalOffsets(module_.get());
}
@@ -915,12 +916,20 @@ class ModuleDecoderImpl : public Decoder {
consume_count("number of elements", max_table_init_entries());
for (uint32_t j = 0; j < num_elem; j++) {
- WasmInitExpr init =
+ WasmElemSegment::Entry init =
expressions_as_elements
? consume_element_expr()
- : WasmInitExpr::RefFuncConst(consume_element_func_index());
+ : WasmElemSegment::Entry(WasmElemSegment::Entry::kRefFuncEntry,
+ consume_element_func_index());
if (failed()) return;
- segment.entries.push_back(std::move(init));
+ if (!IsSubtypeOf(TypeOf(init), segment.type, module_.get())) {
+ errorf(pc_,
+ "Invalid type in the init expression. The expected type is "
+ "'%s', but the actual type is '%s'.",
+ segment.type.name().c_str(), TypeOf(init).name().c_str());
+ return;
+ }
+ segment.entries.push_back(init);
}
module_->elem_segments.push_back(std::move(segment));
}
@@ -1001,7 +1010,7 @@ class ModuleDecoderImpl : public Decoder {
bool is_active;
uint32_t memory_index;
- WasmInitExpr dest_addr;
+ WireBytesRef dest_addr;
consume_data_segment_header(&is_active, &memory_index, &dest_addr);
if (failed()) break;
@@ -1312,8 +1321,8 @@ class ModuleDecoderImpl : public Decoder {
bool verify_functions = true) {
StartDecoding(counters, allocator);
uint32_t offset = 0;
- Vector<const byte> orig_bytes(start(), end() - start());
- DecodeModuleHeader(VectorOf(start(), end() - start()), offset);
+ base::Vector<const byte> orig_bytes(start(), end() - start());
+ DecodeModuleHeader(base::VectorOf(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
}
@@ -1375,8 +1384,8 @@ class ModuleDecoderImpl : public Decoder {
return ok() ? result : nullptr;
}
- WasmInitExpr DecodeInitExprForTesting() {
- return consume_init_expr(nullptr, kWasmVoid, 0);
+ WireBytesRef DecodeInitExprForTesting(ValueType expected) {
+ return consume_init_expr(module_.get(), expected);
}
const std::shared_ptr<WasmModule>& shared_module() const { return module_; }
@@ -1412,46 +1421,18 @@ class ModuleDecoderImpl : public Decoder {
"not enough bits");
WasmError intermediate_error_;
ModuleOrigin origin_;
+ AccountingAllocator allocator_;
+ Zone init_expr_zone_{&allocator_, "initializer expression zone"};
- ValueType TypeOf(const WasmInitExpr& expr) {
- switch (expr.kind()) {
- case WasmInitExpr::kNone:
- return kWasmVoid;
- case WasmInitExpr::kGlobalGet:
- return expr.immediate().index < module_->globals.size()
- ? module_->globals[expr.immediate().index].type
- : kWasmVoid;
- case WasmInitExpr::kI32Const:
- return kWasmI32;
- case WasmInitExpr::kI64Const:
- return kWasmI64;
- case WasmInitExpr::kF32Const:
- return kWasmF32;
- case WasmInitExpr::kF64Const:
- return kWasmF64;
- case WasmInitExpr::kS128Const:
- return kWasmS128;
- case WasmInitExpr::kRefFuncConst: {
- uint32_t heap_type =
- enabled_features_.has_typed_funcref()
- ? module_->functions[expr.immediate().index].sig_index
- : HeapType::kFunc;
- return ValueType::Ref(heap_type, kNonNullable);
- }
- case WasmInitExpr::kRefNullConst:
- return ValueType::Ref(expr.immediate().heap_type, kNullable);
- case WasmInitExpr::kRttCanon: {
- return ValueType::Rtt(expr.immediate().heap_type, 0);
- }
- case WasmInitExpr::kRttSub: {
- ValueType operand_type = TypeOf(*expr.operand());
- if (operand_type.is_rtt()) {
- return ValueType::Rtt(expr.immediate().heap_type,
- operand_type.depth() + 1);
- } else {
- return kWasmVoid;
- }
- }
+ ValueType TypeOf(WasmElemSegment::Entry entry) {
+ switch (entry.kind) {
+ case WasmElemSegment::Entry::kGlobalGetEntry:
+ return module_->globals[entry.index].type;
+ case WasmElemSegment::Entry::kRefFuncEntry:
+ return ValueType::Ref(module_->functions[entry.index].sig_index,
+ kNonNullable);
+ case WasmElemSegment::Entry::kRefNullEntry:
+ return ValueType::Ref(entry.index, kNullable);
}
}
@@ -1717,202 +1698,32 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
- WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected,
- size_t current_global_index) {
- constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
- WasmOpcode opcode = kExprNop;
- std::vector<WasmInitExpr> stack;
- while (pc() < end() && opcode != kExprEnd) {
- uint32_t len = 1;
- opcode = static_cast<WasmOpcode>(read_u8<validate>(pc(), "opcode"));
- switch (opcode) {
- case kExprGlobalGet: {
- GlobalIndexImmediate<validate> imm(this, pc() + 1);
- len = 1 + imm.length;
- // We use 'capacity' over 'size' because we might be
- // mid-DecodeGlobalSection().
- if (V8_UNLIKELY(imm.index >= module->globals.capacity())) {
- error(pc() + 1, "global index is out of bounds");
- return {};
- }
- if (V8_UNLIKELY(imm.index >= current_global_index)) {
- errorf(pc() + 1, "global #%u is not defined yet", imm.index);
- return {};
- }
- WasmGlobal* global = &module->globals[imm.index];
- if (V8_UNLIKELY(global->mutability)) {
- error(pc() + 1,
- "mutable globals cannot be used in initializer "
- "expressions");
- return {};
- }
- if (V8_UNLIKELY(!global->imported && !enabled_features_.has_gc())) {
- error(pc() + 1,
- "non-imported globals cannot be used in initializer "
- "expressions");
- return {};
- }
- stack.push_back(WasmInitExpr::GlobalGet(imm.index));
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
- stack.emplace_back(imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
- stack.emplace_back(imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
- stack.emplace_back(imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
- stack.emplace_back(imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- if (V8_UNLIKELY(!enabled_features_.has_reftypes() &&
- !enabled_features_.has_eh())) {
- errorf(pc(),
- "invalid opcode 0x%x in initializer expression, enable with "
- "--experimental-wasm-reftypes or --experimental-wasm-eh",
- kExprRefNull);
- return {};
- }
- HeapTypeImmediate<Decoder::kFullValidation> imm(
- enabled_features_, this, pc() + 1, module_.get());
- if (V8_UNLIKELY(failed())) return {};
- len = 1 + imm.length;
- stack.push_back(
- WasmInitExpr::RefNullConst(imm.type.representation()));
- break;
- }
- case kExprRefFunc: {
- if (V8_UNLIKELY(!enabled_features_.has_reftypes())) {
- errorf(pc(),
- "invalid opcode 0x%x in initializer expression, enable with "
- "--experimental-wasm-reftypes",
- kExprRefFunc);
- return {};
- }
+ WireBytesRef consume_init_expr(WasmModule* module, ValueType expected) {
+ FunctionBody body(FunctionSig::Build(&init_expr_zone_, {expected}, {}),
+ buffer_offset_, pc_, end_);
+ WasmFeatures detected;
+ WasmFullDecoder<Decoder::kFullValidation, InitExprInterface,
+ kInitExpression>
+ decoder(&init_expr_zone_, module, enabled_features_, &detected, body,
+ module);
- FunctionIndexImmediate<Decoder::kFullValidation> imm(this, pc() + 1);
- len = 1 + imm.length;
- if (V8_UNLIKELY(module->functions.size() <= imm.index)) {
- errorf(pc(), "invalid function index: %u", imm.index);
- return {};
- }
- stack.push_back(WasmInitExpr::RefFuncConst(imm.index));
- // Functions referenced in the globals section count as "declared".
- module->functions[imm.index].declared = true;
- break;
- }
- case kSimdPrefix: {
- // No need to check for Simd in enabled_features_ here; we either
- // failed to validate the global's type earlier, or will fail in
- // the type check or stack height check at the end.
- opcode = read_prefixed_opcode<validate>(pc(), &len);
- if (V8_UNLIKELY(opcode != kExprS128Const)) {
- errorf(pc(), "invalid SIMD opcode 0x%x in initializer expression",
- opcode);
- return {};
- }
+ uint32_t offset = this->pc_offset();
- Simd128Immediate<validate> imm(this, pc() + len);
- len += kSimd128Size;
- stack.emplace_back(imm.value);
- break;
- }
- case kGCPrefix: {
- // No need to check for GC in enabled_features_ here; we either
- // failed to validate the global's type earlier, or will fail in
- // the type check or stack height check at the end.
- opcode = read_prefixed_opcode<validate>(pc(), &len);
- switch (opcode) {
- case kExprRttCanon: {
- TypeIndexImmediate<validate> imm(this, pc() + 2);
- if (V8_UNLIKELY(imm.index >= module_->types.capacity())) {
- errorf(pc() + 2, "type index %u is out of bounds", imm.index);
- return {};
- }
- len += imm.length;
- stack.push_back(WasmInitExpr::RttCanon(imm.index));
- break;
- }
- case kExprRttSub: {
- TypeIndexImmediate<validate> imm(this, pc() + 2);
- if (V8_UNLIKELY(imm.index >= module_->types.capacity())) {
- errorf(pc() + 2, "type index %u is out of bounds", imm.index);
- return {};
- }
- len += imm.length;
- if (stack.empty()) {
- error(pc(), "calling rtt.sub without arguments");
- return {};
- }
- WasmInitExpr parent = std::move(stack.back());
- stack.pop_back();
- ValueType parent_type = TypeOf(parent);
- if (V8_UNLIKELY(!parent_type.is_rtt() ||
- !IsHeapSubtypeOf(imm.index,
- parent_type.ref_index(),
- module_.get()))) {
- error(pc(), "rtt.sub requires a supertype rtt on stack");
- return {};
- }
- stack.push_back(
- WasmInitExpr::RttSub(imm.index, std::move(parent)));
- break;
- }
- default: {
- errorf(pc(), "invalid opcode 0x%x in initializer expression",
- opcode);
- return {};
- }
- }
- break; // case kGCPrefix
- }
- case kExprEnd:
- break;
- default: {
- errorf(pc(), "invalid opcode 0x%x in initializer expression", opcode);
- return {};
- }
- }
- pc_ += len;
- }
+ decoder.DecodeFunctionBody();
- if (V8_UNLIKELY(pc() > end())) {
- error(end(), "Initializer expression extending beyond code end");
- return {};
- }
- if (V8_UNLIKELY(opcode != kExprEnd)) {
- error(pc(), "Initializer expression is missing 'end'");
+ this->pc_ = decoder.end();
+
+ if (decoder.failed()) {
+ error(decoder.error().offset(), decoder.error().message().c_str());
return {};
}
- if (V8_UNLIKELY(stack.size() != 1)) {
- errorf(pc(),
- "Found 'end' in initializer expression, but %s expressions were "
- "found on the stack",
- stack.size() > 1 ? "more than one" : "no");
+
+ if (!decoder.interface().end_found()) {
+ error("Initializer expression is missing 'end'");
return {};
}
- WasmInitExpr expr = std::move(stack.back());
- if (expected != kWasmVoid && !IsSubtypeOf(TypeOf(expr), expected, module)) {
- errorf(pc(), "type error in init expression, expected %s, got %s",
- expected.name().c_str(), TypeOf(expr).name().c_str());
- }
- return expr;
+ return {offset, static_cast<uint32_t>(decoder.end() - decoder.start())};
}
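// ---- Editor's sketch (illustration, not part of this diff) ----
// After this rewrite an initializer is validated once and then kept only as
// a WireBytesRef (offset + length into the module's wire bytes), to be
// re-decoded at instantiation time. For example "i32.const 42; end" is three
// wire bytes; the struct below is a stand-in for the real WireBytesRef.
#include <cstdint>

struct Ref { uint32_t offset; uint32_t length; };

// i32.const = 0x41, LEB128-encoded 42 = 0x2a, end = 0x0b.
const uint8_t kInitExpr[] = {0x41, 0x2a, 0x0b};

// If these bytes start at module offset 0x100, consume_init_expr() would
// return roughly:
const Ref kRef = {0x100, sizeof(kInitExpr)};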
// Read a mutability flag
@@ -2019,7 +1830,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType field = consume_storage_type();
if (failed()) return nullptr;
bool mutability = consume_mutability();
- if (!mutability) {
+ if (!V8_LIKELY(mutability)) {
error(this->pc() - 1, "immutable arrays are not supported yet");
}
return zone->New<ArrayType>(field, mutability);
@@ -2086,10 +1897,9 @@ class ModuleDecoderImpl : public Decoder {
ValueType table_type =
is_active ? module_->tables[table_index].type : kWasmBottom;
- WasmInitExpr offset;
+ WireBytesRef offset;
if (is_active) {
- offset = consume_init_expr(module_.get(), kWasmI32,
- module_.get()->globals.size());
+ offset = consume_init_expr(module_.get(), kWasmI32);
// Failed to parse offset initializer, return early.
if (failed()) return {};
}
@@ -2148,7 +1958,7 @@ class ModuleDecoderImpl : public Decoder {
}
void consume_data_segment_header(bool* is_active, uint32_t* index,
- WasmInitExpr* offset) {
+ WireBytesRef* offset) {
const byte* pos = pc();
uint32_t flag = consume_u32v("flag");
@@ -2161,12 +1971,11 @@ class ModuleDecoderImpl : public Decoder {
}
// We know now that the flag is valid. Time to read the rest.
- size_t num_globals = module_->globals.size();
ValueType expected_type = module_->is_memory64 ? kWasmI64 : kWasmI32;
if (flag == SegmentFlags::kActiveNoIndex) {
*is_active = true;
*index = 0;
- *offset = consume_init_expr(module_.get(), expected_type, num_globals);
+ *offset = consume_init_expr(module_.get(), expected_type);
return;
}
if (flag == SegmentFlags::kPassive) {
@@ -2176,7 +1985,7 @@ class ModuleDecoderImpl : public Decoder {
if (flag == SegmentFlags::kActiveWithIndex) {
*is_active = true;
*index = consume_u32v("memory index");
- *offset = consume_init_expr(module_.get(), expected_type, num_globals);
+ *offset = consume_init_expr(module_.get(), expected_type);
}
}
@@ -2191,9 +2000,10 @@ class ModuleDecoderImpl : public Decoder {
return index;
}
- // TODO(manoskouk): When reftypes lands, remove this and use
- // consume_init_expr() instead.
- WasmInitExpr consume_element_expr() {
+ // TODO(manoskouk): When reftypes lands, consider if we can implement this
+ // with consume_init_expr(). It will require changes in module-instantiate.cc,
+ // in {LoadElemSegmentImpl}.
+ WasmElemSegment::Entry consume_element_expr() {
uint8_t opcode = consume_u8("element opcode");
if (failed()) return {};
switch (opcode) {
@@ -2202,13 +2012,14 @@ class ModuleDecoderImpl : public Decoder {
this->pc(), module_.get());
consume_bytes(imm.length, "ref.null immediate");
expect_u8("end opcode", kExprEnd);
- return WasmInitExpr::RefNullConst(imm.type.representation());
+ return {WasmElemSegment::Entry::kRefNullEntry,
+ static_cast<uint32_t>(imm.type.representation())};
}
case kExprRefFunc: {
uint32_t index = consume_element_func_index();
if (failed()) return {};
expect_u8("end opcode", kExprEnd);
- return WasmInitExpr::RefFuncConst(index);
+ return {WasmElemSegment::Entry::kRefFuncEntry, index};
}
case kExprGlobalGet: {
if (!enabled_features_.has_reftypes()) {
@@ -2225,7 +2036,7 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
expect_u8("end opcode", kExprEnd);
- return WasmInitExpr::GlobalGet(index);
+ return {WasmElemSegment::Entry::kGlobalGetEntry, index};
}
default:
error("invalid opcode in element");
@@ -2257,12 +2068,19 @@ ModuleResult DecodeWasmModule(
v8::metrics::WasmModuleDecoded metrics_event;
base::ElapsedTimer timer;
timer.Start();
+ base::ThreadTicks thread_ticks = base::ThreadTicks::IsSupported()
+ ? base::ThreadTicks::Now()
+ : base::ThreadTicks();
ModuleResult result =
decoder.DecodeModule(counters, allocator, verify_functions);
// Record event metrics.
metrics_event.wall_clock_duration_in_us = timer.Elapsed().InMicroseconds();
timer.Stop();
+ if (!thread_ticks.IsNull()) {
+ metrics_event.cpu_duration_in_us =
+ (base::ThreadTicks::Now() - thread_ticks).InMicroseconds();
+ }
metrics_event.success = decoder.ok() && result.ok();
metrics_event.async = decoding_method == DecodingMethod::kAsync ||
decoding_method == DecodingMethod::kAsyncStream;
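// ---- Editor's sketch (illustration, not part of this diff) ----
// The metrics pattern above: wall-clock time is always recorded, per-thread
// CPU time only where the platform supports it (a default-constructed
// ThreadTicks is the "unsupported" sentinel, tested via IsNull()). A portable
// approximation with the standard library; std::clock() measures process CPU
// time, a coarser stand-in for V8's per-thread ticks.
#include <chrono>
#include <cstdint>
#include <ctime>

void MeasureDecode() {
  auto wall_start = std::chrono::steady_clock::now();
  std::clock_t cpu_start = std::clock();

  // ... decode the module ...

  int64_t wall_us = std::chrono::duration_cast<std::chrono::microseconds>(
                        std::chrono::steady_clock::now() - wall_start)
                        .count();
  int64_t cpu_us =
      (std::clock() - cpu_start) * INT64_C(1000000) / CLOCKS_PER_SEC;
  (void)wall_us;  // would be reported as wall_clock_duration_in_us
  (void)cpu_us;   // would be reported as cpu_duration_in_us
}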
@@ -2297,14 +2115,14 @@ void ModuleDecoder::StartDecoding(
impl_->StartDecoding(counters, allocator);
}
-void ModuleDecoder::DecodeModuleHeader(Vector<const uint8_t> bytes,
+void ModuleDecoder::DecodeModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) {
impl_->DecodeModuleHeader(bytes, offset);
}
void ModuleDecoder::DecodeSection(SectionCode section_code,
- Vector<const uint8_t> bytes, uint32_t offset,
- bool verify_functions) {
+ base::Vector<const uint8_t> bytes,
+ uint32_t offset, bool verify_functions) {
impl_->DecodeSection(section_code, bytes, offset, verify_functions);
}
@@ -2329,7 +2147,7 @@ void ModuleDecoder::set_code_section(uint32_t offset, uint32_t size) {
}
size_t ModuleDecoder::IdentifyUnknownSection(ModuleDecoder* decoder,
- Vector<const uint8_t> bytes,
+ base::Vector<const uint8_t> bytes,
uint32_t offset,
SectionCode* result) {
if (!decoder->ok()) return 0;
@@ -2347,11 +2165,13 @@ const FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& enabled,
return decoder.DecodeFunctionSignature(zone, start);
}
-WasmInitExpr DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
- const byte* start, const byte* end) {
- AccountingAllocator allocator;
+WireBytesRef DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
+ const byte* start, const byte* end,
+ ValueType expected) {
ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
- return decoder.DecodeInitExprForTesting();
+ AccountingAllocator allocator;
+ decoder.StartDecoding(nullptr, &allocator);
+ return decoder.DecodeInitExprForTesting(expected);
}
FunctionResult DecodeWasmFunctionForTesting(
@@ -2360,10 +2180,6 @@ FunctionResult DecodeWasmFunctionForTesting(
const byte* function_end, Counters* counters) {
size_t size = function_end - function_start;
CHECK_LE(function_start, function_end);
- auto size_histogram =
- SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes);
- // TODO(bradnelson): Improve histogram handling of ptrdiff_t.
- size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize) {
return FunctionResult{WasmError{0,
"size > maximum function size (%zu): %zu",
@@ -2375,7 +2191,8 @@ FunctionResult DecodeWasmFunctionForTesting(
std::make_unique<WasmFunction>());
}
-AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets) {
+AsmJsOffsetsResult DecodeAsmJsOffsets(
+ base::Vector<const uint8_t> encoded_offsets) {
std::vector<AsmJsOffsetFunctionEntries> functions;
Decoder decoder(encoded_offsets);
@@ -2522,7 +2339,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
-NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
+NameMap DecodeNameMap(base::Vector<const uint8_t> module_bytes,
uint8_t name_section_kind) {
Decoder decoder(module_bytes);
if (!FindNameSection(&decoder)) return NameMap{{}};
@@ -2554,7 +2371,7 @@ NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
return NameMap{std::move(names)};
}
-IndirectNameMap DecodeIndirectNameMap(Vector<const uint8_t> module_bytes,
+IndirectNameMap DecodeIndirectNameMap(base::Vector<const uint8_t> module_bytes,
uint8_t name_section_kind) {
Decoder decoder(module_bytes);
if (!FindNameSection(&decoder)) return IndirectNameMap{{}};
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 2af2760ab4..0a64326cff 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -171,8 +171,9 @@ V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunctionForTesting(
const WasmModule* module, const byte* function_start,
const byte* function_end, Counters* counters);
-V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(
- const WasmFeatures& enabled, const byte* start, const byte* end);
+V8_EXPORT_PRIVATE WireBytesRef
+DecodeWasmInitExprForTesting(const WasmFeatures& enabled, const byte* start,
+ const byte* end, ValueType expected);
struct CustomSectionOffset {
WireBytesRef section;
@@ -185,7 +186,8 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
// Extracts the mapping from wasm byte offset to asm.js source position per
// function.
-AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets);
+AsmJsOffsetsResult DecodeAsmJsOffsets(
+ base::Vector<const uint8_t> encoded_offsets);
// Decode the function names from the name section. Returns the result as an
// unordered map. Only names with valid utf8 encoding are stored and conflicts
@@ -197,9 +199,9 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
// The result will be empty if no name section is present. On encountering an
// error in the name section, returns all information decoded up to the first
// error.
-NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
+NameMap DecodeNameMap(base::Vector<const uint8_t> module_bytes,
uint8_t name_section_kind);
-IndirectNameMap DecodeIndirectNameMap(Vector<const uint8_t> module_bytes,
+IndirectNameMap DecodeIndirectNameMap(base::Vector<const uint8_t> module_bytes,
uint8_t name_section_kind);
class ModuleDecoderImpl;
@@ -215,10 +217,11 @@ class ModuleDecoder {
AccountingAllocator* allocator,
ModuleOrigin origin = ModuleOrigin::kWasmOrigin);
- void DecodeModuleHeader(Vector<const uint8_t> bytes, uint32_t offset);
+ void DecodeModuleHeader(base::Vector<const uint8_t> bytes, uint32_t offset);
- void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
- uint32_t offset, bool verify_functions = true);
+ void DecodeSection(SectionCode section_code,
+ base::Vector<const uint8_t> bytes, uint32_t offset,
+ bool verify_functions = true);
void StartCodeSection();
@@ -243,7 +246,7 @@ class ModuleDecoder {
// the identifier string of the unknown section.
// The return value is the number of bytes that were consumed.
static size_t IdentifyUnknownSection(ModuleDecoder* decoder,
- Vector<const uint8_t> bytes,
+ base::Vector<const uint8_t> bytes,
uint32_t offset, SectionCode* result);
private:
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 7945e79849..e1409952b2 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -10,15 +10,20 @@
#include "src/logging/counters.h"
#include "src/logging/metrics.h"
#include "src/numbers/conversions-inl.h"
+#include "src/objects/descriptor-array-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
+#include "src/wasm/init-expr-interface.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes-inl.h"
#include "src/wasm/wasm-subtyping.h"
#include "src/wasm/wasm-value.h"
@@ -31,45 +36,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-using base::ReadLittleEndianValue;
-using base::WriteLittleEndianValue;
-
-uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
- const WasmInitExpr& expr) {
- switch (expr.kind()) {
- case WasmInitExpr::kI32Const:
- return expr.immediate().i32_const;
- case WasmInitExpr::kGlobalGet: {
- const auto& global = instance->module()->globals[expr.immediate().index];
- DCHECK_EQ(kWasmI32, global.type);
- auto raw_addr = reinterpret_cast<Address>(
- instance->untagged_globals_buffer().backing_store()) +
- global.offset;
- return ReadLittleEndianValue<uint32_t>(raw_addr);
- }
- default:
- UNREACHABLE();
- }
-}
-
-uint64_t EvalUint64InitExpr(Handle<WasmInstanceObject> instance,
- const WasmInitExpr& expr) {
- switch (expr.kind()) {
- case WasmInitExpr::kI64Const:
- return expr.immediate().i64_const;
- case WasmInitExpr::kGlobalGet: {
- const auto& global = instance->module()->globals[expr.immediate().index];
- DCHECK_EQ(kWasmI64, global.type);
- auto raw_addr = reinterpret_cast<Address>(
- instance->untagged_globals_buffer().backing_store()) +
- global.offset;
- return ReadLittleEndianValue<uint64_t>(raw_addr);
- }
- default:
- UNREACHABLE();
- }
-}
-
namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
@@ -82,11 +48,10 @@ using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
class CompileImportWrapperJob final : public JobTask {
public:
CompileImportWrapperJob(
- WasmEngine* engine, Counters* counters, NativeModule* native_module,
+ Counters* counters, NativeModule* native_module,
ImportWrapperQueue* queue,
WasmImportWrapperCache::ModificationScope* cache_scope)
- : engine_(engine),
- counters_(counters),
+ : counters_(counters),
native_module_(native_module),
queue_(queue),
cache_scope_(cache_scope) {}
@@ -102,20 +67,74 @@ class CompileImportWrapperJob final : public JobTask {
void Run(JobDelegate* delegate) override {
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
- CompileImportWrapper(engine_, native_module_, counters_, key->kind,
- key->signature, key->expected_arity, cache_scope_);
+ CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
+ key->expected_arity, cache_scope_);
if (delegate->ShouldYield()) return;
}
}
private:
- WasmEngine* const engine_;
Counters* const counters_;
NativeModule* const native_module_;
ImportWrapperQueue* const queue_;
WasmImportWrapperCache::ModificationScope* const cache_scope_;
};
+Handle<DescriptorArray> CreateStructDescriptorArray(
+ Isolate* isolate, const wasm::StructType* type) {
+ if (type->field_count() == 0) {
+ return isolate->factory()->empty_descriptor_array();
+ }
+ uint32_t field_count = type->field_count();
+ static_assert(kV8MaxWasmStructFields <= kMaxNumberOfDescriptors,
+                "Bigger numbers of struct fields require a different approach");
+ Handle<DescriptorArray> descriptors =
+ isolate->factory()->NewDescriptorArray(field_count);
+
+ // TODO(ishell): cache Wasm field type in FieldType value.
+ MaybeObject any_type = MaybeObject::FromObject(FieldType::Any());
+ DCHECK(any_type->IsSmi());
+
+ base::EmbeddedVector<char, 128> name_buffer;
+ for (uint32_t i = 0; i < field_count; i++) {
+ // TODO(ishell): consider introducing a cache of first N internalized field
+ // names similar to LookupSingleCharacterStringFromCode().
+ SNPrintF(name_buffer, "$field%d", i);
+ Handle<String> name =
+ isolate->factory()->InternalizeUtf8String(name_buffer.begin());
+
+ PropertyAttributes attributes = type->mutability(i) ? SEALED : FROZEN;
+ PropertyDetails details(
+ PropertyKind::kData, attributes, PropertyLocation::kField,
+ PropertyConstness::kMutable, // Don't track constness
+ Representation::WasmValue(), static_cast<int>(i));
+ descriptors->Set(InternalIndex(i), *name, any_type, details);
+ }
+ descriptors->Sort();
+ return descriptors;
+}
+
+Handle<DescriptorArray> CreateArrayDescriptorArray(
+ Isolate* isolate, const wasm::ArrayType* type) {
+ uint32_t kDescriptorsCount = 1;
+ Handle<DescriptorArray> descriptors =
+ isolate->factory()->NewDescriptorArray(kDescriptorsCount);
+
+ // TODO(ishell): cache Wasm field type in FieldType value.
+ MaybeObject any_type = MaybeObject::FromObject(FieldType::Any());
+ DCHECK(any_type->IsSmi());
+
+ // Add descriptor for length property.
+ PropertyDetails details(PropertyKind::kData, FROZEN, PropertyLocation::kField,
+ PropertyConstness::kConst,
+ Representation::WasmValue(), static_cast<int>(0));
+ descriptors->Set(InternalIndex(0), *isolate->factory()->length_string(),
+ any_type, details);
+
+ descriptors->Sort();
+ return descriptors;
+}
+
} // namespace
// TODO(jkummerow): Move these elsewhere.
@@ -132,9 +151,14 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
reinterpret_cast<Address>(type), opt_rtt_parent, real_instance_size);
+ Handle<DescriptorArray> descriptors =
+ CreateStructDescriptorArray(isolate, type);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, map_instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
+ map->SetInstanceDescriptors(isolate, *descriptors,
+ descriptors->number_of_descriptors());
+ map->set_is_extensible(false);
return map;
}
@@ -149,9 +173,15 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
reinterpret_cast<Address>(type), opt_rtt_parent, cached_instance_size);
+ // TODO(ishell): get canonical descriptor array for WasmArrays from roots.
+ Handle<DescriptorArray> descriptors =
+ CreateArrayDescriptorArray(isolate, type);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
+ map->SetInstanceDescriptors(isolate, *descriptors,
+ descriptors->number_of_descriptors());
+ map->set_is_extensible(false);
return map;
}
@@ -187,7 +217,7 @@ class RttSubtypes : public ArrayList {
Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
- Handle<Map> parent) {
+ Handle<Map> parent, WasmRttSubMode mode) {
DCHECK(parent->IsWasmStructMap() || parent->IsWasmArrayMap() ||
parent->IsJSFunctionMap());
@@ -201,11 +231,13 @@ Handle<Map> AllocateSubRtt(Isolate* isolate,
return Map::Copy(isolate, isolate->wasm_exported_function_map(),
"fresh function map for AllocateSubRtt");
}
-
- // Check for an existing RTT first.
- Handle<ArrayList> cache(parent->wasm_type_info().subtypes(), isolate);
- Map maybe_cached = RttSubtypes::SearchSubtype(cache, type);
- if (!maybe_cached.is_null()) return handle(maybe_cached, isolate);
+ // If canonicalization is requested, check for an existing RTT first.
+ Handle<ArrayList> cache;
+ if (mode == WasmRttSubMode::kCanonicalize) {
+ cache = handle(parent->wasm_type_info().subtypes(), isolate);
+ Map maybe_cached = RttSubtypes::SearchSubtype(cache, type);
+ if (!maybe_cached.is_null()) return handle(maybe_cached, isolate);
+ }
// Allocate a fresh RTT otherwise.
Handle<Map> rtt;
@@ -216,8 +248,10 @@ Handle<Map> AllocateSubRtt(Isolate* isolate,
rtt = wasm::CreateArrayMap(isolate, module, type, parent);
}
- cache = RttSubtypes::Insert(isolate, cache, type, rtt);
- parent->wasm_type_info().set_subtypes(*cache);
+ if (mode == WasmRttSubMode::kCanonicalize) {
+ cache = RttSubtypes::Insert(isolate, cache, type, rtt);
+ parent->wasm_type_info().set_subtypes(*cache);
+ }
return rtt;
}
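// ---- Editor's sketch (illustration, not part of this diff) ----
// The mode switch above in miniature: in kCanonicalize mode a (parent, type)
// pair always yields the same RTT via the parent's subtype cache; otherwise
// every request allocates a fresh one. Types are simplified stand-ins for the
// V8 heap objects.
#include <cstdint>
#include <map>
#include <memory>

enum class SubMode { kCanonicalize, kFresh };
struct Rtt { uint32_t type_index; };
using SubtypeCache = std::map<uint32_t, std::shared_ptr<Rtt>>;

std::shared_ptr<Rtt> AllocateSub(SubtypeCache& parent_cache, uint32_t type,
                                 SubMode mode) {
  if (mode == SubMode::kCanonicalize) {
    auto it = parent_cache.find(type);  // reuse a canonical subtype
    if (it != parent_cache.end()) return it->second;
  }
  auto rtt = std::make_shared<Rtt>(Rtt{type});  // allocate a fresh one
  if (mode == SubMode::kCanonicalize) parent_cache.emplace(type, rtt);
  return rtt;
}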
@@ -257,6 +291,7 @@ class InstanceBuilder {
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
Handle<WasmExportedFunction> start_function_;
std::vector<SanitizedImport> sanitized_imports_;
+ Zone init_expr_zone_;
// Helper routines to print out errors with imports.
#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
@@ -293,12 +328,7 @@ class InstanceBuilder {
// Load data segments into the memory.
void LoadDataSegments(Handle<WasmInstanceObject> instance);
- void WriteGlobalValue(const WasmGlobal& global, double value);
- void WriteGlobalValue(const WasmGlobal& global, int64_t num);
- void WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value);
-
- void WriteGlobalExternRef(const WasmGlobal& global, Handle<Object> value);
+ void WriteGlobalValue(const WasmGlobal& global, const WasmValue& value);
void SanitizeImports();
@@ -355,13 +385,13 @@ class InstanceBuilder {
int ProcessImports(Handle<WasmInstanceObject> instance);
template <typename T>
- T* GetRawGlobalPtr(const WasmGlobal& global);
+ T* GetRawUntaggedGlobalPtr(const WasmGlobal& global);
// Process initialization of globals.
void InitGlobals(Handle<WasmInstanceObject> instance);
- Handle<Object> RecursivelyEvaluateGlobalInitializer(
- const WasmInitExpr& init, Handle<WasmInstanceObject> instance);
+ WasmValue EvaluateInitExpression(WireBytesRef init, ValueType expected,
+ Handle<WasmInstanceObject> instance);
// Process the exports, creating wrappers for functions, tables, memories,
// and globals.
@@ -405,7 +435,8 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate,
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_buffer_(memory_buffer) {
+ memory_buffer_(memory_buffer),
+ init_expr_zone_(isolate_->allocator(), "init. expression zone") {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -603,7 +634,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
- NativeModuleModificationScope native_modification_scope(native_module);
+ CodeSpaceWriteScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Process the imports for the module.
@@ -655,6 +686,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (table_count > 0) {
InitializeIndirectFunctionTables(instance);
+ if (thrower_->error()) return {};
}
//--------------------------------------------------------------------------
@@ -802,7 +834,7 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
// side-effect. We only accept accesses that resolve to data properties,
// which is indicated by the asm.js spec in section 7 ("Linking") as well.
Handle<Object> result;
- LookupIterator::Key key(isolate_, Handle<Name>::cast(import_name));
+ PropertyKey key(isolate_, Handle<Name>::cast(import_name));
LookupIterator it(isolate_, ffi_.ToHandleChecked(), key);
switch (it.state()) {
case LookupIterator::ACCESS_CHECK:
@@ -827,7 +859,7 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
// Load data segments into the memory.
void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
- Vector<const uint8_t> wire_bytes =
+ base::Vector<const uint8_t> wire_bytes =
module_object_->native_module()->wire_bytes();
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t size = segment.source.length();
@@ -837,14 +869,18 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
size_t dest_offset;
if (module_->is_memory64) {
- uint64_t dest_offset_64 = EvalUint64InitExpr(instance, segment.dest_addr);
+ uint64_t dest_offset_64 =
+ EvaluateInitExpression(segment.dest_addr, kWasmI64, instance)
+ .to_u64();
// Clamp to {std::numeric_limits<size_t>::max()}, which is always an
// invalid offset.
DCHECK_GT(std::numeric_limits<size_t>::max(), instance->memory_size());
dest_offset = static_cast<size_t>(std::min(
dest_offset_64, uint64_t{std::numeric_limits<size_t>::max()}));
} else {
- dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
+ dest_offset =
+ EvaluateInitExpression(segment.dest_addr, kWasmI32, instance)
+ .to_u32();
}
if (!base::IsInBounds<size_t>(dest_offset, size, instance->memory_size())) {
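// ---- Editor's sketch (illustration, not part of this diff) ----
// The memory64 clamp above in isolation: a 64-bit destination offset is
// clamped to SIZE_MAX before the bounds check. That is safe because SIZE_MAX
// can never be a valid in-bounds offset, so an overflowing offset is rejected
// rather than silently truncated.
#include <algorithm>
#include <cstddef>
#include <cstdint>

size_t ClampDestOffset(uint64_t dest64) {
  return static_cast<size_t>(std::min(dest64, uint64_t{SIZE_MAX}));
}

// An overflow-safe bounds check in the spirit of base::IsInBounds:
bool IsInBounds(size_t offset, size_t size, size_t mem_size) {
  return offset <= mem_size && size <= mem_size - offset;
}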
@@ -857,93 +893,23 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
}
}
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
- TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
- raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.name().c_str());
- switch (global.type.kind()) {
- case kI32:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- DoubleToInt32(num));
- break;
- case kI64:
- // The Wasm-BigInt proposal currently says that i64 globals may
- // only be initialized with BigInts. See:
- // https://github.com/WebAssembly/JS-BigInt-integration/issues/12
- UNREACHABLE();
- case kF32:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- DoubleToFloat32(num));
- break;
- case kF64:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, int64_t num) {
- TRACE("init [globals_start=%p + %u] = %" PRId64 ", type = %s\n",
- raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.name().c_str());
- DCHECK_EQ(kWasmI64, global.type);
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
-}
-
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value) {
- TRACE("init [globals_start=%p + %u] = ", raw_buffer_ptr(untagged_globals_, 0),
- global.offset);
- switch (global.type.kind()) {
- case kI32: {
- int32_t num = value->GetI32();
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
- TRACE("%d", num);
- break;
- }
- case kI64: {
- int64_t num = value->GetI64();
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
- TRACE("%" PRId64, num);
- break;
- }
- case kF32: {
- float num = value->GetF32();
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
- TRACE("%f", num);
- break;
- }
- case kF64: {
- double num = value->GetF64();
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
- TRACE("%lf", num);
- break;
- }
- case kRtt:
- case kRttWithDepth:
- case kRef:
- case kOptRef: {
- tagged_globals_->set(global.offset, *value->GetRef());
- break;
- }
- case kVoid:
- case kS128:
- case kBottom:
- case kI8:
- case kI16:
- UNREACHABLE();
+ const WasmValue& value) {
+ TRACE("init [globals_start=%p + %u] = %s, type = %s\n",
+ global.type.is_reference()
+ ? reinterpret_cast<byte*>(tagged_globals_->address())
+ : raw_buffer_ptr(untagged_globals_, 0),
+ global.offset, value.to_string().c_str(), global.type.name().c_str());
+ DCHECK(IsSubtypeOf(value.type(), global.type, module_));
+ if (global.type.is_numeric()) {
+ value.CopyTo(GetRawUntaggedGlobalPtr<byte>(global));
+ } else {
+ tagged_globals_->set(global.offset, *value.to_ref());
}
- TRACE(", type = %s (from WebAssembly.Global)\n", global.type.name().c_str());
-}
-
-void InstanceBuilder::WriteGlobalExternRef(const WasmGlobal& global,
- Handle<Object> value) {
- tagged_globals_->set(global.offset, *value, UPDATE_WRITE_BARRIER);
}
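// ---- Editor's sketch (illustration, not part of this diff) ----
// The two storage areas the unified WriteGlobalValue above targets: numeric
// globals are raw bytes in the untagged buffer (offset = byte offset), while
// reference globals are slots in a GC-visible array (offset = slot index).
// Types here are simplified stand-ins for the V8 heap objects.
#include <cstddef>
#include <cstring>
#include <vector>

struct GlobalsStorage {
  std::vector<unsigned char> untagged;  // i32/i64/f32/f64/s128 payloads
  std::vector<const void*> tagged;      // references, visited by the GC

  void WriteNumeric(size_t byte_offset, const void* src, size_t size) {
    std::memcpy(untagged.data() + byte_offset, src, size);  // raw value copy
  }
  void WriteRef(size_t slot_index, const void* ref) {
    tagged[slot_index] = ref;  // slot store, with a write barrier in V8
  }
};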
void InstanceBuilder::SanitizeImports() {
- Vector<const uint8_t> wire_bytes =
+ base::Vector<const uint8_t> wire_bytes =
module_object_->native_module()->wire_bytes();
for (size_t index = 0; index < module_->import_table.size(); ++index) {
const WasmImport& import = module_->import_table[index];
@@ -1029,15 +995,25 @@ bool InstanceBuilder::ProcessImportedFunction(
}
case compiler::WasmImportCallKind::kWasmToCapi: {
NativeModule* native_module = instance->module_object().native_module();
- Address host_address =
- WasmCapiFunction::cast(*js_receiver).GetHostCallTarget();
- WasmCodeRefScope code_ref_scope;
- WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
- isolate_->wasm_engine(), native_module, expected_sig, host_address);
- isolate_->counters()->wasm_generated_code_size()->Increment(
- wasm_code->instructions().length());
- isolate_->counters()->wasm_reloc_size()->Increment(
- wasm_code->reloc_info().length());
+ int expected_arity = static_cast<int>(expected_sig->parameter_count());
+ WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
+ // TODO(jkummerow): Consider precompiling CapiCallWrappers in parallel,
+ // just like other import wrappers.
+ WasmCode* wasm_code = cache->MaybeGet(kind, expected_sig, expected_arity);
+ if (wasm_code == nullptr) {
+ WasmCodeRefScope code_ref_scope;
+ WasmImportWrapperCache::ModificationScope cache_scope(cache);
+ wasm_code =
+ compiler::CompileWasmCapiCallWrapper(native_module, expected_sig);
+ WasmImportWrapperCache::CacheKey key(kind, expected_sig,
+ expected_arity);
+ cache_scope[key] = wasm_code;
+ wasm_code->IncRef();
+ isolate_->counters()->wasm_generated_code_size()->Increment(
+ wasm_code->instructions().length());
+ isolate_->counters()->wasm_reloc_size()->Increment(
+ wasm_code->reloc_info().length());
+ }
ImportedFunctionEntry entry(instance, func_index);
// We re-use the SetWasmToJs infrastructure because it passes the
@@ -1293,7 +1269,35 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
return true;
}
- WriteGlobalValue(global, global_object);
+ WasmValue value;
+ switch (global_object->type().kind()) {
+ case kI32:
+ value = WasmValue(global_object->GetI32());
+ break;
+ case kI64:
+ value = WasmValue(global_object->GetI64());
+ break;
+ case kF32:
+ value = WasmValue(global_object->GetF32());
+ break;
+ case kF64:
+ value = WasmValue(global_object->GetF64());
+ break;
+ case kRtt:
+ case kRttWithDepth:
+ case kRef:
+ case kOptRef:
+ value = WasmValue(global_object->GetRef(), global_object->type());
+ break;
+ case kVoid:
+ case kS128:
+ case kBottom:
+ case kI8:
+ case kI16:
+ UNREACHABLE();
+ }
+
+ WriteGlobalValue(global, value);
return true;
}
@@ -1362,17 +1366,26 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
ReportLinkError(error_message, global_index, module_name, import_name);
return false;
}
- WriteGlobalExternRef(global, value);
+ WriteGlobalValue(global, WasmValue(value, global.type));
return true;
}
if (value->IsNumber() && global.type != kWasmI64) {
- WriteGlobalValue(global, value->Number());
+ double number_value = value->Number();
+ // The Wasm-BigInt proposal currently says that i64 globals may
+ // only be initialized with BigInts. See:
+ // https://github.com/WebAssembly/JS-BigInt-integration/issues/12
+ WasmValue wasm_value = global.type == kWasmI32
+ ? WasmValue(DoubleToInt32(number_value))
+ : global.type == kWasmF32
+ ? WasmValue(DoubleToFloat32(number_value))
+ : WasmValue(number_value);
+ WriteGlobalValue(global, wasm_value);
return true;
}
if (global.type == kWasmI64 && value->IsBigInt()) {
- WriteGlobalValue(global, BigInt::cast(*value).AsInt64());
+ WriteGlobalValue(global, WasmValue(BigInt::cast(*value).AsInt64()));
return true;
}
@@ -1431,8 +1444,7 @@ void InstanceBuilder::CompileImportWrappers(
}
auto compile_job_task = std::make_unique<CompileImportWrapperJob>(
- isolate_->wasm_engine(), isolate_->counters(), native_module,
- &import_wrapper_queue, &cache_scope);
+ isolate_->counters(), native_module, &import_wrapper_queue, &cache_scope);
auto compile_job = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible, std::move(compile_job_task));
@@ -1515,125 +1527,50 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
default:
UNREACHABLE();
- break;
}
}
return num_imported_functions;
}
template <typename T>
-T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
+T* InstanceBuilder::GetRawUntaggedGlobalPtr(const WasmGlobal& global) {
return reinterpret_cast<T*>(raw_buffer_ptr(untagged_globals_, global.offset));
}
-Handle<Object> InstanceBuilder::RecursivelyEvaluateGlobalInitializer(
- const WasmInitExpr& init, Handle<WasmInstanceObject> instance) {
- switch (init.kind()) {
- case WasmInitExpr::kI32Const:
- case WasmInitExpr::kI64Const:
- case WasmInitExpr::kF32Const:
- case WasmInitExpr::kF64Const:
- case WasmInitExpr::kS128Const:
- case WasmInitExpr::kRefNullConst:
- case WasmInitExpr::kRefFuncConst:
- case WasmInitExpr::kNone:
- // Handled directly by {InitGlobals()}, can't occur as recursive case.
- UNREACHABLE();
- break;
- case WasmInitExpr::kGlobalGet: {
- // We can only get here for reference-type globals, but we don't have
- // enough information to DCHECK that directly.
- DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
- uint32_t old_offset = module_->globals[init.immediate().index].offset;
- DCHECK(static_cast<int>(old_offset) < tagged_globals_->length());
- return handle(tagged_globals_->get(old_offset), isolate_);
- }
- case WasmInitExpr::kRttCanon: {
- int map_index = init.immediate().index;
- return handle(instance->managed_object_maps().get(map_index), isolate_);
- }
- case WasmInitExpr::kRttSub: {
- uint32_t type = init.immediate().index;
- Handle<Object> parent =
- RecursivelyEvaluateGlobalInitializer(*init.operand(), instance);
- return AllocateSubRtt(isolate_, instance, type,
- Handle<Map>::cast(parent));
- }
- }
+WasmValue InstanceBuilder::EvaluateInitExpression(
+ WireBytesRef init, ValueType expected,
+ Handle<WasmInstanceObject> instance) {
+ base::Vector<const byte> module_bytes =
+ instance->module_object().native_module()->wire_bytes();
+ FunctionBody body(FunctionSig::Build(&init_expr_zone_, {expected}, {}),
+ init.offset(), module_bytes.begin() + init.offset(),
+ module_bytes.begin() + init.end_offset());
+ WasmFeatures detected;
+ // We use kFullValidation so we do not have to create another instance of
+ // WasmFullDecoder, which would cost us >50Kb binary code size.
+ WasmFullDecoder<Decoder::kFullValidation, InitExprInterface, kInitExpression>
+ decoder(&init_expr_zone_, module_, WasmFeatures::All(), &detected, body,
+ module_, isolate_, instance, tagged_globals_, untagged_globals_);
+
+ decoder.DecodeFunctionBody();
+
+ return decoder.interface().result();
}
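// ---- Editor's sketch (illustration, not part of this diff) ----
// Why the comment above reuses the kFullValidation instantiation: each
// distinct template-argument combination stamps out a complete copy of the
// decoder in the binary, so sharing one instantiation between module decoding
// and instantiation avoids duplicating the >50KB of decoder code.
template <int kValidationMode>
struct BigDecoder {
  // ... hundreds of methods; one full copy is emitted per mode in use ...
  int Decode() { return kValidationMode; }
};

BigDecoder<0> full_validation;   // instantiates one copy of every method
// BigDecoder<1> no_validation;  // would emit a second, similarly large copy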
// Process initialization of globals.
void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
for (const WasmGlobal& global : module_->globals) {
- if (global.mutability && global.imported) {
- continue;
- }
+ if (global.mutability && global.imported) continue;
+ // Happens with imported globals.
+ if (!global.init.is_set()) continue;
- switch (global.init.kind()) {
- case WasmInitExpr::kI32Const:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- global.init.immediate().i32_const);
- break;
- case WasmInitExpr::kI64Const:
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
- global.init.immediate().i64_const);
- break;
- case WasmInitExpr::kF32Const:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- global.init.immediate().f32_const);
- break;
- case WasmInitExpr::kF64Const:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
- global.init.immediate().f64_const);
- break;
- case WasmInitExpr::kS128Const:
- DCHECK(enabled_.has_simd());
- WriteLittleEndianValue<std::array<uint8_t, kSimd128Size>>(
- GetRawGlobalPtr<std::array<uint8_t, kSimd128Size>>(global),
- global.init.immediate().s128_const);
- break;
- case WasmInitExpr::kRefNullConst:
- DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
- if (global.imported) break; // We already initialized imported globals.
+ WasmValue value =
+ EvaluateInitExpression(global.init, global.type, instance);
- tagged_globals_->set(global.offset,
- ReadOnlyRoots(isolate_).null_value(),
- SKIP_WRITE_BARRIER);
- break;
- case WasmInitExpr::kRefFuncConst: {
- DCHECK(enabled_.has_reftypes());
- auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
- isolate_, instance, global.init.immediate().index);
- tagged_globals_->set(global.offset, *function);
- break;
- }
- case WasmInitExpr::kGlobalGet: {
- // Initialize with another global.
- uint32_t new_offset = global.offset;
- uint32_t old_offset =
- module_->globals[global.init.immediate().index].offset;
- TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- if (global.type.is_reference()) {
- DCHECK(enabled_.has_reftypes());
- tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
- } else {
- size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
- ? sizeof(double)
- : sizeof(int32_t);
- base::Memcpy(raw_buffer_ptr(untagged_globals_, new_offset),
- raw_buffer_ptr(untagged_globals_, old_offset), size);
- }
- break;
- }
- case WasmInitExpr::kRttCanon:
- case WasmInitExpr::kRttSub:
- tagged_globals_->set(
- global.offset,
- *RecursivelyEvaluateGlobalInitializer(global.init, instance));
- break;
- case WasmInitExpr::kNone:
- // Happens with imported globals.
- break;
+ if (global.type.is_reference()) {
+ tagged_globals_->set(global.offset, *value.to_ref());
+ } else {
+ value.CopyTo(GetRawUntaggedGlobalPtr<byte>(global));
}
}
}
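
As a concrete example (assuming reference types are enabled): a module with
(global (mut i32) (i32.const 7)) gets the value 7 written into the untagged
globals buffer at that global's offset, while (global funcref (ref.func $f))
stores the resulting function reference into the tagged globals array.
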
@@ -1644,18 +1581,12 @@ bool InstanceBuilder::AllocateMemory() {
int maximum_pages = module_->has_maximum_pages
? static_cast<int>(module_->maximum_pages)
: WasmMemoryObject::kNoMaximum;
- if (initial_pages > static_cast<int>(max_mem_pages())) {
- thrower_->RangeError("Out of memory: wasm memory too large");
- return false;
- }
auto shared = (module_->has_shared_memory && enabled_.has_threads())
? SharedFlag::kShared
: SharedFlag::kNotShared;
- MaybeHandle<WasmMemoryObject> result =
- WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared);
-
- if (!result.ToHandle(&memory_object_)) {
+ if (!WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared)
+ .ToHandle(&memory_object_)) {
thrower_->RangeError("Out of memory: wasm memory");
return false;
}
@@ -1819,7 +1750,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
default:
UNREACHABLE();
- break;
}
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
@@ -1841,6 +1771,61 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
}
+void SetNullTableEntry(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Handle<WasmTableObject> table_object,
+ uint32_t table_index, uint32_t entry_index) {
+ const WasmModule* module = instance->module();
+ if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
+ IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
+ }
+ WasmTableObject::Set(isolate, table_object, entry_index,
+ isolate->factory()->null_value());
+}
+
+void SetFunctionTableEntry(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmTableObject> table_object,
+ uint32_t table_index, uint32_t entry_index,
+ uint32_t func_index) {
+ const WasmModule* module = instance->module();
+ const WasmFunction* function = &module->functions[func_index];
+
+ // For externref tables, we have to generate the WasmExternalFunction eagerly.
+  // Later, we cannot tell whether an entry is a placeholder or a real entry.
+ if (table_object->type().is_reference_to(HeapType::kExtern)) {
+ Handle<WasmExternalFunction> wasm_external_function =
+ WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
+ func_index);
+ WasmTableObject::Set(isolate, table_object, entry_index,
+ wasm_external_function);
+ } else {
+ DCHECK(IsSubtypeOf(table_object->type(), kWasmFuncRef, module));
+
+ // Update the local dispatch table first if necessary.
+ uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
+ .Set(sig_id, instance, func_index);
+
+ // Update the table object's other dispatch tables.
+ MaybeHandle<WasmExternalFunction> wasm_external_function =
+ WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ func_index);
+ if (wasm_external_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create a
+ // {Tuple2} holding the information to lazily allocate one.
+ WasmTableObject::SetFunctionTablePlaceholder(
+ isolate, table_object, entry_index, instance, func_index);
+ } else {
+ table_object->entries().set(entry_index,
+ *wasm_external_function.ToHandleChecked());
+ }
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
+ function->sig, instance, func_index);
+ }
+}
+
void InstanceBuilder::InitializeIndirectFunctionTables(
Handle<WasmInstanceObject> instance) {
for (int table_index = 0;
@@ -1853,39 +1838,35 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
}
if (!table.type.is_defaultable()) {
- // Function constant is currently the only viable initializer.
- DCHECK(table.initial_value.kind() == WasmInitExpr::kRefFuncConst);
- uint32_t func_index = table.initial_value.immediate().index;
-
- uint32_t sig_id =
- module_->canonicalized_type_ids[module_->functions[func_index]
- .sig_index];
- MaybeHandle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetWasmExternalFunction(isolate_, instance,
- func_index);
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(table_index)), isolate_);
- for (uint32_t entry_index = 0; entry_index < table.initial_size;
- entry_index++) {
- // Update the local dispatch table first.
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, instance, func_index);
-
- // Update the table object's other dispatch tables.
- if (wasm_external_function.is_null()) {
- // No JSFunction entry yet exists for this function. Create a {Tuple2}
- // holding the information to lazily allocate one.
- WasmTableObject::SetFunctionTablePlaceholder(
- isolate_, table_object, entry_index, instance, func_index);
- } else {
- table_object->entries().set(
- entry_index, *wasm_external_function.ToHandleChecked());
+ Handle<Object> value =
+ EvaluateInitExpression(table.initial_value, table.type, instance)
+ .to_ref();
+ if (value.is_null()) {
+ for (uint32_t entry_index = 0; entry_index < table.initial_size;
+ entry_index++) {
+ SetNullTableEntry(isolate_, instance, table_object, table_index,
+ entry_index);
+ }
+ } else if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ uint32_t function_index =
+ Handle<WasmExportedFunction>::cast(value)->function_index();
+ for (uint32_t entry_index = 0; entry_index < table.initial_size;
+ entry_index++) {
+ SetFunctionTableEntry(isolate_, instance, table_object, table_index,
+ entry_index, function_index);
+ }
+ } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
+ // TODO(manoskouk): Support WasmJSFunction.
+ thrower_->TypeError(
+ "Initializing a table with a Webassembly.Function object is not "
+ "supported yet");
+ } else {
+ for (uint32_t entry_index = 0; entry_index < table.initial_size;
+ entry_index++) {
+ WasmTableObject::Set(isolate_, table_object, entry_index, value);
}
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(
- isolate_, table_object, entry_index,
- module_->functions[func_index].sig, instance, func_index);
}
}
}
@@ -1909,68 +1890,39 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
return false;
}
- const WasmModule* module = instance->module();
for (size_t i = 0; i < count; ++i) {
- const WasmInitExpr* init = &elem_segment.entries[src + i];
+ WasmElemSegment::Entry init = elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);
-
- if (init->kind() == WasmInitExpr::kRefNullConst) {
- if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
- }
- WasmTableObject::Set(isolate, table_object, entry_index,
- isolate->factory()->null_value());
- continue;
- }
-
- if (init->kind() == WasmInitExpr::kGlobalGet) {
- WasmTableObject::Set(
- isolate, table_object, entry_index,
- WasmInstanceObject::GetGlobalValue(
- instance, module->globals[init->immediate().index])
- .to_ref());
- continue;
- }
-
- DCHECK_EQ(init->kind(), WasmInitExpr::kRefFuncConst);
-
- const uint32_t func_index = init->immediate().index;
- const WasmFunction* function = &module->functions[func_index];
-
- // Update the local dispatch table first if necessary.
- if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, instance, func_index);
- }
-
- // For ExternRef tables, we have to generate the WasmExternalFunction
- // eagerly. Later we cannot know if an entry is a placeholder or not.
- if (table_object->type().is_reference_to(HeapType::kExtern)) {
- Handle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
- func_index);
- WasmTableObject::Set(isolate, table_object, entry_index,
- wasm_external_function);
- } else {
- // Update the table object's other dispatch tables.
- MaybeHandle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
- func_index);
- if (wasm_external_function.is_null()) {
- // No JSFunction entry yet exists for this function. Create a {Tuple2}
- // holding the information to lazily allocate one.
- WasmTableObject::SetFunctionTablePlaceholder(
- isolate, table_object, entry_index, instance, func_index);
- } else {
- table_object->entries().set(entry_index,
- *wasm_external_function.ToHandleChecked());
+ switch (init.kind) {
+ case WasmElemSegment::Entry::kRefNullEntry:
+ SetNullTableEntry(isolate, instance, table_object, table_index,
+ entry_index);
+ break;
+ case WasmElemSegment::Entry::kRefFuncEntry:
+ SetFunctionTableEntry(isolate, instance, table_object, table_index,
+ entry_index, init.index);
+ break;
+ case WasmElemSegment::Entry::kGlobalGetEntry: {
+ Handle<Object> value =
+ WasmInstanceObject::GetGlobalValue(
+ instance, instance->module()->globals[init.index])
+ .to_ref();
+ if (value.is_null()) {
+ SetNullTableEntry(isolate, instance, table_object, table_index,
+ entry_index);
+ } else if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ uint32_t function_index =
+ Handle<WasmExportedFunction>::cast(value)->function_index();
+ SetFunctionTableEntry(isolate, instance, table_object, table_index,
+ entry_index, function_index);
+ } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
+ // TODO(manoskouk): Support WasmJSFunction.
+ return false;
+ } else {
+ WasmTableObject::Set(isolate, table_object, entry_index, value);
+ }
+ break;
}
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
- function->sig, instance,
- func_index);
}
}
return true;
@@ -1984,7 +1936,9 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
if (elem_segment.status != WasmElemSegment::kStatusActive) continue;
uint32_t table_index = elem_segment.table_index;
- uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
+ uint32_t dst =
+ EvaluateInitExpression(elem_segment.offset, kWasmI32, instance)
+ .to_u32();
uint32_t src = 0;
size_t count = elem_segment.entries.size();
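
For instance, an active segment written as

    (elem (table 0) (offset (i32.const 2)) func $f $g)

yields dst = 2 (from the evaluated offset expression), src = 0, and count = 2.
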
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 1556036bba..22bc7d259a 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete;
// The buffer passed into OnBytesReceived is owned by the caller.
- void OnBytesReceived(Vector<const uint8_t> bytes) override;
+ void OnBytesReceived(base::Vector<const uint8_t> bytes) override;
void Finish() override;
@@ -56,22 +56,21 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// payload_length: The length of the payload.
// length_bytes: The section length, as it is encoded in the module bytes.
SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
- Vector<const uint8_t> length_bytes)
+ base::Vector<const uint8_t> length_bytes)
: // ID + length + payload
module_offset_(module_offset),
- bytes_(OwnedVector<uint8_t>::NewForOverwrite(
+ bytes_(base::OwnedVector<uint8_t>::NewForOverwrite(
1 + length_bytes.length() + payload_length)),
payload_offset_(1 + length_bytes.length()) {
bytes_.start()[0] = id;
- base::Memcpy(bytes_.start() + 1, &length_bytes.first(),
- length_bytes.length());
+ memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
}
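
As a layout example: for a hypothetical code section (id 0x0A) whose payload
length of 5 is encoded as the single LEB byte 0x05, the buffer holds
[0x0A, 0x05, <5 payload bytes>] and payload_offset_ is 2.
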
SectionCode section_code() const {
return static_cast<SectionCode>(bytes_.start()[0]);
}
- Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
DCHECK_LE(module_offset_, ref.offset());
uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
return bytes().SubVector(offset_in_code_buffer,
@@ -79,14 +78,14 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
}
uint32_t module_offset() const { return module_offset_; }
- Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
- Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
+ base::Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
+ base::Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
size_t length() const { return bytes_.size(); }
size_t payload_offset() const { return payload_offset_; }
private:
const uint32_t module_offset_;
- const OwnedVector<uint8_t> bytes_;
+ const base::OwnedVector<uint8_t> bytes_;
const size_t payload_offset_;
};
@@ -123,13 +122,13 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// Reads the bytes for the current state and returns the number of read
// bytes.
virtual size_t ReadBytes(AsyncStreamingDecoder* streaming,
- Vector<const uint8_t> bytes);
+ base::Vector<const uint8_t> bytes);
// Returns the next state of the streaming decoding.
virtual std::unique_ptr<DecodingState> Next(
AsyncStreamingDecoder* streaming) = 0;
// The buffer to store the received bytes.
- virtual Vector<uint8_t> buffer() = 0;
+ virtual base::Vector<uint8_t> buffer() = 0;
// The number of bytes which were already received.
size_t offset() const { return offset_; }
void set_offset(size_t value) { offset_ = value; }
@@ -155,7 +154,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// Creates a buffer for the next section of the module.
SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
size_t length,
- Vector<const uint8_t> length_bytes);
+ base::Vector<const uint8_t> length_bytes);
std::unique_ptr<DecodingState> Error(const WasmError& error) {
if (ok()) processor_->OnError(error);
@@ -195,7 +194,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
}
}
- void ProcessFunctionBody(Vector<const uint8_t> bytes,
+ void ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t module_offset) {
if (!ok()) return;
if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
@@ -224,7 +223,7 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
std::vector<uint8_t> wire_bytes_for_deserializing_;
};
-void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
+void AsyncStreamingDecoder::OnBytesReceived(base::Vector<const uint8_t> bytes) {
if (deserializing()) {
wire_bytes_for_deserializing_.insert(wire_bytes_for_deserializing_.end(),
bytes.begin(), bytes.end());
@@ -250,11 +249,11 @@ void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
}
size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
- AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
- Vector<uint8_t> remaining_buf = buffer() + offset();
+ AsyncStreamingDecoder* streaming, base::Vector<const uint8_t> bytes) {
+ base::Vector<uint8_t> remaining_buf = buffer() + offset();
size_t num_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
- base::Memcpy(remaining_buf.begin(), &bytes.first(), num_bytes);
+ memcpy(remaining_buf.begin(), &bytes.first(), num_bytes);
set_offset(offset() + num_bytes);
return num_bytes;
}
@@ -266,7 +265,8 @@ void AsyncStreamingDecoder::Finish() {
if (!ok()) return;
if (deserializing()) {
- Vector<const uint8_t> wire_bytes = VectorOf(wire_bytes_for_deserializing_);
+ base::Vector<const uint8_t> wire_bytes =
+ base::VectorOf(wire_bytes_for_deserializing_);
// Try to deserialize the module from wire bytes and module bytes.
if (processor_->Deserialize(compiled_module_bytes_, wire_bytes)) return;
@@ -283,19 +283,19 @@ void AsyncStreamingDecoder::Finish() {
return;
}
- OwnedVector<uint8_t> bytes =
- OwnedVector<uint8_t>::NewForOverwrite(total_size_);
+ base::OwnedVector<uint8_t> bytes =
+ base::OwnedVector<uint8_t>::NewForOverwrite(total_size_);
uint8_t* cursor = bytes.start();
{
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
uint8_t module_header[]{BYTES(kWasmMagic), BYTES(kWasmVersion)};
#undef BYTES
- base::Memcpy(cursor, module_header, arraysize(module_header));
+ memcpy(cursor, module_header, arraysize(module_header));
cursor += arraysize(module_header);
}
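
With kWasmMagic = 0x6d736100 ("\0asm" in little-endian byte order) and
kWasmVersion = 1, the eight header bytes written here are:

    00 61 73 6d 01 00 00 00
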
for (const auto& buffer : section_buffers_) {
DCHECK_LE(cursor - bytes.start() + buffer->length(), total_size_);
- base::Memcpy(cursor, buffer->bytes().begin(), buffer->length());
+ memcpy(cursor, buffer->bytes().begin(), buffer->length());
cursor += buffer->length();
}
processor_->OnFinishedStream(std::move(bytes));
@@ -360,10 +360,12 @@ class AsyncStreamingDecoder::DecodeVarInt32 : public DecodingState {
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
- Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
+ base::Vector<uint8_t> buffer() override {
+ return base::ArrayVector(byte_buffer_);
+ }
size_t ReadBytes(AsyncStreamingDecoder* streaming,
- Vector<const uint8_t> bytes) override;
+ base::Vector<const uint8_t> bytes) override;
std::unique_ptr<DecodingState> Next(
AsyncStreamingDecoder* streaming) override;
@@ -383,7 +385,9 @@ class AsyncStreamingDecoder::DecodeVarInt32 : public DecodingState {
class AsyncStreamingDecoder::DecodeModuleHeader : public DecodingState {
public:
- Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
+ base::Vector<uint8_t> buffer() override {
+ return base::ArrayVector(byte_buffer_);
+ }
std::unique_ptr<DecodingState> Next(
AsyncStreamingDecoder* streaming) override;
@@ -402,7 +406,7 @@ class AsyncStreamingDecoder::DecodeSectionID : public DecodingState {
explicit DecodeSectionID(uint32_t module_offset)
: module_offset_(module_offset) {}
- Vector<uint8_t> buffer() override { return {&id_, 1}; }
+ base::Vector<uint8_t> buffer() override { return {&id_, 1}; }
bool is_finishing_allowed() const override { return true; }
std::unique_ptr<DecodingState> Next(
@@ -435,7 +439,7 @@ class AsyncStreamingDecoder::DecodeSectionPayload : public DecodingState {
explicit DecodeSectionPayload(SectionBuffer* section_buffer)
: section_buffer_(section_buffer) {}
- Vector<uint8_t> buffer() override { return section_buffer_->payload(); }
+ base::Vector<uint8_t> buffer() override { return section_buffer_->payload(); }
std::unique_ptr<DecodingState> Next(
AsyncStreamingDecoder* streaming) override;
@@ -462,7 +466,7 @@ class AsyncStreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
explicit DecodeFunctionLength(SectionBuffer* section_buffer,
size_t buffer_offset,
size_t num_remaining_functions)
- : DecodeVarInt32(kV8MaxWasmFunctionSize, "body size"),
+ : DecodeVarInt32(kV8MaxWasmFunctionSize, "function body size"),
section_buffer_(section_buffer),
buffer_offset_(buffer_offset),
// We are reading a new function, so one fewer function remains.
@@ -491,8 +495,8 @@ class AsyncStreamingDecoder::DecodeFunctionBody : public DecodingState {
num_remaining_functions_(num_remaining_functions),
module_offset_(module_offset) {}
- Vector<uint8_t> buffer() override {
- Vector<uint8_t> remaining_buffer =
+ base::Vector<uint8_t> buffer() override {
+ base::Vector<uint8_t> remaining_buffer =
section_buffer_->bytes() + buffer_offset_;
return remaining_buffer.SubVector(0, function_body_length_);
}
@@ -509,12 +513,12 @@ class AsyncStreamingDecoder::DecodeFunctionBody : public DecodingState {
};
size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
- AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
- Vector<uint8_t> buf = buffer();
- Vector<uint8_t> remaining_buf = buf + offset();
+ AsyncStreamingDecoder* streaming, base::Vector<const uint8_t> bytes) {
+ base::Vector<uint8_t> buf = buffer();
+ base::Vector<uint8_t> remaining_buf = buf + offset();
size_t new_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes of a VarInt\n");
- base::Memcpy(remaining_buf.begin(), &bytes.first(), new_bytes);
+ memcpy(remaining_buf.begin(), &bytes.first(), new_bytes);
buf.Truncate(offset() + new_bytes);
Decoder decoder(buf,
streaming->module_offset() - static_cast<uint32_t>(offset()));
@@ -549,8 +553,8 @@ AsyncStreamingDecoder::DecodeVarInt32::Next(AsyncStreamingDecoder* streaming) {
if (value_ > max_value_) {
std::ostringstream oss;
- oss << "function size > maximum function size: " << value_ << " < "
- << max_value_;
+ oss << "The value " << value_ << " for " << field_name_
+ << " exceeds the maximum allowed value of " << max_value_;
return streaming->Error(oss.str());
}
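
For example, a function body whose declared size exceeds
kV8MaxWasmFunctionSize (7,654,321 bytes at the time of this change, see
wasm-limits.h) now reports: "The value 8000000 for function body size exceeds
the maximum allowed value of 7654321".
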
@@ -624,11 +628,11 @@ AsyncStreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
- Vector<uint8_t> payload_buf = section_buffer_->payload();
+ base::Vector<uint8_t> payload_buf = section_buffer_->payload();
if (payload_buf.size() < bytes_consumed_) {
return streaming->Error("invalid code section length");
}
- base::Memcpy(payload_buf.begin(), buffer().begin(), bytes_consumed_);
+ memcpy(payload_buf.begin(), buffer().begin(), bytes_consumed_);
// {value} is the number of functions.
if (value_ == 0) {
@@ -659,11 +663,12 @@ AsyncStreamingDecoder::DecodeFunctionLength::NextWithValue(
AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
- Vector<uint8_t> fun_length_buffer = section_buffer_->bytes() + buffer_offset_;
+ base::Vector<uint8_t> fun_length_buffer =
+ section_buffer_->bytes() + buffer_offset_;
if (fun_length_buffer.size() < bytes_consumed_) {
return streaming->Error("read past code section end");
}
- base::Memcpy(fun_length_buffer.begin(), buffer().begin(), bytes_consumed_);
+ memcpy(fun_length_buffer.begin(), buffer().begin(), bytes_consumed_);
// {value} is the length of the function.
if (value_ == 0) return streaming->Error("invalid function length (0)");
@@ -704,7 +709,7 @@ AsyncStreamingDecoder::AsyncStreamingDecoder(
AsyncStreamingDecoder::SectionBuffer* AsyncStreamingDecoder::CreateNewBuffer(
uint32_t module_offset, uint8_t section_id, size_t length,
- Vector<const uint8_t> length_bytes) {
+ base::Vector<const uint8_t> length_bytes) {
// Section buffers are allocated in the same order they appear in the module;
// they will be processed and later concatenated in that same order.
section_buffers_.emplace_back(std::make_shared<SectionBuffer>(
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 0dfbf1bf78..2c5e1eae3c 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -13,7 +13,7 @@
#include <vector>
#include "src/base/macros.h"
-#include "src/utils/vector.h"
+#include "src/base/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
@@ -31,13 +31,14 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
virtual ~StreamingProcessor() = default;
// Process the first 8 bytes of a WebAssembly module. Returns true if the
// processing finished successfully and the decoding should continue.
- virtual bool ProcessModuleHeader(Vector<const uint8_t> bytes,
+ virtual bool ProcessModuleHeader(base::Vector<const uint8_t> bytes,
uint32_t offset) = 0;
// Process all sections but the code section. Returns true if the processing
// finished successfully and the decoding should continue.
virtual bool ProcessSection(SectionCode section_code,
- Vector<const uint8_t> bytes, uint32_t offset) = 0;
+ base::Vector<const uint8_t> bytes,
+ uint32_t offset) = 0;
// Process the start of the code section. Returns true if the processing
// finished successfully and the decoding should continue.
@@ -48,7 +49,7 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// Process a function body. Returns true if the processing finished
// successfully and the decoding should continue.
- virtual bool ProcessFunctionBody(Vector<const uint8_t> bytes,
+ virtual bool ProcessFunctionBody(base::Vector<const uint8_t> bytes,
uint32_t offset) = 0;
// Report the end of a chunk.
@@ -56,15 +57,15 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// Report the end of the stream. If the stream was successful, all
// received bytes are passed by parameter. If there has been an error, an
// empty array is passed.
- virtual void OnFinishedStream(OwnedVector<uint8_t> bytes) = 0;
+ virtual void OnFinishedStream(base::OwnedVector<uint8_t> bytes) = 0;
// Report an error detected in the StreamingDecoder.
virtual void OnError(const WasmError&) = 0;
// Report the abortion of the stream.
virtual void OnAbort() = 0;
// Attempt to deserialize the module. Supports embedder caching.
- virtual bool Deserialize(Vector<const uint8_t> module_bytes,
- Vector<const uint8_t> wire_bytes) = 0;
+ virtual bool Deserialize(base::Vector<const uint8_t> module_bytes,
+ base::Vector<const uint8_t> wire_bytes) = 0;
};
// The StreamingDecoder takes a sequence of byte arrays, each received by a call
@@ -75,7 +76,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
virtual ~StreamingDecoder() = default;
// The buffer passed into OnBytesReceived is owned by the caller.
- virtual void OnBytesReceived(Vector<const uint8_t> bytes) = 0;
+ virtual void OnBytesReceived(base::Vector<const uint8_t> bytes) = 0;
virtual void Finish() = 0;
@@ -95,7 +96,8 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
}
// Passes previously compiled module bytes from the embedder's cache.
- bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes) {
+ bool SetCompiledModuleBytes(
+ base::Vector<const uint8_t> compiled_module_bytes) {
compiled_module_bytes_ = compiled_module_bytes;
return true;
}
@@ -103,9 +105,9 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
virtual void NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) = 0;
- Vector<const char> url() { return VectorOf(url_); }
+ base::Vector<const char> url() { return base::VectorOf(url_); }
- void SetUrl(Vector<const char> url) {
+ void SetUrl(base::Vector<const char> url) {
url_.assign(url.begin(), url.length());
}
@@ -122,7 +124,7 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
std::string url_;
ModuleCompiledCallback module_compiled_callback_;
- Vector<const uint8_t> compiled_module_bytes_;
+ base::Vector<const uint8_t> compiled_module_bytes_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/struct-types.h b/deps/v8/src/wasm/struct-types.h
index 1bc0a1666f..eafa90a215 100644
--- a/deps/v8/src/wasm/struct-types.h
+++ b/deps/v8/src/wasm/struct-types.h
@@ -114,6 +114,9 @@ class StructType : public ZoneObject {
bool* const mutabilities_;
};
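+  // Upper bound on any field offset: each of the up to
+  // kV8MaxWasmStructFields fields occupies at most kMaxValueTypeSize bytes,
+  // so the last field starts no later than this offset.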
+ static const size_t kMaxFieldOffset =
+ (kV8MaxWasmStructFields - 1) * kMaxValueTypeSize;
+
private:
const uint32_t field_count_;
uint32_t* const field_offsets_;
diff --git a/deps/v8/src/wasm/sync-streaming-decoder.cc b/deps/v8/src/wasm/sync-streaming-decoder.cc
index 7152806d9d..73c22cb5a3 100644
--- a/deps/v8/src/wasm/sync-streaming-decoder.cc
+++ b/deps/v8/src/wasm/sync-streaming-decoder.cc
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
resolver_(resolver) {}
// The buffer passed into OnBytesReceived is owned by the caller.
- void OnBytesReceived(Vector<const uint8_t> bytes) override {
+ void OnBytesReceived(base::Vector<const uint8_t> bytes) override {
buffer_.emplace_back(bytes.size());
CHECK_EQ(buffer_.back().size(), bytes.size());
std::memcpy(buffer_.back().data(), bytes.data(), bytes.size());
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
MaybeHandle<WasmModuleObject> module_object = DeserializeNativeModule(
isolate_, compiled_module_bytes_,
- Vector<const uint8_t>(bytes.get(), buffer_size_), url());
+ base::Vector<const uint8_t>(bytes.get(), buffer_size_), url());
if (!module_object.is_null()) {
Handle<WasmModuleObject> module = module_object.ToHandleChecked();
@@ -62,8 +62,7 @@ class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
ModuleWireBytes wire_bytes(bytes.get(), bytes.get() + buffer_size_);
ErrorThrower thrower(isolate_, api_method_name_for_errors_);
MaybeHandle<WasmModuleObject> module_object =
- isolate_->wasm_engine()->SyncCompile(isolate_, enabled_, &thrower,
- wire_bytes);
+ GetWasmEngine()->SyncCompile(isolate_, enabled_, &thrower, wire_bytes);
if (thrower.error()) {
resolver_->OnCompilationFailed(thrower.Reify());
return;
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 7895a731f6..c12496759f 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -50,6 +50,8 @@ class Simd128;
V(OptRef, kTaggedSizeLog2, OptRef, AnyTagged, 'n', "ref null") \
V(Bottom, -1, Void, None, '*', "<bot>")
+constexpr int kMaxValueTypeSize = 16; // bytes
+
// Represents a WebAssembly heap type, as per the typed-funcref and gc
// proposals.
// The underlying Representation enumeration encodes heap types as follows:
@@ -184,6 +186,18 @@ enum ValueKind : uint8_t {
#undef DEF_ENUM
};
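+// True for the numeric value kinds (e.g. kI32, kF64), false for reference
+// kinds such as kOptRef.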
+constexpr bool is_numeric(ValueKind kind) {
+ switch (kind) {
+#define NUMERIC_CASE(kind, ...) \
+ case k##kind: \
+ return true;
+ FOREACH_NUMERIC_VALUE_TYPE(NUMERIC_CASE)
+#undef NUMERIC_CASE
+ default:
+ return false;
+ }
+}
+
constexpr bool is_reference(ValueKind kind) {
return kind == kRef || kind == kOptRef || kind == kRtt ||
kind == kRttWithDepth;
@@ -310,6 +324,8 @@ class ValueType {
}
/******************************** Type checks *******************************/
+ constexpr bool is_numeric() const { return wasm::is_numeric(kind()); }
+
constexpr bool is_reference() const { return wasm::is_reference(kind()); }
constexpr bool is_object_reference() const {
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 8907cbab31..fe30f804b7 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -11,6 +11,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
+#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -21,7 +22,6 @@
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
-#include "src/utils/vector.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
@@ -52,10 +52,6 @@ namespace wasm {
using trap_handler::ProtectedInstructionData;
-#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
-thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
-#endif
-
base::AddressRegion DisjointAllocationPool::Merge(
base::AddressRegion new_region) {
// Find the possible insertion position by identifying the first region whose
@@ -177,7 +173,7 @@ int WasmCode::code_comments_size() const {
}
std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
- std::initializer_list<Vector<const byte>> vectors) {
+ std::initializer_list<base::Vector<const byte>> vectors) {
size_t total_size = 0;
for (auto& vec : vectors) total_size += vec.size();
// Use default-initialization (== no initialization).
@@ -185,7 +181,7 @@ std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
byte* ptr = result.get();
for (auto& vec : vectors) {
if (vec.empty()) continue; // Avoid nullptr in {memcpy}.
- base::Memcpy(ptr, vec.begin(), vec.size());
+ memcpy(ptr, vec.begin(), vec.size());
ptr += vec.size();
}
return result;
@@ -253,21 +249,21 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
constexpr size_t kMaxSigLength = 128;
name_buffer.resize(prefix_len + kMaxSigLength);
const FunctionSig* sig = module->functions[index_].sig;
- size_t sig_length =
- PrintSignature(VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
+ size_t sig_length = PrintSignature(
+ base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
name_buffer.resize(prefix_len + sig_length);
// If the import has a name, also append that (separated by "-").
if (!name.empty()) {
name_buffer += '-';
name_buffer.append(name.begin(), name.size());
}
- name = VectorOf(name_buffer);
+ name = base::VectorOf(name_buffer);
} else if (name.empty()) {
name_buffer.resize(32);
name_buffer.resize(
- SNPrintF(VectorOf(&name_buffer.front(), name_buffer.size()),
+ SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
"wasm-function[%d]", index()));
- name = VectorOf(name_buffer);
+ name = base::VectorOf(name_buffer);
}
// TODO(clemensb): Remove this #if once this compilation unit is excluded in
// no-wasm builds.
@@ -284,6 +280,15 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
}
void WasmCode::Validate() const {
+ // The packing strategy for {tagged_parameter_slots} only works if both the
+  // max number of parameters and their max combined stack slot usage fit into
+ // their respective half of the result value.
+ STATIC_ASSERT(wasm::kV8MaxWasmFunctionParams <
+ std::numeric_limits<uint16_t>::max());
+ static constexpr int kMaxSlotsPerParam = 4; // S128 on 32-bit platforms.
+ STATIC_ASSERT(wasm::kV8MaxWasmFunctionParams * kMaxSlotsPerParam <
+ std::numeric_limits<uint16_t>::max());
+
#ifdef DEBUG
// Scope for foreign WasmCode pointers.
WasmCodeRefScope code_ref_scope;
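
To make the two STATIC_ASSERTs above concrete, here is a minimal sketch of
packing a slot count and a first-slot index into one uint32_t, one value per
16-bit half (which half holds which value is an illustrative assumption here;
the authoritative accessors live in wasm-code-manager.h):

    #include <cstdint>

    constexpr uint32_t PackTaggedParameterSlots(uint16_t num_slots,
                                                uint16_t first_slot) {
      // One 16-bit half per value; the asserts above guarantee both fit.
      return (static_cast<uint32_t>(num_slots) << 16) | first_slot;
    }
    constexpr uint16_t NumSlots(uint32_t packed) { return packed >> 16; }
    constexpr uint16_t FirstSlot(uint32_t packed) { return packed & 0xFFFF; }

    static_assert(NumSlots(PackTaggedParameterSlots(3, 7)) == 3, "");
    static_assert(FirstSlot(PackTaggedParameterSlots(3, 7)) == 7, "");
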
@@ -358,7 +363,13 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (name) os << "name: " << name << "\n";
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
- os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
+ if (kind() == kFunction) {
+ DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
+ const char* compiler =
+ is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
+ : "TurboFan";
+ os << "compiler: " << compiler << "\n";
+ }
size_t padding = instructions().size() - unpadded_binary_size_;
os << "Body (size = " << instructions().size() << " = "
<< unpadded_binary_size_ << " + " << padding << " padding)\n";
@@ -376,7 +387,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
}
DCHECK_LT(0, instruction_size);
os << "Instructions (size = " << instruction_size << ")\n";
- Disassembler::Decode(nullptr, &os, instructions().begin(),
+ Disassembler::Decode(nullptr, os, instructions().begin(),
instructions().begin() + instruction_size,
CodeReference(this), current_pc);
os << "\n";
@@ -465,7 +476,7 @@ WasmCode::~WasmCode() {
}
V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
- if (native_module_->engine()->AddPotentiallyDeadCode(this)) {
+ if (GetWasmEngine()->AddPotentiallyDeadCode(this)) {
// The code just became potentially dead. The ref count we wanted to
// decrement is now transferred to the set of potentially dead code, and
// will be decremented when the next GC is run.
@@ -477,20 +488,18 @@ V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
}
// static
-void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
+void WasmCode::DecrementRefCount(base::Vector<WasmCode* const> code_vec) {
// Decrement the ref counter of all given code objects. Keep the ones whose
// ref count drops to zero.
WasmEngine::DeadCodeMap dead_code;
- WasmEngine* engine = nullptr;
for (WasmCode* code : code_vec) {
if (!code->DecRef()) continue; // Remaining references.
dead_code[code->native_module()].push_back(code);
- if (!engine) engine = code->native_module()->engine();
- DCHECK_EQ(engine, code->native_module()->engine());
}
- DCHECK_EQ(dead_code.empty(), engine == nullptr);
- if (engine) engine->FreeDeadCode(dead_code);
+ if (dead_code.empty()) return;
+
+ GetWasmEngine()->FreeDeadCode(dead_code);
}
int WasmCode::GetSourcePositionBefore(int offset) {
@@ -506,16 +515,14 @@ int WasmCode::GetSourcePositionBefore(int offset) {
// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
-WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
- std::shared_ptr<Counters> async_counters)
- : code_manager_(code_manager),
- async_counters_(std::move(async_counters)) {
+WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
+ : async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
}
WasmCodeAllocator::~WasmCodeAllocator() {
- code_manager_->FreeNativeModule(VectorOf(owned_code_space_),
- committed_code_space());
+ GetWasmCodeManager()->FreeNativeModule(base::VectorOf(owned_code_space_),
+ committed_code_space());
}
void WasmCodeAllocator::Init(VirtualMemory code_space) {
@@ -594,35 +601,41 @@ size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
return overhead;
}
-size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
- size_t total_reserved) {
+// Returns both the minimum size to reserve, and an estimate how much should be
+// reserved.
+std::pair<size_t, size_t> ReservationSize(size_t code_size_estimate,
+ int num_declared_functions,
+ size_t total_reserved) {
size_t overhead = OverheadPerCodeSpace(num_declared_functions);
// Reserve a power of two at least as big as any of
// a) needed size + overhead (this is the minimum needed)
// b) 2 * overhead (to not waste too much space by overhead)
// c) 1/4 of current total reservation size (to grow exponentially)
- size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
+ size_t minimum_size = 2 * overhead;
+ size_t suggested_size = base::bits::RoundUpToPowerOfTwo(
std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
- 2 * overhead),
+ minimum_size),
total_reserved / 4));
// Limit by the maximum supported code space size.
- return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
+ size_t reserve_size =
+ std::min(WasmCodeAllocator::kMaxCodeSpaceSize, suggested_size);
+
+ return {minimum_size, reserve_size};
}
} // namespace
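
A worked example with illustrative numbers: for overhead = 64 KB,
code_size_estimate = 100 KB and total_reserved = 1 MB, the minimum is
2 * 64 KB = 128 KB, and the suggestion is
RoundUpToPowerOfTwo(max(100 KB + 64 KB, 128 KB, 256 KB)) = 256 KB, so the
function returns {128 KB, 256 KB} (both well below kMaxCodeSpaceSize).
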
-Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
- size_t size) {
+base::Vector<byte> WasmCodeAllocator::AllocateForCode(
+ NativeModule* native_module, size_t size) {
return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
}
-Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
+base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
NativeModule* native_module, size_t size, base::AddressRegion region) {
- DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ auto* code_manager = GetWasmCodeManager();
size = RoundUp<kCodeAlignment>(size);
base::AddressRegion code_space =
free_code_space_.AllocateInRegion(size, region);
@@ -637,17 +650,19 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
size_t total_reserved = 0;
for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
- size_t reserve_size = ReservationSize(
+ size_t min_reservation;
+ size_t reserve_size;
+ std::tie(min_reservation, reserve_size) = ReservationSize(
size, native_module->module()->num_declared_functions, total_reserved);
VirtualMemory new_mem =
- code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
- if (!new_mem.IsReserved()) {
+ code_manager->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
+ if (!new_mem.IsReserved() || new_mem.size() < min_reservation) {
V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
UNREACHABLE();
}
base::AddressRegion new_region = new_mem.region();
- code_manager_->AssignRange(new_region, native_module);
+ code_manager->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
native_module->AddCodeSpaceLocked(new_region);
@@ -657,7 +672,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
async_counters_->wasm_module_num_code_spaces()->AddSample(
static_cast<int>(owned_code_space_.size()));
}
- const Address commit_page_size = page_allocator->CommitPageSize();
+ const Address commit_page_size = CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
Address commit_end = RoundUp(code_space.end(), commit_page_size);
// {commit_start} will be either code_space.start or the start of the next
@@ -671,7 +686,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
if (commit_start < commit_end) {
for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
{commit_start, commit_end - commit_start}, owned_code_space_)) {
- code_manager_->Commit(split_range);
+ code_manager->Commit(split_range);
}
committed_code_space_.fetch_add(commit_end - commit_start);
// Committed code cannot grow bigger than maximum code space size.
@@ -687,12 +702,11 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
// TODO(dlehmann): Do not return the success as a bool, but instead fail hard.
-// That is, pull the CHECK from {NativeModuleModificationScope} in here and
-// return void.
+// That is, pull the CHECK from {CodeSpaceWriteScope} in here and return void.
// TODO(dlehmann): Ensure {SetWritable(true)} is always paired up with a
// {SetWritable(false)}, such that eventually the code space is write protected.
// One solution is to make the API foolproof by hiding {SetWritable()} and
-// allowing change of permissions only through {NativeModuleModificationScope}.
+// allowing change of permissions only through {CodeSpaceWriteScope}.
// TODO(dlehmann): Add tests that ensure the code space is eventually write-
// protected.
bool WasmCodeAllocator::SetWritable(bool writable) {
@@ -753,38 +767,11 @@ bool WasmCodeAllocator::SetWritable(bool writable) {
return true;
}
-bool WasmCodeAllocator::SetThreadWritable(bool writable) {
- static thread_local int writable_nesting_level = 0;
- if (writable) {
- if (++writable_nesting_level > 1) return true;
- } else {
- DCHECK_GT(writable_nesting_level, 0);
- if (--writable_nesting_level > 0) return true;
- }
- writable = writable_nesting_level > 0;
-
- int key = code_manager_->memory_protection_key_;
-
- MemoryProtectionKeyPermission permissions =
- writable ? kNoRestrictions : kDisableWrite;
-
- TRACE_HEAP("Setting memory protection key %d to writable: %d.\n", key,
- writable);
- return SetPermissionsForMemoryProtectionKey(key, permissions);
-}
-
-void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
+void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
size_t code_size = 0;
- CODE_SPACE_WRITE_SCOPE
for (WasmCode* code : codes) {
- // TODO(dlehmann): Pull the {NativeModuleModificationScope} out of the loop.
- // However, its constructor requires a {NativeModule}.
- // Can be fixed if {NativeModuleModificationScope()} is changed to take
- // only a {WasmCodeAllocator} in its constructor.
- NativeModuleModificationScope native_module_modification_scope(
- code->native_module());
ZapCode(code->instruction_start(), code->instructions().size());
FlushInstructionCache(code->instruction_start(),
code->instructions().size());
@@ -798,8 +785,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
// pages to decommit into {regions_to_decommit} (decommitting is expensive,
// so try to merge regions before decommitting).
DisjointAllocationPool regions_to_decommit;
- PageAllocator* allocator = GetPlatformPageAllocator();
- size_t commit_page_size = allocator->CommitPageSize();
+ size_t commit_page_size = CommitPageSize();
for (auto region : freed_regions.regions()) {
auto merged_region = freed_code_space_.Merge(region);
Address discard_start =
@@ -812,13 +798,14 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
regions_to_decommit.Merge({discard_start, discard_end - discard_start});
}
+ auto* code_manager = GetWasmCodeManager();
for (auto region : regions_to_decommit.regions()) {
size_t old_committed = committed_code_space_.fetch_sub(region.size());
DCHECK_GE(old_committed, region.size());
USE(old_committed);
for (base::AddressRegion split_range :
SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
- code_manager_->Decommit(split_range);
+ code_manager->Decommit(split_range);
}
}
}
@@ -830,20 +817,30 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const {
// static
constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;
-NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
+namespace {
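+// Selects the bounds-checking strategy for memory accesses: none if disabled
+// by flag, explicit checks when enforced by flag or required (memory64 has
+// no trap-handler support yet), trap-handler-based checks when available,
+// and explicit checks as the fallback.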
+BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
+ if (!FLAG_wasm_bounds_checks) return kNoBoundsChecks;
+ if (FLAG_wasm_enforce_bounds_checks) return kExplicitBoundsChecks;
+ // We do not have trap handler support for memory64 yet.
+ if (module->is_memory64) return kExplicitBoundsChecks;
+ if (trap_handler::IsTrapHandlerEnabled()) return kTrapHandler;
+ return kExplicitBoundsChecks;
+}
+} // namespace
+
+NativeModule::NativeModule(const WasmFeatures& enabled,
VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
- : engine_(engine),
- engine_scope_(engine->GetBarrierForBackgroundCompile()->TryLock()),
- code_allocator_(engine->code_manager(), async_counters),
+ : engine_scope_(
+ GetWasmEngine()->GetBarrierForBackgroundCompile()->TryLock()),
+ code_allocator_(async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
new WasmImportWrapperCache())),
- use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
- : kNoTrapHandler) {
+ bounds_checks_(GetBoundsChecks(module_.get())) {
DCHECK(engine_scope_);
// We receive a pointer to an empty {std::shared_ptr} and install ourselves there.
// there.
@@ -852,7 +849,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
shared_this->reset(this);
compilation_state_ =
CompilationState::New(*shared_this, std::move(async_counters));
- compilation_state_->InitCompileJob(engine);
+ compilation_state_->InitCompileJob();
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
code_table_ =
@@ -880,8 +877,8 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
DCHECK_LE(module_->num_declared_functions, max_functions);
auto new_table = std::make_unique<WasmCode*[]>(max_functions);
if (module_->num_declared_functions > 0) {
- base::Memcpy(new_table.get(), code_table_.get(),
- module_->num_declared_functions * sizeof(WasmCode*));
+ memcpy(new_table.get(), code_table_.get(),
+ module_->num_declared_functions * sizeof(WasmCode*));
}
code_table_ = std::move(new_table);
@@ -920,30 +917,29 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), use_trap_handler_, kRuntimeExceptionSupport,
+ return {module(), bounds_checks_, kRuntimeExceptionSupport,
enabled_features_};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
+ CodeSpaceWriteScope code_space_write_scope(this);
const size_t relocation_size = code->relocation_size();
- OwnedVector<byte> reloc_info;
+ base::OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
- reloc_info = OwnedVector<byte>::Of(
- Vector<byte>{code->relocation_start(), relocation_size});
+ reloc_info = base::OwnedVector<byte>::Of(
+ base::Vector<byte>{code->relocation_start(), relocation_size});
}
Handle<ByteArray> source_pos_table(code->source_position_table(),
code->GetIsolate());
- OwnedVector<byte> source_pos =
- OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
+ base::OwnedVector<byte> source_pos =
+ base::OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
if (source_pos_table->length() > 0) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
}
CHECK(!code->is_off_heap_trampoline());
STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
- Vector<const byte> instructions(
+ base::Vector<const byte> instructions(
reinterpret_cast<byte*>(code->raw_body_start()),
static_cast<size_t>(code->raw_body_size()));
const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
@@ -962,10 +958,9 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
const int code_comments_offset = base_offset + code->code_comments_offset();
base::RecursiveMutexGuard guard{&allocation_mutex_};
- Vector<uint8_t> dst_code_bytes =
+ base::Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
- base::Memcpy(dst_code_bytes.begin(), instructions.begin(),
- instructions.size());
+ memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
@@ -1024,11 +1019,10 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
module_->num_imported_functions + module_->num_declared_functions);
base::RecursiveMutexGuard guard(&allocation_mutex_);
+ CodeSpaceWriteScope code_space_write_scope(this);
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
DCHECK_EQ(1, code_space_data_.size());
base::AddressRegion single_code_space_region = code_space_data_[0].region;
lazy_compile_table_ = CreateEmptyJumpTableInRegionLocked(
@@ -1054,10 +1048,11 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
std::unique_ptr<WasmCode> NativeModule::AddCode(
int index, const CodeDesc& desc, int stack_slots,
- int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
- Vector<const byte> source_position_table, WasmCode::Kind kind,
+ uint32_t tagged_parameter_slots,
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
- Vector<byte> code_space;
+ base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
@@ -1065,6 +1060,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
jump_table_ref =
FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
}
+ CodeSpaceWriteScope code_space_write_scope(this);
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
@@ -1073,12 +1069,14 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
int index, const CodeDesc& desc, int stack_slots,
- int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
- Vector<const byte> source_position_table, WasmCode::Kind kind,
+ uint32_t tagged_parameter_slots,
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging,
- Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
- Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
- static_cast<size_t>(desc.reloc_size)};
+ base::Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
+ base::Vector<byte> reloc_info{
+ desc.buffer + desc.buffer_size - desc.reloc_size,
+ static_cast<size_t>(desc.reloc_size)};
UpdateCodeSize(desc.instr_size, tier, for_debugging);
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
@@ -1091,10 +1089,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
const int code_comments_offset = desc.code_comments_offset;
const int instr_size = desc.instr_size;
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
- base::Memcpy(dst_code_bytes.begin(), desc.buffer,
- static_cast<size_t>(desc.instr_size));
+ memcpy(dst_code_bytes.begin(), desc.buffer,
+ static_cast<size_t>(desc.instr_size));
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = dst_code_bytes.begin() - desc.buffer;
@@ -1144,16 +1140,20 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode");
base::RecursiveMutexGuard lock(&allocation_mutex_);
+ CodeSpaceWriteScope code_space_write_scope(this);
return PublishCodeLocked(std::move(code));
}
std::vector<WasmCode*> NativeModule::PublishCode(
- Vector<std::unique_ptr<WasmCode>> codes) {
+ base::Vector<std::unique_ptr<WasmCode>> codes) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
base::RecursiveMutexGuard lock(&allocation_mutex_);
+  // Acquire write permission here (rather than inside the loop in
+  // {PatchJumpTablesLocked}) to avoid switching permissions for each {code}.
+ CodeSpaceWriteScope code_space_write_scope(this);
// The published code is put into the top-most surrounding {WasmCodeRefScope}.
for (auto& code : codes) {
published_code.push_back(PublishCodeLocked(std::move(code)));
@@ -1216,8 +1216,10 @@ WasmCode* NativeModule::PublishCodeLocked(
(tiering_state_ == kTieredDown
// Tiered down: Install breakpoints over normal debug code.
? prior_code->for_debugging() <= code->for_debugging()
- // Tiered up: Install if the tier is higher than before.
- : prior_code->tier() < code->tier()));
+ // Tiered up: Install if the tier is higher than before or we
+ // replace debugging code with non-debugging code.
+ : (prior_code->tier() < code->tier() ||
+ (prior_code->for_debugging() && !code->for_debugging()))));
if (update_code_table) {
code_table_[slot_idx] = code;
if (prior_code) {
@@ -1234,10 +1236,6 @@ WasmCode* NativeModule::PublishCodeLocked(
// {WasmCodeRefScope} though, so it cannot die here.
code->DecRefOnLiveCode();
}
- if (!code->for_debugging() && tiering_state_ == kTieredDown &&
- code->tier() == ExecutionTier::kTurbofan) {
- liftoff_bailout_count_.fetch_add(1);
- }
return code;
}
@@ -1250,7 +1248,9 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
DCHECK(!code->IsAnonymous());
DCHECK_LE(module_->num_imported_functions, code->index());
DCHECK_LT(code->index(), num_functions());
- DCHECK_EQ(kTieredDown, tiering_state_);
+
+ // If the module is tiered up by now, do not reinstall debug code.
+ if (tiering_state_ != kTieredDown) return;
uint32_t slot_idx = declared_function_index(module(), code->index());
if (WasmCode* prior_code = code_table_[slot_idx]) {
@@ -1262,13 +1262,14 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
code_table_[slot_idx] = code;
code->IncRef();
+ CodeSpaceWriteScope code_space_write_scope(this);
PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
-std::pair<Vector<uint8_t>, NativeModule::JumpTablesRef>
+std::pair<base::Vector<uint8_t>, NativeModule::JumpTablesRef>
NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
base::RecursiveMutexGuard guard{&allocation_mutex_};
- Vector<uint8_t> code_space =
+ base::Vector<uint8_t> code_space =
code_allocator_.AllocateForCode(this, total_code_size);
auto jump_tables =
FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
@@ -1276,13 +1277,14 @@ NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
}
std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
- int index, Vector<byte> instructions, int stack_slots,
- int tagged_parameter_slots, int safepoint_table_offset,
+ int index, base::Vector<byte> instructions, int stack_slots,
+ uint32_t tagged_parameter_slots, int safepoint_table_offset,
int handler_table_offset, int constant_pool_offset,
int code_comments_offset, int unpadded_binary_size,
- Vector<const byte> protected_instructions_data,
- Vector<const byte> reloc_info, Vector<const byte> source_position_table,
- WasmCode::Kind kind, ExecutionTier tier) {
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> reloc_info,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier) {
UpdateCodeSize(instructions.size(), tier, kNoDebugging);
return std::unique_ptr<WasmCode>{new WasmCode{
@@ -1296,7 +1298,7 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::RecursiveMutexGuard lock(&allocation_mutex_);
WasmCode** start = code_table_.get();
WasmCode** end = start + module_->num_declared_functions;
- for (WasmCode* code : VectorOf(start, end - start)) {
+ for (WasmCode* code : base::VectorOf(start, end - start)) {
if (code) WasmCodeRefScope::AddRef(code);
}
return std::vector<WasmCode*>{start, end};
@@ -1334,12 +1336,11 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
allocation_mutex_.AssertHeld();
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space =
+ base::Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
+ CodeSpaceWriteScope code_space_write_scope(this);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1373,8 +1374,6 @@ void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
allocation_mutex_.AssertHeld();
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.jump_table) continue;
@@ -1423,11 +1422,10 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
// See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
// https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
// for details.
- if (engine_->code_manager()
- ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+ if (WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding =
+ base::Vector<byte> padding =
code_allocator_.AllocateForCodeInRegion(this, size, region);
CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
win64_unwindinfo::RegisterNonABICompliantCodeRange(
@@ -1436,8 +1434,7 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
#endif // V8_OS_WIN64
WasmCodeRefScope code_ref_scope;
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
+ CodeSpaceWriteScope code_space_write_scope(this);
WasmCode* jump_table = nullptr;
WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -1462,16 +1459,16 @@ void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
region);
CHECK(region.contains(far_jump_table->instruction_start()));
EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) Builtins::k##Name,
+#define RUNTIME_STUB(Name) Builtin::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
+ Builtin stub_names[WasmCode::kRuntimeStubCount] = {
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address builtin_addresses[WasmCode::kRuntimeStubCount];
for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
- Builtins::Name builtin = stub_names[i];
+ Builtin builtin = stub_names[i];
builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
}
JumpTableAssembler::GenerateFarJumpTable(
@@ -1513,23 +1510,23 @@ namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {
public:
explicit NativeModuleWireBytesStorage(
- std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
+ std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes)
: wire_bytes_(std::move(wire_bytes)) {}
- Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
return std::atomic_load(&wire_bytes_)
->as_vector()
.SubVector(ref.offset(), ref.end_offset());
}
private:
- const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
+ const std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
};
} // namespace
-void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
+void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
auto shared_wire_bytes =
- std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
+ std::make_shared<base::OwnedVector<const uint8_t>>(std::move(wire_bytes));
std::atomic_store(&wire_bytes_, shared_wire_bytes);
if (!shared_wire_bytes->empty()) {
compilation_state_->SetWireBytesStorage(
@@ -1538,6 +1535,20 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
}
}
+void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
+ if (tier == WasmCompilationUnit::GetBaselineExecutionTier(this->module())) {
+ if (!compilation_state_->baseline_compilation_finished()) {
+ baseline_compilation_cpu_duration_.fetch_add(
+ cpu_duration, std::memory_order::memory_order_relaxed);
+ }
+ } else if (tier == ExecutionTier::kTurbofan) {
+ if (!compilation_state_->top_tier_compilation_finished()) {
+ tier_up_cpu_duration_.fetch_add(cpu_duration,
+ std::memory_order::memory_order_relaxed);
+ }
+ }
+}
+
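A plausible call site for the new accounting; the timer handling is illustrative, only {UpdateCPUDuration} itself comes from this patch:

    base::ElapsedTimer timer;
    timer.Start();
    // ... run the Liftoff or TurboFan compilation unit ...
    native_module->UpdateCPUDuration(
        static_cast<size_t>(timer.Elapsed().InMicroseconds()), tier);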
void NativeModule::TransferNewOwnedCodeLocked() const {
allocation_mutex_.AssertHeld();
DCHECK(!new_owned_code_.empty());
@@ -1697,21 +1708,19 @@ NativeModule::~NativeModule() {
// Cancel all background compilation before resetting any field of the
// NativeModule or freeing anything.
compilation_state_->CancelCompilation();
- engine_->FreeNativeModule(this);
+ GetWasmEngine()->FreeNativeModule(this);
// Free the import wrapper cache before releasing the {WasmCode} objects in
// {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
// decrease reference counts on the {WasmCode} objects.
import_wrapper_cache_.reset();
}
-WasmCodeManager::WasmCodeManager(size_t max_committed)
- : max_committed_code_space_(max_committed),
- critical_committed_code_space_(max_committed / 2),
+WasmCodeManager::WasmCodeManager()
+ : max_committed_code_space_(FLAG_wasm_max_code_space * MB),
+ critical_committed_code_space_(max_committed_code_space_ / 2),
memory_protection_key_(FLAG_wasm_memory_protection_keys
? AllocateMemoryProtectionKey()
- : kNoMemoryProtectionKey) {
- DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
-}
+ : kNoMemoryProtectionKey) {}
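For a concrete feel, assuming --wasm-max-code-space is set to 1024 (an assumption for illustration; the default is platform-dependent), the constructor now yields:

    max_committed_code_space_      == 1024 * MB
    critical_committed_code_space_ ==  512 * MB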
WasmCodeManager::~WasmCodeManager() {
// No more committed code space.
@@ -1723,7 +1732,8 @@ WasmCodeManager::~WasmCodeManager() {
}
#if defined(V8_OS_WIN64)
-bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
+// static
+bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() {
return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
FLAG_win64_unwinding_info;
}
@@ -1731,7 +1741,7 @@ bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
void WasmCodeManager::Commit(base::AddressRegion region) {
// TODO(v8:8462): Remove eager commit once perf supports remapping.
- if (V8_UNLIKELY(FLAG_perf_prof)) return;
+ if (FLAG_perf_prof) return;
DCHECK(IsAligned(region.begin(), CommitPageSize()));
DCHECK(IsAligned(region.size(), CommitPageSize()));
// Reserve the size. Use CAS loop to avoid overflow on
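The CAS reservation loop referenced above follows the standard pattern; a simplified sketch using the surrounding names (failure handling elided):

    size_t old_committed = total_committed_code_space_.load();
    size_t new_committed;
    do {
      new_committed = old_committed + region.size();
      // A plain fetch_add could overflow past the maximum; re-check instead.
      if (new_committed > max_committed_code_space_) break;  // out of code space
    } while (!total_committed_code_space_.compare_exchange_weak(old_committed,
                                                                new_committed));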
@@ -1757,13 +1767,12 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
// TODO(dlehmann): This allocates initially as writable and executable, and
// as such is not safe-by-default. In particular, if
// {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
- // because no {NativeModuleModificationScope} is created), the writable
- // permission is never withdrawn.
+ // because no {CodeSpaceWriteScope} is created), the writable permission is
+ // never withdrawn.
// One potential fix is to allocate initially with kReadExecute only, which
- // forces all compilation threads to add the missing
- // {NativeModuleModificationScope}s before modification; and/or adding
- // DCHECKs that {NativeModuleModificationScope} is open when calling this
- // method.
+ // forces all compilation threads to add the missing {CodeSpaceWriteScope}s
+ // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
+ // open when calling this method.
PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
bool success;
@@ -1791,7 +1800,7 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
void WasmCodeManager::Decommit(base::AddressRegion region) {
// TODO(v8:8462): Remove this once perf supports remapping.
- if (V8_UNLIKELY(FLAG_perf_prof)) return;
+ if (FLAG_perf_prof) return;
PageAllocator* allocator = GetPlatformPageAllocator();
DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
@@ -1959,10 +1968,32 @@ size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
return wasm_module_estimate + native_module_estimate;
}
+void WasmCodeManager::SetThreadWritable(bool writable) {
+ DCHECK(HasMemoryProtectionKeySupport());
+ static thread_local int writable_nesting_level = 0;
+ if (writable) {
+ if (++writable_nesting_level > 1) return;
+ } else {
+ DCHECK_GT(writable_nesting_level, 0);
+ if (--writable_nesting_level > 0) return;
+ }
+ writable = writable_nesting_level > 0;
+
+ MemoryProtectionKeyPermission permissions =
+ writable ? kNoRestrictions : kDisableWrite;
+
+ TRACE_HEAP("Setting memory protection key %d to writable: %d.\n",
+ memory_protection_key_, writable);
+ SetPermissionsForMemoryProtectionKey(memory_protection_key_, permissions);
+}
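Given the thread-local nesting counter, only the outermost transition actually touches the protection key:

    manager->SetThreadWritable(true);   // level 0 -> 1: pkey set to writable
    manager->SetThreadWritable(true);   // level 1 -> 2: no permission change
    manager->SetThreadWritable(false);  // level 2 -> 1: still writable
    manager->SetThreadWritable(false);  // level 1 -> 0: write access revoked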
+
+bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
+ return memory_protection_key_ != kNoMemoryProtectionKey;
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
- size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
- DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
+ Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
+ std::shared_ptr<const WasmModule> module) {
if (total_committed_code_space_.load() >
critical_committed_code_space_.load()) {
(reinterpret_cast<v8::Isolate*>(isolate))
@@ -1973,18 +2004,25 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
committed + (max_committed_code_space_ - committed) / 2);
}
- // If we cannot add code space later, reserve enough address space up front.
- size_t code_vmem_size =
+ size_t min_code_size;
+ size_t code_vmem_size;
+ std::tie(min_code_size, code_vmem_size) =
ReservationSize(code_size_estimate, module->num_declared_functions, 0);
- // The '--wasm-max-code-space-reservation' testing flag can be used to reduce
- // the maximum size of the initial code space reservation (in MB).
+ // The '--wasm-max-initial-code-space-reservation' testing flag can be used to
+ // reduce the maximum size of the initial code space reservation (in MB).
if (FLAG_wasm_max_initial_code_space_reservation > 0) {
size_t flag_max_bytes =
static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
}
+ // If we cannot allocate enough code space, fail with an OOM message.
+ if (code_vmem_size < min_code_size) {
+ V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
+ UNREACHABLE();
+ }
+
// Try up to two times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may have
// floating garbage.
@@ -2006,7 +2044,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size = code_space.size();
Address end = code_space.end();
std::shared_ptr<NativeModule> ret;
- new NativeModule(engine, enabled, std::move(code_space), std::move(module),
+ new NativeModule(enabled, std::move(code_space), std::move(module),
isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
@@ -2058,7 +2096,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
}
std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
- Vector<WasmCompilationResult> results) {
+ base::Vector<WasmCompilationResult> results) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.AddCompiledCode", "num", results.size());
DCHECK(!results.empty());
@@ -2068,7 +2106,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
}
- Vector<byte> code_space;
+ base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_tables;
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
@@ -2087,12 +2125,14 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
generated_code.reserve(results.size());
// Now copy the generated code into the code space and relocate it.
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(this);
+  // Acquire the writable permission here (rather than inside the loop in
+  // {AddCodeWithCodeSpace}) to avoid lock contention on the
+  // {allocation_mutex_} when switching for each code individually.
+ CodeSpaceWriteScope code_space_write_scope(this);
for (auto& result : results) {
- DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
+ DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
- Vector<byte> this_code_space = code_space.SubVector(0, code_size);
+ base::Vector<byte> this_code_space = code_space.SubVector(0, code_size);
code_space += code_size;
generated_code.emplace_back(AddCodeWithCodeSpace(
result.func_index, result.code_desc, result.frame_slot_count,
@@ -2121,6 +2161,11 @@ bool NativeModule::IsTieredDown() {
}
void NativeModule::RecompileForTiering() {
+ // If baseline compilation is not finished yet, we do not tier down now. This
+ // would be tricky because not all code is guaranteed to be available yet.
+  // Instead, we tier down after streaming compilation finishes.
+ if (!compilation_state_->baseline_compilation_finished()) return;
+
// Read the tiering state under the lock, then trigger recompilation after
// releasing the lock. If the tiering state was changed when the triggered
// compilation units finish, code installation will handle that correctly.
@@ -2147,6 +2192,9 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
TieringState new_tiering_state) {
WasmCodeRefScope code_ref_scope;
base::RecursiveMutexGuard guard(&allocation_mutex_);
+  // Acquire the writable permission here (rather than inside the loop in
+  // {PatchJumpTablesLocked}) to avoid switching for each slot individually.
+ CodeSpaceWriteScope code_space_write_scope(this);
std::vector<int> function_indexes;
int imported = module()->num_imported_functions;
int declared = module()->num_declared_functions;
@@ -2181,8 +2229,12 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
return function_indexes;
}
-void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
+void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
base::RecursiveMutexGuard guard(&allocation_mutex_);
+  // Acquire the writable permission here (rather than inside the loop in
+  // {WasmCodeAllocator::FreeCode}) to avoid switching for each {code}
+  // individually.
+ CodeSpaceWriteScope code_space_write_scope(this);
// Free the code space.
code_allocator_.FreeCode(codes);
@@ -2214,8 +2266,8 @@ DebugInfo* NativeModule::GetDebugInfo() {
return debug_info_.get();
}
-void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
- size_t committed_size) {
+void WasmCodeManager::FreeNativeModule(
+ base::Vector<VirtualMemory> owned_code_space, size_t committed_size) {
base::MutexGuard lock(&native_modules_mutex_);
for (auto& code_space : owned_code_space) {
DCHECK(code_space.IsReserved());
@@ -2265,39 +2317,6 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
return candidate ? candidate->Lookup(pc) : nullptr;
}
-// TODO(v8:7424): Code protection scopes are not yet supported with shared code
-// enabled and need to be revisited.
-NativeModuleModificationScope::NativeModuleModificationScope(
- NativeModule* native_module)
- : native_module_(native_module) {
- DCHECK_NOT_NULL(native_module_);
- if (FLAG_wasm_memory_protection_keys) {
- bool success = native_module_->SetThreadWritable(true);
- if (!success && FLAG_wasm_write_protect_code_memory) {
- // Fallback to mprotect-based write protection (much slower).
- success = native_module_->SetWritable(true);
- CHECK(success);
- }
- } else if (FLAG_wasm_write_protect_code_memory) {
- bool success = native_module_->SetWritable(true);
- CHECK(success);
- }
-}
-
-NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (FLAG_wasm_memory_protection_keys) {
- bool success = native_module_->SetThreadWritable(false);
- if (!success && FLAG_wasm_write_protect_code_memory) {
- // Fallback to mprotect-based write protection (much slower).
- success = native_module_->SetWritable(false);
- CHECK(success);
- }
- } else if (FLAG_wasm_write_protect_code_memory) {
- bool success = native_module_->SetWritable(false);
- CHECK(success);
- }
-}
-
namespace {
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
} // namespace
@@ -2310,7 +2329,7 @@ WasmCodeRefScope::WasmCodeRefScope()
WasmCodeRefScope::~WasmCodeRefScope() {
DCHECK_EQ(this, current_code_refs_scope);
current_code_refs_scope = previous_scope_;
- WasmCode::DecrementRefCount(VectorOf(code_ptrs_));
+ WasmCode::DecrementRefCount(base::VectorOf(code_ptrs_));
}
// static
@@ -2322,10 +2341,10 @@ void WasmCodeRefScope::AddRef(WasmCode* code) {
code->IncRef();
}
-Builtins::Name RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId stub_id) {
-#define RUNTIME_STUB_NAME(Name) Builtins::k##Name,
-#define RUNTIME_STUB_NAME_TRAP(Name) Builtins::kThrowWasm##Name,
- constexpr Builtins::Name builtin_names[] = {
+Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId stub_id) {
+#define RUNTIME_STUB_NAME(Name) Builtin::k##Name,
+#define RUNTIME_STUB_NAME_TRAP(Name) Builtin::kThrowWasm##Name,
+ constexpr Builtin builtin_names[] = {
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP)};
#undef RUNTIME_STUB_NAME
#undef RUNTIME_STUB_NAME_TRAP
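For readers new to the X-macro idiom used throughout this file: with {RUNTIME_STUB_NAME} defined as above, {WASM_RUNTIME_STUB_LIST} expands roughly to

    constexpr Builtin builtin_names[] = {
        Builtin::kThrowWasmTrapUnreachable,  // ...one entry per trap reason...
        Builtin::kWasmCompileLazy,
        Builtin::kWasmTriggerTierUp,
        // ...and so on, one entry per V(Name)/VTRAP(Name) in the list.
    };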
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 312f5346b4..3d35478cfb 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -20,11 +20,11 @@
#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
+#include "src/base/vector.h"
#include "src/builtins/builtins.h"
#include "src/handles/handles.h"
#include "src/tasks/operations-barrier.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/memory-protection-key.h"
#include "src/wasm/wasm-features.h"
@@ -43,7 +43,6 @@ namespace wasm {
class DebugInfo;
class NativeModule;
-class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
@@ -51,51 +50,71 @@ struct WasmModule;
// Convenience macro listing all wasm runtime stubs. Note that the first few
 // elements of the list coincide with {compiler::TrapId}, so order matters.
-#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
- FOREACH_WASM_TRAPREASON(VTRAP) \
- V(WasmCompileLazy) \
- V(WasmTriggerTierUp) \
- V(WasmDebugBreak) \
- V(WasmInt32ToHeapNumber) \
- V(WasmTaggedNonSmiToInt32) \
- V(WasmFloat32ToNumber) \
- V(WasmFloat64ToNumber) \
- V(WasmTaggedToFloat64) \
- V(WasmAllocateJSArray) \
- V(WasmAllocatePair) \
- V(WasmAtomicNotify) \
- V(WasmI32AtomicWait32) \
- V(WasmI32AtomicWait64) \
- V(WasmI64AtomicWait32) \
- V(WasmI64AtomicWait64) \
- V(WasmGetOwnProperty) \
- V(WasmRefFunc) \
- V(WasmMemoryGrow) \
- V(WasmTableInit) \
- V(WasmTableCopy) \
- V(WasmTableFill) \
- V(WasmTableGrow) \
- V(WasmTableGet) \
- V(WasmTableSet) \
- V(WasmStackGuard) \
- V(WasmStackOverflow) \
- V(WasmAllocateFixedArray) \
- V(WasmThrow) \
- V(WasmRethrow) \
- V(WasmTraceEnter) \
- V(WasmTraceExit) \
- V(WasmTraceMemory) \
- V(BigIntToI32Pair) \
- V(BigIntToI64) \
- V(DoubleToI) \
- V(I32PairToBigInt) \
- V(I64ToBigInt) \
- V(RecordWrite) \
- V(ToNumber) \
- V(WasmAllocateArrayWithRtt) \
- V(WasmAllocateRtt) \
- V(WasmAllocateStructWithRtt) \
- V(WasmSubtypeCheck) \
+#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
+ FOREACH_WASM_TRAPREASON(VTRAP) \
+ V(WasmCompileLazy) \
+ V(WasmTriggerTierUp) \
+ V(WasmDebugBreak) \
+ V(WasmInt32ToHeapNumber) \
+ V(WasmTaggedNonSmiToInt32) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmTaggedToFloat64) \
+ V(WasmAllocateJSArray) \
+ V(WasmAllocatePair) \
+ V(WasmAtomicNotify) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI32AtomicWait64) \
+ V(WasmI64AtomicWait32) \
+ V(WasmI64AtomicWait64) \
+ V(WasmGetOwnProperty) \
+ V(WasmRefFunc) \
+ V(WasmMemoryGrow) \
+ V(WasmTableInit) \
+ V(WasmTableCopy) \
+ V(WasmTableFill) \
+ V(WasmTableGrow) \
+ V(WasmTableGet) \
+ V(WasmTableSet) \
+ V(WasmStackGuard) \
+ V(WasmStackOverflow) \
+ V(WasmAllocateFixedArray) \
+ V(WasmThrow) \
+ V(WasmRethrow) \
+ V(WasmTraceEnter) \
+ V(WasmTraceExit) \
+ V(WasmTraceMemory) \
+ V(BigIntToI32Pair) \
+ V(BigIntToI64) \
+ V(DoubleToI) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
+ V(RecordWriteEmitRememberedSetSaveFP) \
+ V(RecordWriteOmitRememberedSetSaveFP) \
+ V(RecordWriteEmitRememberedSetIgnoreFP) \
+ V(RecordWriteOmitRememberedSetIgnoreFP) \
+ V(ToNumber) \
+ IF_TSAN(V, TSANRelaxedStore8IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedStore8SaveFP) \
+ IF_TSAN(V, TSANRelaxedStore16IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedStore16SaveFP) \
+ IF_TSAN(V, TSANRelaxedStore32IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedStore32SaveFP) \
+ IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedStore64SaveFP) \
+ IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
+ IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
+ IF_TSAN(V, TSANRelaxedLoad64SaveFP) \
+ V(WasmAllocateArray_Uninitialized) \
+ V(WasmAllocateArray_InitNull) \
+ V(WasmAllocateArray_InitZero) \
+ V(WasmArrayCopy) \
+ V(WasmArrayCopyWithChecks) \
+ V(WasmAllocateRtt) \
+ V(WasmAllocateFreshRtt) \
+ V(WasmAllocateStructWithRtt) \
+ V(WasmSubtypeCheck) \
V(WasmOnStackReplace)
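The {IF_TSAN} entries in the list rely on a conditional list macro, presumably defined along these lines elsewhere in the tree (not part of this diff):

    #ifdef V8_IS_TSAN
    #define IF_TSAN(V, arg) V(arg)   // keep the entry in TSAN builds
    #else
    #define IF_TSAN(V, arg)          // drop it otherwise
    #endif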
// Sorted, disjoint and non-overlapping memory regions. A region is of the
@@ -148,17 +167,76 @@ class V8_EXPORT_PRIVATE WasmCode final {
kRuntimeStubCount
};
- Vector<byte> instructions() const {
- return VectorOf(instructions_, static_cast<size_t>(instructions_size_));
+ static constexpr RuntimeStubId GetRecordWriteStub(
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ switch (remembered_set_action) {
+ case RememberedSetAction::kEmit:
+ switch (fp_mode) {
+ case SaveFPRegsMode::kIgnore:
+ return RuntimeStubId::kRecordWriteEmitRememberedSetIgnoreFP;
+ case SaveFPRegsMode::kSave:
+ return RuntimeStubId::kRecordWriteEmitRememberedSetSaveFP;
+ }
+ case RememberedSetAction::kOmit:
+ switch (fp_mode) {
+ case SaveFPRegsMode::kIgnore:
+ return RuntimeStubId::kRecordWriteOmitRememberedSetIgnoreFP;
+ case SaveFPRegsMode::kSave:
+ return RuntimeStubId::kRecordWriteOmitRememberedSetSaveFP;
+ }
+ }
+ }
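Usage example for the new constexpr lookup, e.g. for a store that must emit a remembered-set entry and preserve FP registers:

    WasmCode::RuntimeStubId id = WasmCode::GetRecordWriteStub(
        RememberedSetAction::kEmit, SaveFPRegsMode::kSave);
    // id == WasmCode::RuntimeStubId::kRecordWriteEmitRememberedSetSaveFP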
+
+#ifdef V8_IS_TSAN
+ static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
+ int size) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ }
+ }
+
+ static RuntimeStubId GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode,
+ int size) {
+ if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedLoad32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedLoad32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedLoad64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedLoad64SaveFP;
+ }
+ }
+#endif // V8_IS_TSAN
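Correspondingly for the TSAN helpers, e.g. a 32-bit relaxed store with FP registers ignored:

    WasmCode::RuntimeStubId id =
        WasmCode::GetTSANRelaxedStoreStub(SaveFPRegsMode::kIgnore, kInt32Size);
    // id == WasmCode::RuntimeStubId::kTSANRelaxedStore32IgnoreFP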
+
+ base::Vector<byte> instructions() const {
+ return base::VectorOf(instructions_,
+ static_cast<size_t>(instructions_size_));
}
Address instruction_start() const {
return reinterpret_cast<Address>(instructions_);
}
- Vector<const byte> reloc_info() const {
+ base::Vector<const byte> reloc_info() const {
return {protected_instructions_data().end(),
static_cast<size_t>(reloc_info_size_)};
}
- Vector<const byte> source_positions() const {
+ base::Vector<const byte> source_positions() const {
return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
}
@@ -179,7 +257,15 @@ class V8_EXPORT_PRIVATE WasmCode final {
int code_comments_offset() const { return code_comments_offset_; }
int unpadded_binary_size() const { return unpadded_binary_size_; }
int stack_slots() const { return stack_slots_; }
- int tagged_parameter_slots() const { return tagged_parameter_slots_; }
+ uint16_t first_tagged_parameter_slot() const {
+ return tagged_parameter_slots_ >> 16;
+ }
+ uint16_t num_tagged_parameter_slots() const {
+ return tagged_parameter_slots_ & 0xFFFF;
+ }
+ uint32_t raw_tagged_parameter_slots_for_serialization() const {
+ return tagged_parameter_slots_;
+ }
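The packing convention implied by these accessors (slot index in the upper 16 bits, slot count in the lower 16) can be spelled out as a hypothetical helper:

    constexpr uint32_t PackTaggedParameterSlots(uint16_t first_slot,
                                                uint16_t num_slots) {
      return (static_cast<uint32_t>(first_slot) << 16) | num_slots;
    }
    // PackTaggedParameterSlots(3, 2) == 0x00030002, so
    // first_tagged_parameter_slot() == 3 and num_tagged_parameter_slots() == 2.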
bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }
bool contains(Address pc) const {
return reinterpret_cast<Address>(instructions_) <= pc &&
@@ -190,14 +276,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
// (otherwise debug side table positions would not match up).
bool is_inspectable() const { return is_liftoff() && for_debugging(); }
- Vector<const uint8_t> protected_instructions_data() const {
+ base::Vector<const uint8_t> protected_instructions_data() const {
return {meta_data_.get(),
static_cast<size_t>(protected_instructions_size_)};
}
- Vector<const trap_handler::ProtectedInstructionData> protected_instructions()
- const {
- return Vector<const trap_handler::ProtectedInstructionData>::cast(
+ base::Vector<const trap_handler::ProtectedInstructionData>
+ protected_instructions() const {
+ return base::Vector<const trap_handler::ProtectedInstructionData>::cast(
protected_instructions_data());
}
@@ -252,7 +338,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Decrement the ref count on a set of {WasmCode} objects, potentially
// belonging to different {NativeModule}s. Dead code will be deleted.
- static void DecrementRefCount(Vector<WasmCode* const>);
+ static void DecrementRefCount(base::Vector<WasmCode* const>);
// Returns the last source position before {offset}.
int GetSourcePositionBefore(int offset);
@@ -269,14 +355,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
private:
friend class NativeModule;
- WasmCode(NativeModule* native_module, int index, Vector<byte> instructions,
- int stack_slots, int tagged_parameter_slots,
- int safepoint_table_offset, int handler_table_offset,
- int constant_pool_offset, int code_comments_offset,
- int unpadded_binary_size,
- Vector<const byte> protected_instructions_data,
- Vector<const byte> reloc_info,
- Vector<const byte> source_position_table, Kind kind,
+ WasmCode(NativeModule* native_module, int index,
+ base::Vector<byte> instructions, int stack_slots,
+ uint32_t tagged_parameter_slots, int safepoint_table_offset,
+ int handler_table_offset, int constant_pool_offset,
+ int code_comments_offset, int unpadded_binary_size,
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> reloc_info,
+ base::Vector<const byte> source_position_table, Kind kind,
ExecutionTier tier, ForDebugging for_debugging)
: native_module_(native_module),
instructions_(instructions.begin()),
@@ -303,7 +389,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
std::unique_ptr<const byte[]> ConcatenateBytes(
- std::initializer_list<Vector<const byte>>);
+ std::initializer_list<base::Vector<const byte>>);
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
@@ -341,9 +427,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
const int index_;
const int constant_pool_offset_;
const int stack_slots_;
- // Number of tagged parameters passed to this function via the stack. This
- // value is used by the stack walker (e.g. GC) to find references.
- const int tagged_parameter_slots_;
+ // Number and position of tagged parameters passed to this function via the
+ // stack, packed into a single uint32. These values are used by the stack
+ // walker (e.g. GC) to find references.
+ const uint32_t tagged_parameter_slots_;
// We care about safepoint data for wasm-to-js functions, since there may be
// stack/register tagged values for large number conversions.
const int safepoint_table_offset_;
@@ -395,7 +482,7 @@ class WasmCodeAllocator {
static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
- WasmCodeAllocator(WasmCodeManager*, std::shared_ptr<Counters> async_counters);
+ explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
@@ -413,28 +500,22 @@ class WasmCodeAllocator {
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- Vector<byte> AllocateForCode(NativeModule*, size_t size);
+ base::Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion);
+ base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
+ base::AddressRegion);
// Sets permissions of all owned code space to read-write or read-only (if
// {writable} is false). Returns true on success.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
V8_EXPORT_PRIVATE bool SetWritable(bool writable);
- // Set this thread's permission of all owned code space to read-write or
- // read-only (if {writable} is false). Uses memory protection keys.
- // Returns true on success. Since the permission is thread-local, there is no
- // requirement to hold any lock when calling this method.
- bool SetThreadWritable(bool writable);
-
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- void FreeCode(Vector<WasmCode* const>);
+ void FreeCode(base::Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -446,9 +527,6 @@ class WasmCodeAllocator {
static constexpr base::AddressRegion kUnrestrictedRegion{
kNullAddress, std::numeric_limits<size_t>::max()};
- // The engine-wide wasm code manager.
- WasmCodeManager* const code_manager_;
-
//////////////////////////////////////////////////////////////////////////////
// These fields are protected by the mutex in {NativeModule}.
@@ -489,18 +567,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
// The returned code still needs to be published via {PublishCode}.
- std::unique_ptr<WasmCode> AddCode(int index, const CodeDesc& desc,
- int stack_slots, int tagged_parameter_slots,
- Vector<const byte> protected_instructions,
- Vector<const byte> source_position_table,
- WasmCode::Kind kind, ExecutionTier tier,
- ForDebugging for_debugging);
+ std::unique_ptr<WasmCode> AddCode(
+ int index, const CodeDesc& desc, int stack_slots,
+ uint32_t tagged_parameter_slots,
+ base::Vector<const byte> protected_instructions,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier, ForDebugging for_debugging);
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the
// given {WasmCode} object. Ownership is transferred to the {NativeModule}.
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
- std::vector<WasmCode*> PublishCode(Vector<std::unique_ptr<WasmCode>>);
+ std::vector<WasmCode*> PublishCode(base::Vector<std::unique_ptr<WasmCode>>);
// ReinstallDebugCode does a subset of PublishCode: It installs the code in
// the code table and patches the jump table. The given code must be debug
@@ -516,17 +594,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
bool is_valid() const { return far_jump_table_start != kNullAddress; }
};
- std::pair<Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
+ std::pair<base::Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
- int index, Vector<byte> instructions, int stack_slots,
- int tagged_parameter_slots, int safepoint_table_offset,
+ int index, base::Vector<byte> instructions, int stack_slots,
+ uint32_t tagged_parameter_slots, int safepoint_table_offset,
int handler_table_offset, int constant_pool_offset,
int code_comments_offset, int unpadded_binary_size,
- Vector<const byte> protected_instructions_data,
- Vector<const byte> reloc_info, Vector<const byte> source_position_table,
- WasmCode::Kind kind, ExecutionTier tier);
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> reloc_info,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
+ ExecutionTier tier);
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
@@ -583,10 +662,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return code_allocator_.SetWritable(writable);
}
- bool SetThreadWritable(bool writable) {
- return code_allocator_.SetThreadWritable(writable);
- }
-
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ReserveCodeTableForTesting(uint32_t max_functions);
@@ -606,10 +681,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t num_imported_functions() const {
return module_->num_imported_functions;
}
- UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
+ BoundsCheckStrategy bounds_checks() const { return bounds_checks_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const uint8_t> wire_bytes() const {
+ base::Vector<const uint8_t> wire_bytes() const {
return std::atomic_load(&wire_bytes_)->as_vector();
}
const WasmModule* module() const { return module_.get(); }
@@ -623,13 +698,22 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t liftoff_bailout_count() const { return liftoff_bailout_count_.load(); }
size_t liftoff_code_size() const { return liftoff_code_size_.load(); }
size_t turbofan_code_size() const { return turbofan_code_size_.load(); }
- WasmEngine* engine() const { return engine_; }
+ size_t baseline_compilation_cpu_duration() const {
+ return baseline_compilation_cpu_duration_.load();
+ }
+ size_t tier_up_cpu_duration() const { return tier_up_cpu_duration_.load(); }
bool HasWireBytes() const {
auto wire_bytes = std::atomic_load(&wire_bytes_);
return wire_bytes && !wire_bytes->empty();
}
- void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
+ void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);
+
+ void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
+ void AddLiftoffBailout() {
+ liftoff_bailout_count_.fetch_add(1,
+ std::memory_order::memory_order_relaxed);
+ }
WasmCode* Lookup(Address) const;
@@ -650,7 +734,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
WasmCompilationResult);
V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
- Vector<WasmCompilationResult>);
+ base::Vector<WasmCompilationResult>);
// Set a new tiering state, but don't trigger any recompilation yet; use
 // {RecompileForTiering} for that. The two steps are split because in some
@@ -676,7 +760,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {WasmCode} objects must not be used any more.
// Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
// its accounting.
- void FreeCode(Vector<WasmCode* const>);
+ void FreeCode(base::Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces for this module.
size_t GetNumberOfCodeSpacesForTesting() const;
@@ -695,7 +779,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class WasmCode;
friend class WasmCodeAllocator;
friend class WasmCodeManager;
- friend class NativeModuleModificationScope;
+ friend class CodeSpaceWriteScope;
struct CodeSpaceData {
base::AddressRegion region;
@@ -704,19 +788,18 @@ class V8_EXPORT_PRIVATE NativeModule final {
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
- NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
- VirtualMemory code_space,
+ NativeModule(const WasmFeatures& enabled_features, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this);
std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
int index, const CodeDesc& desc, int stack_slots,
- int tagged_parameter_slots,
- Vector<const byte> protected_instructions_data,
- Vector<const byte> source_position_table, WasmCode::Kind kind,
+ uint32_t tagged_parameter_slots,
+ base::Vector<const byte> protected_instructions_data,
+ base::Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging,
- Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
+ base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
base::AddressRegion);
@@ -745,7 +828,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// -- Fields of {NativeModule} start here.
- WasmEngine* const engine_;
// Keep the engine alive as long as this NativeModule is alive. In its
// destructor, the NativeModule still communicates with the WasmCodeManager,
 // owned by the engine. This field comes before other fields which also still
@@ -770,7 +852,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Wire bytes, held in a shared_ptr so they can be kept alive by the
// {WireBytesStorage}, held by background compile tasks.
- std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
+ std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
// The first allocated jump table. Always used by external calls (from JS).
// Wasm calls might use one of the other jump tables stored in
@@ -797,10 +879,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// This mutex protects concurrent calls to {AddCode} and friends.
// TODO(dlehmann): Revert this to a regular {Mutex} again.
- // This needs to be a {RecursiveMutex} only because of
- // {NativeModuleModificationScope} usages, which are (1) either at places
- // that already hold the {allocation_mutex_} or (2) because of multiple open
- // {NativeModuleModificationScope}s in the call hierarchy. Both are fixable.
+ // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
+ // usages, which are (1) either at places that already hold the
+ // {allocation_mutex_} or (2) because of multiple open {CodeSpaceWriteScope}s
+ // in the call hierarchy. Both are fixable.
mutable base::RecursiveMutex allocation_mutex_;
//////////////////////////////////////////////////////////////////////////////
@@ -842,23 +924,25 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- UseTrapHandler use_trap_handler_ = kNoTrapHandler;
+ const BoundsCheckStrategy bounds_checks_;
bool lazy_compile_frozen_ = false;
std::atomic<size_t> liftoff_bailout_count_{0};
std::atomic<size_t> liftoff_code_size_{0};
std::atomic<size_t> turbofan_code_size_{0};
+ std::atomic<size_t> baseline_compilation_cpu_duration_{0};
+ std::atomic<size_t> tier_up_cpu_duration_{0};
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
- explicit WasmCodeManager(size_t max_committed);
+ WasmCodeManager();
WasmCodeManager(const WasmCodeManager&) = delete;
WasmCodeManager& operator=(const WasmCodeManager&) = delete;
~WasmCodeManager();
#if defined(V8_OS_WIN64)
- bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
+ static bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
#endif // V8_OS_WIN64
NativeModule* LookupNativeModule(Address pc) const;
@@ -883,21 +967,30 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
 // generated code. This data will still be stored on the C++ heap.
static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);
+ // Set this thread's permission of all owned code space to read-write or
+ // read-only (if {writable} is false). Can only be called if
+ // {HasMemoryProtectionKeySupport()} is {true}.
+ // Since the permission is thread-local, there is no requirement to hold any
+ // lock when calling this method.
+ void SetThreadWritable(bool writable);
+
+ // Returns true if there is PKU support, false otherwise.
+ bool HasMemoryProtectionKeySupport() const;
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
std::shared_ptr<NativeModule> NewNativeModule(
- WasmEngine* engine, Isolate* isolate,
- const WasmFeatures& enabled_features, size_t code_size_estimate,
- std::shared_ptr<const WasmModule> module);
+ Isolate* isolate, const WasmFeatures& enabled_features,
+ size_t code_size_estimate, std::shared_ptr<const WasmModule> module);
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
void* hint = nullptr);
void Commit(base::AddressRegion);
void Decommit(base::AddressRegion);
- void FreeNativeModule(Vector<VirtualMemory> owned_code,
+ void FreeNativeModule(base::Vector<VirtualMemory> owned_code,
size_t committed_size);
void AssignRange(base::AddressRegion, NativeModule*);
@@ -924,25 +1017,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
//////////////////////////////////////////////////////////////////////////////
};
-// Within the scope, the native_module is writable and not executable.
-// At the scope's destruction, the native_module is executable and not writable.
-// The states inside the scope and at the scope termination are irrespective of
-// native_module's state when entering the scope.
-// We currently mark the entire module's memory W^X:
-// - for AOT, that's as efficient as it can be.
-// - for Lazy, we don't have a heuristic for functions that may need patching,
-// and even if we did, the resulting set of pages may be fragmented.
-// Currently, we try and keep the number of syscalls low.
-// - similar argument for debug time.
-class V8_NODISCARD NativeModuleModificationScope final {
- public:
- explicit NativeModuleModificationScope(NativeModule* native_module);
- ~NativeModuleModificationScope();
-
- private:
- NativeModule* native_module_;
-};
-
// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
// by e.g. creating new code or looking up code by its address are added to the
// top-most {WasmCodeRefScope}.
@@ -987,7 +1061,7 @@ class GlobalWasmCodeRef {
const std::shared_ptr<NativeModule> native_module_;
};
-Builtins::Name RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
+Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index ad4e7853aa..45fa789364 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -41,7 +41,7 @@ enum ReturnLocation { kAfterBreakpoint, kAfterWasmCall };
Address FindNewPC(WasmFrame* frame, WasmCode* wasm_code, int byte_offset,
ReturnLocation return_location) {
- Vector<const uint8_t> new_pos_table = wasm_code->source_positions();
+ base::Vector<const uint8_t> new_pos_table = wasm_code->source_positions();
DCHECK_LE(0, byte_offset);
@@ -49,7 +49,7 @@ Address FindNewPC(WasmFrame* frame, WasmCode* wasm_code, int byte_offset,
// source position entry to the return address.
WasmCode* old_code = frame->wasm_code();
int pc_offset = static_cast<int>(frame->pc() - old_code->instruction_start());
- Vector<const uint8_t> old_pos_table = old_code->source_positions();
+ base::Vector<const uint8_t> old_pos_table = old_code->source_positions();
SourcePositionTableIterator old_it(old_pos_table);
int call_offset = -1;
while (!old_it.done() && old_it.code_offset() < pc_offset) {
@@ -221,7 +221,7 @@ class DebugInfoImpl {
// position. Return 0 otherwise.
// This is used to generate a "dead breakpoint" in Liftoff, which is necessary
// for OSR to find the correct return address.
- int DeadBreakpoint(WasmFrame* frame, Vector<const int> breakpoints) {
+ int DeadBreakpoint(WasmFrame* frame, base::Vector<const int> breakpoints) {
const auto& function =
native_module_->module()->functions[frame->function_index()];
int offset = frame->position() - function.code.offset();
@@ -233,7 +233,7 @@ class DebugInfoImpl {
// Find the dead breakpoint (see above) for the top wasm frame, if that frame
// is in the function of the given index.
- int DeadBreakpoint(int func_index, Vector<const int> breakpoints,
+ int DeadBreakpoint(int func_index, base::Vector<const int> breakpoints,
Isolate* isolate) {
StackTraceFrameIterator it(isolate);
if (it.done() || !it.is_wasm()) return 0;
@@ -243,7 +243,7 @@ class DebugInfoImpl {
}
WasmCode* RecompileLiftoffWithBreakpoints(int func_index,
- Vector<const int> offsets,
+ base::Vector<const int> offsets,
int dead_breakpoint) {
DCHECK(!mutex_.TryLock()); // Mutex is held externally.
@@ -272,7 +272,7 @@ class DebugInfoImpl {
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
auto* function = &native_module_->module()->functions[func_index];
- Vector<const uint8_t> wire_bytes = native_module_->wire_bytes();
+ base::Vector<const uint8_t> wire_bytes = native_module_->wire_bytes();
FunctionBody body{function->sig, function->code.offset(),
wire_bytes.begin() + function->code.offset(),
wire_bytes.begin() + function->code.end_offset()};
@@ -283,9 +283,9 @@ class DebugInfoImpl {
Counters* counters = nullptr;
WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
- native_module_->engine()->allocator(), &env, body, func_index,
- for_debugging, counters, &unused_detected, offsets,
- generate_debug_sidetable ? &debug_sidetable : nullptr, dead_breakpoint);
+ &env, body, func_index, for_debugging, counters, &unused_detected,
+ offsets, generate_debug_sidetable ? &debug_sidetable : nullptr,
+ dead_breakpoint);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed");
@@ -304,7 +304,7 @@ class DebugInfoImpl {
// Insert new code into the cache. Insert before existing elements for LRU.
cached_debugging_code_.insert(
cached_debugging_code_.begin(),
- CachedDebuggingCode{func_index, OwnedVector<int>::Of(offsets),
+ CachedDebuggingCode{func_index, base::OwnedVector<int>::Of(offsets),
dead_breakpoint, new_code});
// Increase the ref count (for the cache entry).
new_code->IncRef();
@@ -363,9 +363,9 @@ class DebugInfoImpl {
} else {
all_breakpoints.insert(insertion_point, offset);
int dead_breakpoint =
- DeadBreakpoint(func_index, VectorOf(all_breakpoints), isolate);
+ DeadBreakpoint(func_index, base::VectorOf(all_breakpoints), isolate);
new_code = RecompileLiftoffWithBreakpoints(
- func_index, VectorOf(all_breakpoints), dead_breakpoint);
+ func_index, base::VectorOf(all_breakpoints), dead_breakpoint);
}
UpdateReturnAddresses(isolate, new_code, isolate_data.stepping_frame);
}
@@ -381,7 +381,7 @@ class DebugInfoImpl {
return {breakpoints.begin(), breakpoints.end()};
}
- void UpdateBreakpoints(int func_index, Vector<int> breakpoints,
+ void UpdateBreakpoints(int func_index, base::Vector<int> breakpoints,
Isolate* isolate, StackFrameId stepping_frame,
int dead_breakpoint) {
DCHECK(!mutex_.TryLock()); // Mutex is held externally.
@@ -397,7 +397,7 @@ class DebugInfoImpl {
// Generate an additional source position for the current byte offset.
base::MutexGuard guard(&mutex_);
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
- frame->function_index(), ArrayVector(kFloodingBreakpoints), 0);
+ frame->function_index(), base::ArrayVector(kFloodingBreakpoints), 0);
UpdateReturnAddress(frame, new_code, return_location);
per_isolate_data_[frame->isolate()].stepping_frame = frame->id();
@@ -426,9 +426,9 @@ class DebugInfoImpl {
if (code->for_debugging() != kForStepping) return;
int func_index = code->index();
std::vector<int> breakpoints = FindAllBreakpoints(func_index);
- int dead_breakpoint = DeadBreakpoint(frame, VectorOf(breakpoints));
+ int dead_breakpoint = DeadBreakpoint(frame, base::VectorOf(breakpoints));
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
- func_index, VectorOf(breakpoints), dead_breakpoint);
+ func_index, base::VectorOf(breakpoints), dead_breakpoint);
UpdateReturnAddress(frame, new_code, kAfterBreakpoint);
}
@@ -440,7 +440,7 @@ class DebugInfoImpl {
bool IsStepping(WasmFrame* frame) {
Isolate* isolate = frame->wasm_instance().GetIsolate();
- if (isolate->debug()->last_step_action() == StepIn) return true;
+ if (isolate->debug()->last_step_action() == StepInto) return true;
base::MutexGuard guard(&mutex_);
auto it = per_isolate_data_.find(isolate);
return it != per_isolate_data_.end() &&
@@ -474,12 +474,12 @@ class DebugInfoImpl {
DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
if (std::binary_search(remaining.begin(), remaining.end(), offset)) return;
int dead_breakpoint =
- DeadBreakpoint(func_index, VectorOf(remaining), isolate);
- UpdateBreakpoints(func_index, VectorOf(remaining), isolate,
+ DeadBreakpoint(func_index, base::VectorOf(remaining), isolate);
+ UpdateBreakpoints(func_index, base::VectorOf(remaining), isolate,
isolate_data.stepping_frame, dead_breakpoint);
}
- void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
+ void RemoveDebugSideTables(base::Vector<WasmCode* const> codes) {
base::MutexGuard guard(&debug_side_tables_mutex_);
for (auto* code : codes) {
debug_side_tables_.erase(code);
@@ -520,7 +520,8 @@ class DebugInfoImpl {
std::vector<int>& removed = entry.second;
std::vector<int> remaining = FindAllBreakpoints(func_index);
if (HasRemovedBreakpoints(removed, remaining)) {
- RecompileLiftoffWithBreakpoints(func_index, VectorOf(remaining), 0);
+ RecompileLiftoffWithBreakpoints(func_index, base::VectorOf(remaining),
+ 0);
}
}
}
@@ -528,8 +529,7 @@ class DebugInfoImpl {
private:
struct FrameInspectionScope {
FrameInspectionScope(DebugInfoImpl* debug_info, Address pc)
- : code(debug_info->native_module_->engine()->code_manager()->LookupCode(
- pc)),
+ : code(wasm::GetWasmCodeManager()->LookupCode(pc)),
pc_offset(static_cast<int>(pc - code->instruction_start())),
debug_side_table(code->is_inspectable()
? debug_info->GetDebugSideTable(code)
@@ -757,7 +757,7 @@ class DebugInfoImpl {
static constexpr size_t kMaxCachedDebuggingCode = 3;
struct CachedDebuggingCode {
int func_index;
- OwnedVector<const int> breakpoint_offsets;
+ base::OwnedVector<const int> breakpoint_offsets;
int dead_breakpoint;
WasmCode* code;
};
@@ -855,7 +855,7 @@ void DebugInfo::RemoveBreakpoint(int func_index, int offset,
impl_->RemoveBreakpoint(func_index, offset, current_isolate);
}
-void DebugInfo::RemoveDebugSideTables(Vector<WasmCode* const> code) {
+void DebugInfo::RemoveDebugSideTables(base::Vector<WasmCode* const> code) {
impl_->RemoveDebugSideTables(code);
}
@@ -902,21 +902,7 @@ int FindNextBreakablePosition(wasm::NativeModule* native_module, int func_index,
// static
bool WasmScript::SetBreakPoint(Handle<Script> script, int* position,
Handle<BreakPoint> break_point) {
- // Special handling for on-entry breakpoints.
- if (*position == kOnEntryBreakpointPosition) {
- AddBreakpointToInfo(script, *position, break_point);
- script->set_break_on_entry(true);
-
- // Update the "break_on_entry" flag on all live instances.
- i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
- for (int i = 0; i < weak_instance_list.length(); ++i) {
- if (weak_instance_list.Get(i)->IsCleared()) continue;
- i::WasmInstanceObject instance = i::WasmInstanceObject::cast(
- weak_instance_list.Get(i)->GetHeapObject());
- instance.set_break_on_entry(true);
- }
- return true;
- }
+ DCHECK_NE(kOnEntryBreakpointPosition, *position);
// Find the function for this breakpoint.
const wasm::WasmModule* module = script->wasm_native_module()->module();
@@ -935,6 +921,23 @@ bool WasmScript::SetBreakPoint(Handle<Script> script, int* position,
}
// static
+void WasmScript::SetBreakPointOnEntry(Handle<Script> script,
+ Handle<BreakPoint> break_point) {
+ // Special handling for on-entry breakpoints.
+ AddBreakpointToInfo(script, kOnEntryBreakpointPosition, break_point);
+ script->set_break_on_entry(true);
+
+ // Update the "break_on_entry" flag on all live instances.
+ i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsCleared()) continue;
+ i::WasmInstanceObject instance =
+ i::WasmInstanceObject::cast(weak_instance_list.Get(i)->GetHeapObject());
+ instance.set_break_on_entry(true);
+ }
+}
+
+// static
bool WasmScript::SetBreakPointOnFirstBreakableForFunction(
Handle<Script> script, int func_index, Handle<BreakPoint> break_point) {
if (func_index < 0) return false;
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 1babd0650f..7e5e60ceda 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -17,7 +17,7 @@
#include "src/base/iterator.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/utils/vector.h"
+#include "src/base/vector.h"
#include "src/wasm/value-type.h"
namespace v8 {
@@ -87,8 +87,8 @@ class DebugSideTable {
// Stack height, including locals.
int stack_height() const { return stack_height_; }
- Vector<const Value> changed_values() const {
- return VectorOf(changed_values_);
+ base::Vector<const Value> changed_values() const {
+ return base::VectorOf(changed_values_);
}
const Value* FindChangedValue(int stack_index) const {
@@ -217,7 +217,7 @@ class V8_EXPORT_PRIVATE DebugInfo {
void RemoveBreakpoint(int func_index, int offset, Isolate* current_isolate);
- void RemoveDebugSideTables(Vector<WasmCode* const>);
+ void RemoveDebugSideTables(base::Vector<WasmCode* const>);
// Return the debug side table for the given code object, but only if it has
// already been created. This will never trigger generation of the table.
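
The Vector move from src/utils to src/base is mechanical, but its adapters show up all over this patch. A minimal usage sketch, assuming V8's src/base/vector.h:

    #include <vector>
    #include "src/base/vector.h"

    void VectorAdapters() {
      std::vector<int> offsets = {0, 4, 8};
      // Non-owning view over the std::vector's storage; no copy is made.
      base::Vector<int> view = base::VectorOf(offsets);
      // Owning copy of the same elements (as used for breakpoint_offsets above).
      base::OwnedVector<int> owned = base::OwnedVector<int>::Of(offsets);
      (void)view;
      (void)owned;
    }
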
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index c38236dc78..6da33f1ab2 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -118,10 +118,9 @@ class WasmGCForegroundTask : public CancelableTask {
: CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
void RunInternal() final {
- WasmEngine* engine = isolate_->wasm_engine();
// The stack can contain live frames, for instance when this is invoked
// during a pause or a breakpoint.
- engine->ReportLiveCodeFromStackForGC(isolate_);
+ GetWasmEngine()->ReportLiveCodeFromStackForGC(isolate_);
}
private:
@@ -180,7 +179,7 @@ class WeakScriptHandle {
} // namespace
std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
+ ModuleOrigin origin, base::Vector<const uint8_t> wire_bytes) {
if (origin != kWasmOrigin) return nullptr;
base::MutexGuard lock(&mutex_);
size_t prefix_hash = PrefixHash(wire_bytes);
@@ -208,6 +207,8 @@ std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
return shared_native_module;
}
}
+ // TODO(11858): This deadlocks in predictable mode, because there is only a
+ // single thread.
cache_cv_.Wait(&mutex_);
}
}
@@ -238,7 +239,7 @@ std::shared_ptr<NativeModule> NativeModuleCache::Update(
std::shared_ptr<NativeModule> native_module, bool error) {
DCHECK_NOT_NULL(native_module);
if (native_module->module()->origin != kWasmOrigin) return native_module;
- Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ base::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
DCHECK(!wire_bytes.empty());
size_t prefix_hash = PrefixHash(native_module->wire_bytes());
base::MutexGuard lock(&mutex_);
@@ -279,14 +280,14 @@ void NativeModuleCache::Erase(NativeModule* native_module) {
}
// static
-size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
+size_t NativeModuleCache::WireBytesHash(base::Vector<const uint8_t> bytes) {
return StringHasher::HashSequentialString(
reinterpret_cast<const char*>(bytes.begin()), bytes.length(),
kZeroHashSeed);
}
// static
-size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
+size_t NativeModuleCache::PrefixHash(base::Vector<const uint8_t> wire_bytes) {
// Compute the hash as a combined hash of the sections up to the code section
// header, to mirror the way streaming compilation does it.
Decoder decoder(wire_bytes.begin(), wire_bytes.end());
@@ -308,7 +309,7 @@ size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
const uint8_t* payload_start = decoder.pc();
decoder.consume_bytes(section_size, "section payload");
size_t section_hash = NativeModuleCache::WireBytesHash(
- Vector<const uint8_t>(payload_start, section_size));
+ base::Vector<const uint8_t>(payload_start, section_size));
hash = base::hash_combine(hash, section_hash);
}
return hash;
@@ -391,6 +392,10 @@ struct WasmEngine::IsolateInfo {
// Keep new modules in tiered down state.
bool keep_tiered_down = false;
+ // Keep track of whether we already added a sample for PKU support (we only
+ // want one sample per Isolate).
+ bool pku_support_sampled = false;
+
// Elapsed time since last throw/rethrow/catch event.
base::ElapsedTimer throw_timer;
base::ElapsedTimer rethrow_timer;
@@ -433,7 +438,7 @@ struct WasmEngine::NativeModuleInfo {
int8_t num_code_gcs_triggered = 0;
};
-WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
+WasmEngine::WasmEngine() = default;
WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
@@ -468,7 +473,7 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Vector<const byte> asm_js_offset_table_bytes,
+ base::Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
int compilation_id = next_compilation_id_.fetch_add(1);
TRACE_EVENT1("v8.wasm", "wasm.SyncCompileTranslatedAsmJs", "id",
@@ -549,7 +554,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
}
#endif
- constexpr Vector<const char> kNoSourceUrl;
+ constexpr base::Vector<const char> kNoSourceUrl;
Handle<Script> script =
GetOrCreateScript(isolate, native_module, kNoSourceUrl);
@@ -625,7 +630,7 @@ void WasmEngine::AsyncCompile(
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- base::Memcpy(copy.get(), bytes.start(), bytes.length());
+ memcpy(copy.get(), bytes.start(), bytes.length());
ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
} else {
@@ -653,7 +658,7 @@ void WasmEngine::AsyncCompile(
// Make a copy of the wire bytes in case the user program changes them
// during asynchronous compilation.
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- base::Memcpy(copy.get(), bytes.start(), bytes.length());
+ memcpy(copy.get(), bytes.start(), bytes.length());
AsyncCompileJob* job = CreateAsyncCompileJob(
isolate, enabled, std::move(copy), bytes.length(),
@@ -754,14 +759,14 @@ std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
namespace {
Handle<Script> CreateWasmScript(Isolate* isolate,
std::shared_ptr<NativeModule> native_module,
- Vector<const char> source_url) {
+ base::Vector<const char> source_url) {
Handle<Script> script =
isolate->factory()->NewScript(isolate->factory()->undefined_value());
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
script->set_context_data(isolate->native_context()->debug_context_id());
script->set_type(Script::TYPE_WASM);
- Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ base::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
// The source URL of the script is
// - the original source URL if available (from the streaming API),
@@ -778,7 +783,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
kZeroHashSeed);
- EmbeddedVector<char, 32> buffer;
+ base::EmbeddedVector<char, 32> buffer;
if (module->name.is_empty()) {
// Build the URL in the form "wasm://wasm/<hash>".
int url_len = SNPrintF(buffer, "wasm://wasm/%08x", hash);
@@ -814,7 +819,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
if (debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
!debug_symbols.external_url.is_empty()) {
- Vector<const char> external_url =
+ base::Vector<const char> external_url =
ModuleWireBytes(wire_bytes).GetNameOrNull(debug_symbols.external_url);
MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
external_url, AllocationType::kOld);
@@ -840,8 +845,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module,
- Vector<const char> source_url) {
- DCHECK_EQ(this, shared_native_module->engine());
+ base::Vector<const char> source_url) {
NativeModule* native_module = shared_native_module.get();
ModuleWireBytes wire_bytes(native_module->wire_bytes());
Handle<Script> script =
@@ -981,8 +985,9 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
OperationsBarrier::Token WasmEngine::StartWrapperCompilation(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
- DCHECK_EQ(1, isolates_.count(isolate));
- return isolates_[isolate]->wrapper_compilation_barrier_->TryLock();
+ auto isolate_info_it = isolates_.find(isolate);
+ if (isolate_info_it == isolates_.end()) return {};
+ return isolate_info_it->second->wrapper_compilation_barrier_->TryLock();
}
void WasmEngine::AddIsolate(Isolate* isolate) {
@@ -990,15 +995,6 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
- // Record memory protection key support.
- if (FLAG_wasm_memory_protection_keys) {
- auto* histogram =
- isolate->counters()->wasm_memory_protection_keys_support();
- bool has_mpk =
- code_manager()->memory_protection_key_ != kNoMemoryProtectionKey;
- histogram->AddSample(has_mpk ? 1 : 0);
- }
-
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
@@ -1007,7 +1003,7 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
v8::GCCallbackFlags flags, void* data) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
Counters* counters = isolate->counters();
- WasmEngine* engine = isolate->wasm_engine();
+ WasmEngine* engine = GetWasmEngine();
base::MutexGuard lock(&engine->mutex_);
DCHECK_EQ(1, engine->isolates_.count(isolate));
for (auto* native_module : engine->isolates_[isolate]->native_modules) {
@@ -1055,14 +1051,14 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
if (auto* task = info->log_codes_task) {
task->Cancel();
for (auto& log_entry : info->code_to_log) {
- WasmCode::DecrementRefCount(VectorOf(log_entry.second.code));
+ WasmCode::DecrementRefCount(base::VectorOf(log_entry.second.code));
}
info->code_to_log.clear();
}
DCHECK(info->code_to_log.empty());
}
-void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
+void WasmEngine::LogCode(base::Vector<WasmCode*> code_vec) {
if (code_vec.empty()) return;
base::MutexGuard guard(&mutex_);
NativeModule* native_module = code_vec[0]->native_module();
@@ -1125,7 +1121,7 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
code->LogCode(isolate, pair.second.source_url.get(), pair.first);
}
}
- WasmCode::DecrementRefCount(VectorOf(pair.second.code));
+ WasmCode::DecrementRefCount(base::VectorOf(pair.second.code));
}
}
@@ -1139,27 +1135,40 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
}
#endif // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
- std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
- this, isolate, enabled, code_size_estimate, std::move(module));
+ std::shared_ptr<NativeModule> native_module =
+ GetWasmCodeManager()->NewNativeModule(
+ isolate, enabled, code_size_estimate, std::move(module));
base::MutexGuard lock(&mutex_);
auto pair = native_modules_.insert(std::make_pair(
native_module.get(), std::make_unique<NativeModuleInfo>(native_module)));
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
- auto& modules_per_isolate = isolates_[isolate]->native_modules;
- modules_per_isolate.insert(native_module.get());
- if (isolates_[isolate]->keep_tiered_down) {
+ auto* isolate_info = isolates_[isolate].get();
+ isolate_info->native_modules.insert(native_module.get());
+ if (isolate_info->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
}
+
+ // Record memory protection key support.
+ if (FLAG_wasm_memory_protection_keys && !isolate_info->pku_support_sampled) {
+ isolate_info->pku_support_sampled = true;
+ auto* histogram =
+ isolate->counters()->wasm_memory_protection_keys_support();
+ bool has_mpk =
+ GetWasmCodeManager()->memory_protection_key_ != kNoMemoryProtectionKey;
+ histogram->AddSample(has_mpk ? 1 : 0);
+ }
+
isolate->counters()->wasm_modules_per_isolate()->AddSample(
- static_cast<int>(modules_per_isolate.size()));
+ static_cast<int>(isolate_info->native_modules.size()));
isolate->counters()->wasm_modules_per_engine()->AddSample(
static_cast<int>(native_modules_.size()));
return native_module;
}
std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
+ ModuleOrigin origin, base::Vector<const uint8_t> wire_bytes,
+ Isolate* isolate) {
std::shared_ptr<NativeModule> native_module =
native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
bool recompile_module = false;
@@ -1184,7 +1193,6 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
bool WasmEngine::UpdateNativeModuleCache(
bool error, std::shared_ptr<NativeModule>* native_module,
Isolate* isolate) {
- DCHECK_EQ(this, native_module->get()->engine());
// Pass {native_module} by value here to keep it alive at least until we
// return from {Update}. Otherwise, we might {Erase} it inside {Update}
// which would lock the mutex twice.
@@ -1304,7 +1312,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
}
void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
- Vector<WasmCode*> live_code) {
+ base::Vector<WasmCode*> live_code) {
TRACE_EVENT0("v8.wasm", "wasm.ReportLiveCodeForGC");
TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
live_code.size());
@@ -1331,8 +1339,7 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
Address osr_target = base::Memory<Address>(WasmFrame::cast(frame)->fp() -
kOSRTargetOffset);
if (osr_target) {
- WasmCode* osr_code =
- isolate->wasm_engine()->code_manager()->LookupCode(osr_target);
+ WasmCode* osr_code = GetWasmCodeManager()->LookupCode(osr_target);
DCHECK_NOT_NULL(osr_code);
live_wasm_code.insert(osr_code);
}
@@ -1342,8 +1349,8 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
CheckNoArchivedThreads(isolate);
- ReportLiveCodeForGC(isolate,
- OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
+ ReportLiveCodeForGC(
+ isolate, base::OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}
bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
@@ -1360,7 +1367,7 @@ bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
size_t dead_code_limit =
FLAG_stress_wasm_code_gc
? 0
- : 64 * KB + code_manager_.committed_code_space() / 10;
+ : 64 * KB + GetWasmCodeManager()->committed_code_space() / 10;
if (new_potentially_dead_code_size_ > dead_code_limit) {
bool inc_gc_count =
info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
@@ -1403,13 +1410,13 @@ void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
DCHECK_EQ(1, info->dead_code.count(code));
info->dead_code.erase(code);
}
- native_module->FreeCode(VectorOf(code_vec));
+ native_module->FreeCode(base::VectorOf(code_vec));
}
}
Handle<Script> WasmEngine::GetOrCreateScript(
Isolate* isolate, const std::shared_ptr<NativeModule>& native_module,
- Vector<const char> source_url) {
+ base::Vector<const char> source_url) {
{
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
@@ -1571,14 +1578,22 @@ void WasmEngine::PotentiallyFinishCurrentGC() {
namespace {
-WasmEngine* global_wasm_engine = nullptr;
+struct GlobalWasmState {
+ // Note: The order of fields is important here, as the WasmEngine's
+ // destructor must run first. It contains a barrier which ensures that
+ // background threads have finished, and that has to happen before the
+ // WasmCodeManager gets destroyed.
+ WasmCodeManager code_manager;
+ WasmEngine engine;
+};
+
+GlobalWasmState* global_wasm_state = nullptr;
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
- DCHECK_NULL(global_wasm_engine);
- global_wasm_engine = new WasmEngine();
+ DCHECK_NULL(global_wasm_state);
+ global_wasm_state = new GlobalWasmState();
}
// static
@@ -1586,14 +1601,18 @@ void WasmEngine::GlobalTearDown() {
// Note: This can be called multiple times in a row (see
// test-api/InitializeAndDisposeMultiple). This is fine, as
// {global_wasm_state} will be nullptr then.
- delete global_wasm_engine;
- global_wasm_engine = nullptr;
+ delete global_wasm_state;
+ global_wasm_state = nullptr;
}
-// static
-WasmEngine* WasmEngine::GetWasmEngine() {
- DCHECK_NOT_NULL(global_wasm_engine);
- return global_wasm_engine;
+WasmEngine* GetWasmEngine() {
+ DCHECK_NOT_NULL(global_wasm_state);
+ return &global_wasm_state->engine;
+}
+
+WasmCodeManager* GetWasmCodeManager() {
+ DCHECK_NOT_NULL(global_wasm_state);
+ return &global_wasm_state->code_manager;
}
// {max_mem_pages} is declared in wasm-limits.h.
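
The field order in GlobalWasmState above is load-bearing: C++ destroys non-static data members in reverse declaration order, so declaring code_manager before engine guarantees ~WasmEngine() runs first. A standalone sketch of that guarantee, with hypothetical types standing in for the real ones:

    #include <cstdio>

    struct CodeManager { ~CodeManager() { std::puts("code manager destroyed"); } };
    struct Engine      { ~Engine()      { std::puts("engine destroyed"); } };

    struct GlobalState {
      CodeManager code_manager;  // declared first  -> destroyed last
      Engine engine;             // declared second -> destroyed first
    };

    int main() {
      GlobalState state;
      // Prints "engine destroyed" before "code manager destroyed".
      return 0;
    }
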
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index d4736036cb..7209096911 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -68,7 +68,7 @@ class NativeModuleCache {
// Store the prefix hash as part of the key for faster lookup, and to
// quickly check existing prefixes for streaming compilation.
size_t prefix_hash;
- Vector<const uint8_t> bytes;
+ base::Vector<const uint8_t> bytes;
bool operator==(const Key& other) const {
bool eq = bytes == other.bytes;
@@ -98,7 +98,7 @@ class NativeModuleCache {
};
std::shared_ptr<NativeModule> MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes);
+ ModuleOrigin origin, base::Vector<const uint8_t> wire_bytes);
bool GetStreamingCompilationOwnership(size_t prefix_hash);
void StreamingCompilationFailed(size_t prefix_hash);
std::shared_ptr<NativeModule> Update(
@@ -107,13 +107,13 @@ class NativeModuleCache {
bool empty() { return map_.empty(); }
- static size_t WireBytesHash(Vector<const uint8_t> bytes);
+ static size_t WireBytesHash(base::Vector<const uint8_t> bytes);
// Hash the wire bytes up to the code section header. Used as a heuristic to
// avoid streaming compilation of modules that are likely already in the
// cache. See {GetStreamingCompilationOwnership}. Assumes that the bytes have
// already been validated.
- static size_t PrefixHash(Vector<const uint8_t> wire_bytes);
+ static size_t PrefixHash(base::Vector<const uint8_t> wire_bytes);
private:
// Each key points to the corresponding native module's wire bytes, so they
@@ -155,7 +155,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// asm.js module.
MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Vector<const byte> asm_js_offset_table_bytes,
+ base::Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode);
Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
@@ -214,9 +214,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// the same engine, recreating a full module object in the given Isolate.
Handle<WasmModuleObject> ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_module,
- Vector<const char> source_url);
-
- WasmCodeManager* code_manager() { return &code_manager_; }
+ base::Vector<const char> source_url);
AccountingAllocator* allocator() { return &allocator_; }
@@ -259,7 +257,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Trigger code logging for the given code objects in all Isolates which have
// access to the NativeModule containing this code. This method can be called
// from background threads.
- void LogCode(Vector<WasmCode*>);
+ void LogCode(base::Vector<WasmCode*>);
// Enable code logging for the given Isolate. Initially, code logging is
// enabled if {WasmCode::ShouldBeLogged(Isolate*)} returns true during
@@ -288,7 +286,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
// threads. The {wire_bytes}' underlying array should be valid at least until
// the call to {UpdateNativeModuleCache}.
std::shared_ptr<NativeModule> MaybeGetNativeModule(
- ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate);
+ ModuleOrigin origin, base::Vector<const uint8_t> wire_bytes,
+ Isolate* isolate);
// Replace the temporary {nullopt} with the new native module, or
// erase it if any error occurred. Wake up blocked threads waiting for this
@@ -326,7 +325,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Called by each Isolate to report its live code for a GC cycle. First
// version reports an externally determined set of live code (might be empty),
// second version gets live code from the execution stack of that isolate.
- void ReportLiveCodeForGC(Isolate*, Vector<WasmCode*>);
+ void ReportLiveCodeForGC(Isolate*, base::Vector<WasmCode*>);
void ReportLiveCodeFromStackForGC(Isolate*);
// Add potentially dead code. The occurrence in the set of potentially dead
@@ -343,7 +342,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
Handle<Script> GetOrCreateScript(Isolate*,
const std::shared_ptr<NativeModule>&,
- Vector<const char> source_url);
+ base::Vector<const char> source_url);
// Returns a barrier allowing background compile operations if valid and
// preventing this object from being destroyed.
@@ -357,9 +356,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
static void InitializeOncePerProcess();
static void GlobalTearDown();
- // Returns a reference to the WasmEngine shared by the entire process.
- static WasmEngine* GetWasmEngine();
-
private:
struct CurrentGCInfo;
struct IsolateInfo;
@@ -382,7 +378,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// calling this method.
void PotentiallyFinishCurrentGC();
- WasmCodeManager code_manager_;
AccountingAllocator allocator_;
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
@@ -431,6 +426,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
//////////////////////////////////////////////////////////////////////////////
};
+// Returns a reference to the WasmEngine shared by the entire process.
+V8_EXPORT_PRIVATE WasmEngine* GetWasmEngine();
+
+// Returns a reference to the WasmCodeManager shared by the entire process.
+V8_EXPORT_PRIVATE WasmCodeManager* GetWasmCodeManager();
+
} // namespace wasm
} // namespace internal
} // namespace v8
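
With the engine and code manager turned into process-wide singletons, call sites no longer thread them through the Isolate. A hedged before/after sketch of the access pattern this patch converges on:

    // Before: per-isolate plumbing.
    //   WasmCode* code =
    //       isolate->wasm_engine()->code_manager()->LookupCode(pc);
    // After: free functions over process-wide state.
    WasmCode* code = wasm::GetWasmCodeManager()->LookupCode(pc);
    wasm::GetWasmEngine()->ReportLiveCodeFromStackForGC(isolate);
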
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index f3d900b79a..101d563876 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -238,7 +238,7 @@ void float32_to_uint64_sat_wrapper(Address data) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
- if (input >= std::numeric_limits<uint64_t>::max()) {
+ if (input >= static_cast<float>(std::numeric_limits<uint64_t>::max())) {
WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
return;
}
@@ -268,7 +268,7 @@ void float64_to_uint64_sat_wrapper(Address data) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
- if (input >= std::numeric_limits<uint64_t>::max()) {
+ if (input >= static_cast<double>(std::numeric_limits<uint64_t>::max())) {
WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
return;
}
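
The explicit casts make the lossy conversion visible rather than implicit: 2^64 - 1 has no exact float or double representation, so the limit rounds up to exactly 2^64, which is the correct saturation threshold. A runnable check of that rounding, assuming IEEE-754 types:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      constexpr float limit_f =
          static_cast<float>(std::numeric_limits<uint64_t>::max());
      constexpr double limit_d =
          static_cast<double>(std::numeric_limits<uint64_t>::max());
      // Both round up from 2^64 - 1 to exactly 2^64 (0x1p+64).
      std::printf("%a %a\n", static_cast<double>(limit_f), limit_d);
      std::printf("%d\n", limit_f == 0x1p64f && limit_d == 0x1p64);
      return 0;
    }
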
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 9adf3d662f..1c4c2acaec 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -24,6 +24,11 @@
/* V8 side owner: jkummerow */ \
V(gc, "garbage collection", false) \
\
+ /* Non-specified, V8-only experimental additions to the GC proposal */ \
+ /* V8 side owner: jkummerow */ \
+ V(gc_experiments, "garbage collection V8-only experimental features", false) \
+ V(nn_locals, "allow non-defaultable/non-nullable locals", false) \
+ \
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
/* V8 side owner: manoskouk */ \
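
New entries in this list are picked up wherever the V(...) macro is expanded. A hedged sketch of the usual X-macro consumption, with a hypothetical list macro standing in for the real one:

    // Hypothetical consumer; the real expansion sites live elsewhere in V8.
    #define HYPOTHETICAL_FEATURE_LIST(V)                                    \
      V(gc_experiments, "garbage collection V8-only experimental features", \
        false)                                                              \
      V(nn_locals, "allow non-defaultable/non-nullable locals", false)

    struct Features {
    #define DECL_FIELD(name, desc, default_value) bool name = default_value;
      HYPOTHETICAL_FEATURE_LIST(DECL_FIELD)
    #undef DECL_FIELD
    };
    // Expands to: bool gc_experiments = false; bool nn_locals = false;
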
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index ef71d4ccae..c760634a76 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -33,6 +33,16 @@ WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
return it->second;
}
+WasmCode* WasmImportWrapperCache::MaybeGet(compiler::WasmImportCallKind kind,
+ const FunctionSig* sig,
+ int expected_arity) const {
+ base::MutexGuard lock(&mutex_);
+
+ auto it = entry_map_.find({kind, sig, expected_arity});
+ if (it == entry_map_.end()) return nullptr;
+ return it->second;
+}
+
WasmImportWrapperCache::~WasmImportWrapperCache() {
std::vector<WasmCode*> ptrs;
ptrs.reserve(entry_map_.size());
@@ -41,7 +51,7 @@ WasmImportWrapperCache::~WasmImportWrapperCache() {
ptrs.push_back(e.second);
}
}
- WasmCode::DecrementRefCount(VectorOf(ptrs));
+ WasmCode::DecrementRefCount(base::VectorOf(ptrs));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index abf0cf7d68..57c92bc6bb 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -75,6 +75,9 @@ class WasmImportWrapperCache {
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
const FunctionSig* sig,
int expected_arity) const;
+ // Thread-safe. Returns nullptr if the key doesn't exist in the map.
+ WasmCode* MaybeGet(compiler::WasmImportCallKind kind, const FunctionSig* sig,
+ int expected_arity) const;
~WasmImportWrapperCache();
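
Unlike Get, MaybeGet turns a cache miss into nullptr instead of asserting, so callers can probe before deciding to compile. A hedged caller sketch; CompileImportWrapper is a hypothetical stand-in for whatever compiles and inserts the wrapper:

    WasmCode* wrapper = cache->MaybeGet(kind, sig, expected_arity);
    if (wrapper == nullptr) {
      // Not cached yet; fall back to compilation and insertion.
      wrapper = CompileImportWrapper(kind, sig, expected_arity);
    }
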
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
index 6348c58193..14a7e3b6a6 100644
--- a/deps/v8/src/wasm/wasm-init-expr.cc
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -4,52 +4,56 @@
#include "src/wasm/wasm-init-expr.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-module.h"
+
namespace v8 {
namespace internal {
namespace wasm {
-std::ostream& operator<<(std::ostream& os, const WasmInitExpr& expr) {
- os << "(";
- switch (expr.kind()) {
- case WasmInitExpr::kNone:
- UNREACHABLE();
- case WasmInitExpr::kGlobalGet:
- os << "global.get " << expr.immediate().index;
- break;
- case WasmInitExpr::kI32Const:
- os << "i32.const " << expr.immediate().i32_const;
- break;
- case WasmInitExpr::kI64Const:
- os << "i64.const " << expr.immediate().i64_const;
- break;
- case WasmInitExpr::kF32Const:
- os << "f32.const " << expr.immediate().f32_const;
- break;
- case WasmInitExpr::kF64Const:
- os << "f64.const " << expr.immediate().f64_const;
- break;
- case WasmInitExpr::kS128Const:
- os << "s128.const 0x" << std::hex;
- for (uint8_t b : expr.immediate().s128_const) {
- os << b;
+ValueType WasmInitExpr::type(const WasmModule* module,
+ const WasmFeatures& enabled_features) const {
+ switch (kind()) {
+ case kNone:
+ return kWasmBottom;
+ case kGlobalGet:
+ return immediate().index < module->globals.size()
+ ? module->globals[immediate().index].type
+ : kWasmBottom;
+ case kI32Const:
+ return kWasmI32;
+ case kI64Const:
+ return kWasmI64;
+ case kF32Const:
+ return kWasmF32;
+ case kF64Const:
+ return kWasmF64;
+ case kS128Const:
+ return kWasmS128;
+ case kRefFuncConst: {
+ uint32_t heap_type = enabled_features.has_typed_funcref()
+ ? module->functions[immediate().index].sig_index
+ : HeapType::kFunc;
+ return ValueType::Ref(heap_type, kNonNullable);
+ }
+ case kRefNullConst:
+ return ValueType::Ref(immediate().heap_type, kNullable);
+ case kStructNewWithRtt:
+ case kArrayInit:
+ return ValueType::Ref(immediate().index, kNonNullable);
+ case kRttCanon:
+ return ValueType::Rtt(immediate().heap_type, 0);
+ case kRttSub:
+ case kRttFreshSub: {
+ ValueType operand_type = operands()[0].type(module, enabled_features);
+ if (!operand_type.is_rtt()) return kWasmBottom;
+ if (operand_type.has_depth()) {
+ return ValueType::Rtt(immediate().heap_type, operand_type.depth() + 1);
+ } else {
+ return ValueType::Rtt(immediate().heap_type);
}
- os << std::dec;
- break;
- case WasmInitExpr::kRefNullConst:
- os << "ref.null " << expr.immediate().heap_type;
- break;
- case WasmInitExpr::kRefFuncConst:
- os << "ref.func " << expr.immediate().index;
- break;
- case WasmInitExpr::kRttCanon:
- os << "rtt.canon " << expr.immediate().heap_type;
- break;
- case WasmInitExpr::kRttSub:
- os << "rtt.sub " << *expr.operand();
- break;
+ }
}
- os << ")";
- return os;
}
} // namespace wasm
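
The recursive cases bottom out in the operands vector: an rtt.sub adds one to its operand's depth whenever that depth is known. A hedged sketch of what type() yields for a two-level chain, assuming module and features arguments and valid type indices 0 and 1:

    WasmInitExpr canon = WasmInitExpr::RttCanon(0);
    // canon.type(module, features) == ValueType::Rtt(0, /*depth=*/0)
    WasmInitExpr sub = WasmInitExpr::RttSub(1, std::move(canon));
    // sub.type(module, features)   == ValueType::Rtt(1, /*depth=*/1)
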
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
index 39fc1a7ee6..bf68265b2a 100644
--- a/deps/v8/src/wasm/wasm-init-expr.h
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -17,6 +17,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+struct WasmModule;
+class WasmFeatures;
+
// Representation of an initializer expression.
class WasmInitExpr {
public:
@@ -30,8 +33,11 @@ class WasmInitExpr {
kS128Const,
kRefNullConst,
kRefFuncConst,
+ kStructNewWithRtt,
+ kArrayInit,
kRttCanon,
- kRttSub
+ kRttSub,
+ kRttFreshSub,
};
union Immediate {
@@ -58,7 +64,7 @@ class WasmInitExpr {
immediate_.f64_const = v;
}
explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
- base::Memcpy(immediate_.s128_const.data(), v, kSimd128Size);
+ memcpy(immediate_.s128_const.data(), v, kSimd128Size);
}
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
@@ -84,6 +90,24 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr StructNewWithRtt(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewWithRtt;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
+ static WasmInitExpr ArrayInit(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kArrayInit;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
static WasmInitExpr RttCanon(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kRttCanon;
@@ -95,13 +119,21 @@ class WasmInitExpr {
WasmInitExpr expr;
expr.kind_ = kRttSub;
expr.immediate_.index = index;
- expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
+ expr.operands_.push_back(std::move(supertype));
+ return expr;
+ }
+
+ static WasmInitExpr RttFreshSub(uint32_t index, WasmInitExpr supertype) {
+ WasmInitExpr expr;
+ expr.kind_ = kRttFreshSub;
+ expr.immediate_.index = index;
+ expr.operands_.push_back(std::move(supertype));
return expr;
}
Immediate immediate() const { return immediate_; }
Operator kind() const { return kind_; }
- WasmInitExpr* operand() const { return operand_.get(); }
+ const std::vector<WasmInitExpr>& operands() const { return operands_; }
bool operator==(const WasmInitExpr& other) const {
if (kind() != other.kind()) return false;
@@ -124,25 +156,40 @@ class WasmInitExpr {
return immediate().s128_const == other.immediate().s128_const;
case kRefNullConst:
return immediate().heap_type == other.immediate().heap_type;
+ case kStructNewWithRtt:
+ if (immediate().index != other.immediate().index) return false;
+ DCHECK_EQ(operands().size(), other.operands().size());
+ for (uint32_t i = 0; i < operands().size(); i++) {
+ if (operands()[i] != other.operands()[i]) return false;
+ }
+ return true;
+ case kArrayInit:
+ if (immediate().index != other.immediate().index) return false;
+ if (operands().size() != other.operands().size()) return false;
+ for (uint32_t i = 0; i < operands().size(); i++) {
+ if (operands()[i] != other.operands()[i]) return false;
+ }
+ return true;
case kRttSub:
+ case kRttFreshSub:
return immediate().index == other.immediate().index &&
- *operand() == *other.operand();
+ operands()[0] == other.operands()[0];
}
}
- V8_INLINE bool operator!=(const WasmInitExpr& other) {
+ V8_INLINE bool operator!=(const WasmInitExpr& other) const {
return !(*this == other);
}
+ ValueType type(const WasmModule* module,
+ const WasmFeatures& enabled_features) const;
+
private:
Immediate immediate_;
Operator kind_;
- std::unique_ptr<WasmInitExpr> operand_ = nullptr;
+ std::vector<WasmInitExpr> operands_;
};
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const WasmInitExpr& expr);
-
} // namespace wasm
} // namespace internal
} // namespace v8
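
Replacing the single unique_ptr operand with a vector is what makes the multi-operand constructors possible. A hedged construction sketch for a struct initializer, assuming a WasmInitExpr(int32_t) constructor analogous to the f64 one above and a hypothetical struct type index:

    constexpr uint32_t kTypeIndex = 0;  // hypothetical struct type index
    std::vector<WasmInitExpr> operands;
    operands.push_back(WasmInitExpr(int32_t{42}));           // field value
    operands.push_back(WasmInitExpr::RttCanon(kTypeIndex));  // trailing rtt
    WasmInitExpr init =
        WasmInitExpr::StructNewWithRtt(kTypeIndex, std::move(operands));
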
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 70492135de..a452e51855 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -49,13 +49,13 @@ class WasmStreaming::WasmStreamingImpl {
: isolate_(isolate), resolver_(std::move(resolver)) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation(
+ streaming_decoder_ = i::wasm::GetWasmEngine()->StartStreamingCompilation(
i_isolate, enabled_features, handle(i_isolate->context(), i_isolate),
api_method_name, resolver_);
}
void OnBytesReceived(const uint8_t* bytes, size_t size) {
- streaming_decoder_->OnBytesReceived(i::VectorOf(bytes, size));
+ streaming_decoder_->OnBytesReceived(base::VectorOf(bytes, size));
}
void Finish() { streaming_decoder_->Finish(); }
@@ -81,16 +81,14 @@ class WasmStreaming::WasmStreamingImpl {
streaming_decoder_->SetModuleCompiledCallback(
[client, streaming_decoder = streaming_decoder_](
const std::shared_ptr<i::wasm::NativeModule>& native_module) {
- i::Vector<const char> url = streaming_decoder->url();
+ base::Vector<const char> url = streaming_decoder->url();
auto compiled_wasm_module =
CompiledWasmModule(native_module, url.begin(), url.size());
client->OnModuleCompiled(compiled_wasm_module);
});
}
- void SetUrl(internal::Vector<const char> url) {
- streaming_decoder_->SetUrl(url);
- }
+ void SetUrl(base::Vector<const char> url) { streaming_decoder_->SetUrl(url); }
private:
Isolate* const isolate_;
@@ -134,7 +132,7 @@ void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
void WasmStreaming::SetUrl(const char* url, size_t length) {
TRACE_EVENT0("v8.wasm", "wasm.SetUrl");
- impl_->SetUrl(internal::VectorOf(url, length));
+ impl_->SetUrl(base::VectorOf(url, length));
}
// static
@@ -419,7 +417,7 @@ class AsyncInstantiateCompileResultResolver
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
if (finished_) return;
finished_ = true;
- isolate_->wasm_engine()->AsyncInstantiate(
+ i::wasm::GetWasmEngine()->AsyncInstantiate(
isolate_,
std::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
result),
@@ -518,7 +516,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// Asynchronous compilation handles copying wire bytes if necessary.
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
+ i::wasm::GetWasmEngine()->AsyncCompile(i_isolate, enabled_features,
std::move(resolver), bytes, is_shared,
kAPIMethodName);
}
@@ -637,14 +635,14 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- base::Memcpy(copy.get(), bytes.start(), bytes.length());
+ memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- validated = i_isolate->wasm_engine()->SyncValidate(
+ validated = i::wasm::GetWasmEngine()->SyncValidate(
i_isolate, enabled_features, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- validated = i_isolate->wasm_engine()->SyncValidate(i_isolate,
+ validated = i::wasm::GetWasmEngine()->SyncValidate(i_isolate,
enabled_features, bytes);
}
@@ -680,14 +678,14 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- base::Memcpy(copy.get(), bytes.start(), bytes.length());
+ memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i_isolate->wasm_engine()->SyncCompile(
+ module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i_isolate->wasm_engine()->SyncCompile(
+ module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes);
}
@@ -772,7 +770,7 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
GetValueAsImports(ffi, &thrower);
if (thrower.error()) return {};
- instance_object = i_isolate->wasm_engine()->SyncInstantiate(
+ instance_object = i::wasm::GetWasmEngine()->SyncInstantiate(
i_isolate, &thrower, i::Handle<i::WasmModuleObject>::cast(module_obj),
maybe_imports, i::MaybeHandle<i::JSArrayBuffer>());
}
@@ -943,7 +941,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::WasmModuleObject> module_obj =
i::Handle<i::WasmModuleObject>::cast(first_arg);
- i_isolate->wasm_engine()->AsyncInstantiate(i_isolate, std::move(resolver),
+ i::wasm::GetWasmEngine()->AsyncInstantiate(i_isolate, std::move(resolver),
module_obj, maybe_imports);
return;
}
@@ -973,7 +971,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Asynchronous compilation handles copying wire bytes if necessary.
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
+ i::wasm::GetWasmEngine()->AsyncCompile(i_isolate, enabled_features,
std::move(compilation_resolver), bytes,
is_shared, kAPIMethodName);
}
@@ -1140,14 +1138,14 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
int64_t initial = 0;
if (!GetInitialOrMinimumProperty(isolate, &thrower, context, descriptor,
- &initial, 0, i::wasm::max_mem_pages())) {
+ &initial, 0, i::wasm::kSpecMaxMemoryPages)) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = i::WasmMemoryObject::kNoMaximum;
if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "maximum"), nullptr, &maximum,
- initial, i::wasm::max_mem_pages())) {
+ initial, i::wasm::kSpecMaxMemoryPages)) {
return;
}
@@ -1376,7 +1374,6 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
- break;
}
break;
}
@@ -1396,15 +1393,6 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(global_js_object));
}
-// WebAssembly.Exception
-void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = args.GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
- thrower.TypeError("WebAssembly.Exception cannot be called");
-}
-
namespace {
uint32_t GetIterableLength(i::Isolate* isolate, Local<Context> context,
@@ -1420,6 +1408,69 @@ uint32_t GetIterableLength(i::Isolate* isolate, Local<Context> context,
} // namespace
+// WebAssembly.Exception
+void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Exception()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Exception must be invoked with 'new'");
+ return;
+ }
+ if (!args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be an exception type");
+ return;
+ }
+
+ Local<Object> event_type = Local<Object>::Cast(args[0]);
+ Local<Context> context = isolate->GetCurrentContext();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+
+ // Load the 'parameters' property of the event type.
+ Local<String> parameters_key = v8_str(isolate, "parameters");
+ v8::MaybeLocal<v8::Value> parameters_maybe =
+ event_type->Get(context, parameters_key);
+ v8::Local<v8::Value> parameters_value;
+ if (!parameters_maybe.ToLocal(&parameters_value) ||
+ !parameters_value->IsObject()) {
+ thrower.TypeError("Argument 0 must be an exception type with 'parameters'");
+ return;
+ }
+ Local<Object> parameters = parameters_value.As<Object>();
+ uint32_t parameters_len = GetIterableLength(i_isolate, context, parameters);
+ if (parameters_len == i::kMaxUInt32) {
+ thrower.TypeError("Argument 0 contains parameters without 'length'");
+ return;
+ }
+ if (parameters_len > i::wasm::kV8MaxWasmFunctionParams) {
+ thrower.TypeError("Argument 0 contains too many parameters");
+ return;
+ }
+
+ // Decode the exception type and construct a signature.
+ std::vector<i::wasm::ValueType> param_types(parameters_len,
+ i::wasm::kWasmVoid);
+ for (uint32_t i = 0; i < parameters_len; ++i) {
+ i::wasm::ValueType& type = param_types[i];
+ MaybeLocal<Value> maybe = parameters->Get(context, i);
+ if (!GetValueType(isolate, maybe, context, &type, enabled_features) ||
+ type == i::wasm::kWasmVoid) {
+ thrower.TypeError(
+ "Argument 0 parameter type at index #%u must be a value type", i);
+ return;
+ }
+ }
+ const i::wasm::FunctionSig sig{0, parameters_len, param_types.data()};
+ // Set the tag index to 0. It is only used for debugging purposes, and has no
+ // meaningful value when declared outside of a wasm module.
+ auto tag = i::WasmExceptionTag::New(i_isolate, 0);
+ i::Handle<i::Object> exception =
+ i::WasmExceptionObject::New(i_isolate, &sig, tag);
+ args.GetReturnValue().Set(Utils::ToLocal(exception));
+}
+
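
The FunctionSig built above follows V8's Signature convention: the reps array stores return types first, then parameter types, so with zero returns the parameter array can be passed directly. A hedged standalone sketch of that shape:

    // Two i32 parameters, no returns; reps == params since return_count is 0.
    std::vector<i::wasm::ValueType> params(2, i::wasm::kWasmI32);
    const i::wasm::FunctionSig sig{/*return_count=*/0,
                                   /*parameter_count=*/2, params.data()};
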
// WebAssembly.Function
void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1443,8 +1494,8 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::MaybeLocal<v8::Value> parameters_maybe =
function_type->Get(context, parameters_key);
v8::Local<v8::Value> parameters_value;
- if (!parameters_maybe.ToLocal(&parameters_value)) return;
- if (!parameters_value->IsObject()) {
+ if (!parameters_maybe.ToLocal(&parameters_value) ||
+ !parameters_value->IsObject()) {
thrower.TypeError("Argument 0 must be a function type with 'parameters'");
return;
}
@@ -1486,8 +1537,8 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (uint32_t i = 0; i < parameters_len; ++i) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = parameters->Get(context, i);
- if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
- if (type == i::wasm::kWasmVoid) {
+ if (!GetValueType(isolate, maybe, context, &type, enabled_features) ||
+ type == i::wasm::kWasmVoid) {
thrower.TypeError(
"Argument 0 parameter type at index #%u must be a value type", i);
return;
@@ -1725,30 +1776,24 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
- uint32_t delta_size;
- if (!EnforceUint32("Argument 0", args[0], context, &thrower, &delta_size)) {
+ uint32_t delta_pages;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &delta_pages)) {
return;
}
- uint64_t max_size64 = receiver->maximum_pages();
- if (max_size64 > uint64_t{i::wasm::max_mem_pages()}) {
- max_size64 = i::wasm::max_mem_pages();
- }
i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
- DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+ uint64_t old_pages64 = old_buffer->byte_length() / i::wasm::kWasmPageSize;
+ uint64_t new_pages64 = old_pages64 + static_cast<uint64_t>(delta_pages);
- uint64_t old_size64 = old_buffer->byte_length() / i::wasm::kWasmPageSize;
- uint64_t new_size64 = old_size64 + static_cast<uint64_t>(delta_size);
-
- if (new_size64 > max_size64) {
+ if (new_pages64 > static_cast<uint64_t>(receiver->maximum_pages())) {
thrower.RangeError("Maximum memory size exceeded");
return;
}
- int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver, delta_size);
+ int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver, delta_pages);
if (ret == -1) {
- thrower.RangeError("Unable to grow instance memory.");
+ thrower.RangeError("Unable to grow instance memory");
return;
}
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
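
Doing the page arithmetic in uint64_t keeps the bounds check sound: 32-bit addition could wrap a huge delta past the maximum. A runnable sketch of the failure mode, with made-up page counts:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t old_pages = 2;
      uint32_t delta_pages = 0xFFFFFFFFu;                 // absurd grow request
      uint32_t wrapped = old_pages + delta_pages;         // wraps around to 1
      uint64_t safe = uint64_t{old_pages} + delta_pages;  // 4294967297
      // The wrapped sum would sneak under any maximum; the widened one cannot.
      std::printf("wrapped=%u safe=%llu\n", wrapped,
                  static_cast<unsigned long long>(safe));
      return 0;
    }
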
@@ -1851,13 +1896,11 @@ void WebAssemblyGlobalGetValueCommon(
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
- break;
}
break;
case i::wasm::kRtt:
case i::wasm::kRttWithDepth:
UNIMPLEMENTED(); // TODO(7748): Implement.
- break;
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kBottom:
@@ -1891,7 +1934,7 @@ void WebAssemblyGlobalSetValue(
thrower.TypeError("Can't set the value of an immutable global.");
return;
}
- if (args[0]->IsUndefined()) {
+ if (args.Length() == 0) {
thrower.TypeError("Argument 0 is required");
return;
}
@@ -1947,14 +1990,12 @@ void WebAssemblyGlobalSetValue(
default:
// TODO(7748): Implement these.
UNIMPLEMENTED();
- break;
}
break;
case i::wasm::kRtt:
case i::wasm::kRttWithDepth:
// TODO(7748): Implement.
UNIMPLEMENTED();
- break;
case i::wasm::kI8:
case i::wasm::kI16:
case i::wasm::kBottom:
@@ -2102,7 +2143,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<String> name = v8_str(isolate, "WebAssembly");
// Not supposed to be called, hence using the kIllegal builtin as code.
Handle<SharedFunctionInfo> info =
- factory->NewSharedFunctionInfoForBuiltin(name, Builtins::kIllegal);
+ factory->NewSharedFunctionInfoForBuiltin(name, Builtin::kIllegal);
info->set_language_mode(LanguageMode::kStrict);
Handle<JSFunction> cons =
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 9e565db0e8..9e2ddc7fcc 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -20,8 +20,8 @@ namespace v8 {
namespace internal {
namespace wasm {
-// This constant is defined in the Wasm JS API spec and as such only
-// concern JS embeddings.
+// This constant limits the amount of *declared* memory. At runtime, memory can
+// only grow up to kV8MaxWasmMemoryPages.
constexpr size_t kSpecMaxMemoryPages = 65536;
// The following limits are imposed by V8 on WebAssembly modules.
@@ -35,9 +35,13 @@ constexpr size_t kV8MaxWasmExceptions = 1000000;
constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
// This indicates the maximum memory size our implementation supports.
-// Don't use this limit directly; use {max_mem_pages()} instead to take the
+// Do not use this limit directly; use {max_mem_pages()} instead to take the
// spec'ed limit as well as command line flag into account.
-constexpr size_t kV8MaxWasmMemoryPages = 65536; // = 4 GiB
+// Also, do not use this limit to validate declared memory, use
+// kSpecMaxMemoryPages for that.
+constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
+ ? 32768 // = 2 GiB
+ : 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
constexpr size_t kV8MaxWasmFunctionSize = 7654321;
@@ -57,6 +61,7 @@ constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
// Maximum supported by implementation: ((1<<27)-3).
// Reason: total object size in bytes must fit into a Smi, for filler objects.
constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
+constexpr size_t kV8MaxWasmArrayInitLength = 999;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
"v8 should not exceed WebAssembly's non-web embedding limits");
@@ -68,17 +73,21 @@ constexpr uint64_t kWasmMaxHeapOffset =
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
-// Defined in wasm-engine.cc.
+// The following functions are defined in wasm-engine.cc.
+
+// Maximum number of pages we can allocate. This might be lower than the
+// number of pages that can be declared (e.g. as a maximum), which is bounded
+// by kSpecMaxMemoryPages.
// TODO(wasm): Make this size_t for wasm64. Currently the --wasm-max-mem-pages
// flag is only uint32_t.
V8_EXPORT_PRIVATE uint32_t max_mem_pages();
-uint32_t max_table_init_entries();
-size_t max_module_size();
inline uint64_t max_mem_bytes() {
return uint64_t{max_mem_pages()} * kWasmPageSize;
}
+uint32_t max_table_init_entries();
+size_t max_module_size();
+
} // namespace wasm
} // namespace internal
} // namespace v8
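
The pointer-size split works out to 2 GiB of addressable wasm memory on 32-bit targets and 4 GiB on 64-bit targets. A compile-time check of that arithmetic, assuming the 64 KiB wasm page size:

    #include <cstdint>

    constexpr uint64_t kWasmPageSize = 64 * 1024;  // 64 KiB
    static_assert(uint64_t{32768} * kWasmPageSize == uint64_t{2} << 30,
                  "32-bit limit is 2 GiB");
    static_assert(uint64_t{65536} * kWasmPageSize == uint64_t{4} << 30,
                  "64-bit limit is 4 GiB");
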
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 41fa4f6b6b..e5209cdcde 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -120,6 +120,23 @@ void WasmFunctionBuilder::EmitWithU32V(WasmOpcode opcode, uint32_t immediate) {
body_.write_u32v(immediate);
}
+namespace {
+void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
+ buffer->write_u8(type.value_type_code());
+ if (type.encoding_needs_heap_type()) {
+ buffer->write_i32v(type.heap_type().code());
+ }
+ if (type.is_rtt()) {
+ if (type.has_depth()) buffer->write_u32v(type.depth());
+ buffer->write_u32v(type.ref_index());
+ }
+}
+} // namespace
+
+void WasmFunctionBuilder::EmitValueType(ValueType type) {
+ WriteValueType(&body_, type);
+}
+
void WasmFunctionBuilder::EmitI32Const(int32_t value) {
EmitWithI32V(kExprI32Const, value);
}
@@ -148,7 +165,9 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
EmitCode(placeholder_bytes, arraysize(placeholder_bytes));
}
-void WasmFunctionBuilder::SetName(Vector<const char> name) { name_ = name; }
+void WasmFunctionBuilder::SetName(base::Vector<const char> name) {
+ name_ = name;
+}
void WasmFunctionBuilder::AddAsmWasmOffset(size_t call_position,
size_t to_number_position) {
@@ -373,16 +392,17 @@ uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
return static_cast<uint32_t>(tables_.size() - 1);
}
-uint32_t WasmModuleBuilder::AddImport(Vector<const char> name, FunctionSig* sig,
- Vector<const char> module) {
+uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
+ FunctionSig* sig,
+ base::Vector<const char> module) {
DCHECK(adding_imports_allowed_);
function_imports_.push_back({module, name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
-uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
+uint32_t WasmModuleBuilder::AddGlobalImport(base::Vector<const char> name,
ValueType type, bool mutability,
- Vector<const char> module) {
+ base::Vector<const char> module) {
global_imports_.push_back({module, name, type.value_type_code(), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -391,7 +411,7 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
start_function_index_ = function->func_index();
}
-void WasmModuleBuilder::AddExport(Vector<const char> name,
+void WasmModuleBuilder::AddExport(base::Vector<const char> name,
ImportExportKindCode kind, uint32_t index) {
DCHECK_LE(index, std::numeric_limits<int>::max());
exports_.push_back({name, kind, static_cast<int>(index)});
@@ -399,13 +419,13 @@ void WasmModuleBuilder::AddExport(Vector<const char> name,
uint32_t WasmModuleBuilder::AddExportedGlobal(ValueType type, bool mutability,
WasmInitExpr init,
- Vector<const char> name) {
+ base::Vector<const char> name) {
uint32_t index = AddGlobal(type, mutability, std::move(init));
AddExport(name, kExternalGlobal, index);
return index;
}
-void WasmModuleBuilder::ExportImportedFunction(Vector<const char> name,
+void WasmModuleBuilder::ExportImportedFunction(base::Vector<const char> name,
int import_index) {
#if DEBUG
// The size of function_imports_ must not change any more.
@@ -434,17 +454,6 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
-void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
- buffer->write_u8(type.value_type_code());
- if (type.encoding_needs_heap_type()) {
- buffer->write_i32v(type.heap_type().code());
- }
- if (type.is_rtt()) {
- if (type.has_depth()) buffer->write_u32v(type.depth());
- buffer->write_u32v(type.ref_index());
- }
-}
-
void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
ValueType type) {
switch (init.kind()) {
@@ -504,11 +513,16 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
break;
case kOptRef:
buffer->write_u8(kExprRefNull);
+ buffer->write_i32v(type.heap_type().code());
+ break;
+ case kS128:
+ buffer->write_u8(static_cast<byte>(kSimdPrefix));
+ buffer->write_u8(static_cast<byte>(kExprS128Const & 0xff));
+ for (int i = 0; i < kSimd128Size; i++) buffer->write_u8(0);
break;
case kI8:
case kI16:
case kVoid:
- case kS128:
case kBottom:
case kRef:
case kRtt:
@@ -517,6 +531,25 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
break;
}
+ case WasmInitExpr::kStructNewWithRtt:
+ STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
+ for (const WasmInitExpr& operand : init.operands()) {
+ WriteInitializerExpression(buffer, operand, kWasmBottom);
+ }
+ buffer->write_u8(kGCPrefix);
+ buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
+ buffer->write_u32v(init.immediate().index);
+ break;
+ case WasmInitExpr::kArrayInit:
+ STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
+ for (const WasmInitExpr& operand : init.operands()) {
+ WriteInitializerExpression(buffer, operand, kWasmBottom);
+ }
+ buffer->write_u8(kGCPrefix);
+ buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
+ buffer->write_u32v(init.immediate().index);
+ buffer->write_u32v(static_cast<uint32_t>(init.operands().size() - 1));
+ break;
case WasmInitExpr::kRttCanon:
STATIC_ASSERT((kExprRttCanon >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
@@ -524,11 +557,15 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
break;
case WasmInitExpr::kRttSub:
+ case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub / rtt.fresh_sub must be emitted first.
- WriteInitializerExpression(buffer, *init.operand(), kWasmBottom);
+ WriteInitializerExpression(buffer, init.operands()[0], kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprRttSub));
+ buffer->write_u8(static_cast<uint8_t>(init.kind() == WasmInitExpr::kRttSub
+ ? kExprRttSub
+ : kExprRttFreshSub));
buffer->write_i32v(static_cast<int32_t>(init.immediate().index));
break;
}
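
The length written for array.init is operands().size() - 1 because the final operand is the rtt, not an element. A hedged sketch of the byte stream the kArrayInit case above produces for N elements:

    //   <element 0> ... <element N-1> <rtt>   each via WriteInitializerExpression
    //   0xFB                                  kGCPrefix
    //   <kExprArrayInit & 0xff>               opcode low byte
    //   <array type index : u32v>
    //   <N : u32v>                            element count, rtt excluded
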
@@ -749,7 +786,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit a placeholder for section length.
size_t start = buffer->reserve_u32v();
// Emit custom section name.
- buffer->write_string(CStrVector("compilationHints"));
+ buffer->write_string(base::CStrVector("compilationHints"));
// Emit hint count.
buffer->write_size(functions_.size());
// Emit hint bytes.
@@ -794,7 +831,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit a placeholder for the length.
size_t start = buffer->reserve_u32v();
// Emit the section string.
- buffer->write_string(CStrVector("name"));
+ buffer->write_string(base::CStrVector("name"));
// Emit a subsection for the function names.
buffer->write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index c1d15a834e..d36db5f009 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -11,8 +11,8 @@
#include "src/base/memory.h"
#include "src/base/platform/wrappers.h"
+#include "src/base/vector.h"
#include "src/codegen/signature.h"
-#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/value-type.h"
@@ -93,11 +93,11 @@ class ZoneBuffer : public ZoneObject {
void write(const byte* data, size_t size) {
if (size == 0) return;
EnsureSpace(size);
- base::Memcpy(pos_, data, size);
+ memcpy(pos_, data, size);
pos_ += size;
}
- void write_string(Vector<const char> name) {
+ void write_string(base::Vector<const char> name) {
write_size(name.length());
write(reinterpret_cast<const byte*>(name.begin()), name.length());
}
@@ -139,7 +139,7 @@ class ZoneBuffer : public ZoneObject {
if ((pos_ + size) > end_) {
size_t new_size = size + (end_ - buffer_) * 2;
byte* new_buffer = zone_->NewArray<byte, Buffer>(new_size);
- base::Memcpy(new_buffer, buffer_, (pos_ - buffer_));
+ memcpy(new_buffer, buffer_, (pos_ - buffer_));
pos_ = new_buffer + (pos_ - buffer_);
buffer_ = new_buffer;
end_ = new_buffer + new_size;
@@ -186,8 +186,9 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
void EmitWithI32V(WasmOpcode opcode, int32_t immediate);
void EmitWithU32V(WasmOpcode opcode, uint32_t immediate);
+ void EmitValueType(ValueType type);
void EmitDirectCallIndex(uint32_t index);
- void SetName(Vector<const char> name);
+ void SetName(base::Vector<const char> name);
void AddAsmWasmOffset(size_t call_position, size_t to_number_position);
void SetAsmFunctionStartPosition(size_t function_position);
void SetCompilationHint(WasmCompilationHintStrategy strategy,
@@ -206,7 +207,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
- FunctionSig* signature();
+ inline FunctionSig* signature();
private:
explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
@@ -223,7 +224,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
uint32_t signature_index_;
uint32_t func_index_;
ZoneBuffer body_;
- Vector<const char> name_;
+ base::Vector<const char> name_;
ZoneVector<uint32_t> i32_temps_;
ZoneVector<uint32_t> i64_temps_;
ZoneVector<uint32_t> f32_temps_;
@@ -245,13 +246,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
// Building methods.
- uint32_t AddImport(Vector<const char> name, FunctionSig* sig,
- Vector<const char> module = {});
+ uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
+ base::Vector<const char> module = {});
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
uint32_t AddGlobal(ValueType type, bool mutability = true,
WasmInitExpr init = WasmInitExpr());
- uint32_t AddGlobalImport(Vector<const char> name, ValueType type,
- bool mutability, Vector<const char> module = {});
+ uint32_t AddGlobalImport(base::Vector<const char> name, ValueType type,
+ bool mutability,
+ base::Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
uint32_t AddException(FunctionSig* type);
@@ -268,14 +270,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
WasmInitExpr init);
void MarkStartFunction(WasmFunctionBuilder* builder);
- void AddExport(Vector<const char> name, ImportExportKindCode kind,
+ void AddExport(base::Vector<const char> name, ImportExportKindCode kind,
uint32_t index);
- void AddExport(Vector<const char> name, WasmFunctionBuilder* builder) {
+ void AddExport(base::Vector<const char> name, WasmFunctionBuilder* builder) {
AddExport(name, kExternalFunction, builder->func_index());
}
uint32_t AddExportedGlobal(ValueType type, bool mutability, WasmInitExpr init,
- Vector<const char> name);
- void ExportImportedFunction(Vector<const char> name, int import_index);
+ base::Vector<const char> name);
+ void ExportImportedFunction(base::Vector<const char> name, int import_index);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
void SetHasSharedMemory();
@@ -317,20 +319,20 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
};
struct WasmFunctionImport {
- Vector<const char> module;
- Vector<const char> name;
+ base::Vector<const char> module;
+ base::Vector<const char> name;
uint32_t sig_index;
};
struct WasmGlobalImport {
- Vector<const char> module;
- Vector<const char> name;
+ base::Vector<const char> module;
+ base::Vector<const char> name;
ValueTypeCode type_code;
bool mutability;
};
struct WasmExport {
- Vector<const char> name;
+ base::Vector<const char> name;
ImportExportKindCode kind;
int index; // Can be negative for re-exported imports.
};
@@ -383,7 +385,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#endif
};
-inline FunctionSig* WasmFunctionBuilder::signature() {
+FunctionSig* WasmFunctionBuilder::signature() {
return builder_->types_[signature_index_].sig;
}
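
This hunk is the counterpart of the `inline` added to the in-class declaration above: the keyword may sit on the declaration, the definition, or both, and since the definition stays in the header either placement satisfies the one-definition rule. A two-line illustration:

struct Example {
  inline int f();  // declared inline here...
};
int Example::f() { return 42; }  // ...so this header definition needs no keyword.
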
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index acebe8d0e5..65c78e0b95 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -113,8 +113,8 @@ void LazilyGeneratedNames::AddForTesting(int function_index,
}
AsmJsOffsetInformation::AsmJsOffsetInformation(
- Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
+ base::Vector<const byte> encoded_offsets)
+ : encoded_offsets_(base::OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -222,7 +222,8 @@ namespace {
// reflective functions. Should be kept in sync with the {GetValueType} helper.
Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
return isolate->factory()->InternalizeUtf8String(
- type == kWasmFuncRef ? CStrVector("anyfunc") : VectorOf(type.name()));
+ type == kWasmFuncRef ? base::CStrVector("anyfunc")
+ : base::VectorOf(type.name()));
}
} // namespace
@@ -303,7 +304,7 @@ Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
// place and then use that constant everywhere.
element = factory->InternalizeUtf8String("anyfunc");
} else {
- element = factory->InternalizeUtf8String(VectorOf(type.name()));
+ element = factory->InternalizeUtf8String(base::VectorOf(type.name()));
}
Handle<JSFunction> object_function = isolate->object_function();
@@ -522,7 +523,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
Handle<String> name, ErrorThrower* thrower) {
Factory* factory = isolate->factory();
- Vector<const uint8_t> wire_bytes =
+ base::Vector<const uint8_t> wire_bytes =
module_object->native_module()->wire_bytes();
std::vector<CustomSectionOffset> custom_sections =
DecodeCustomSections(wire_bytes.begin(), wire_bytes.end());
@@ -547,9 +548,9 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
- base::Memcpy(array_buffer->backing_store(),
- wire_bytes.begin() + section.payload.offset(),
- section.payload.length());
+ memcpy(array_buffer->backing_store(),
+ wire_bytes.begin() + section.payload.offset(),
+ section.payload.length());
matching_sections.push_back(array_buffer);
}
@@ -604,7 +605,7 @@ size_t EstimateStoredSize(const WasmModule* module) {
VectorSize(module->elem_segments);
}
-size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig,
+size_t PrintSignature(base::Vector<char> buffer, const wasm::FunctionSig* sig,
char delimiter) {
if (buffer.empty()) return 0;
size_t old_size = buffer.size();
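
The GetCustomSections hunk above pairs a bounds-validated WireBytesRef with a raw copy into a freshly allocated ArrayBuffer. The same shape as a self-contained sketch (PayloadRef is a stand-in for wasm::WireBytesRef):

#include <cstdint>
#include <vector>

struct PayloadRef {
  uint32_t offset;
  uint32_t length;
};

// Validate the slice against the wire bytes, then copy it out.
bool CopySectionPayload(const std::vector<uint8_t>& wire, PayloadRef ref,
                        std::vector<uint8_t>* out) {
  if (ref.offset > wire.size() || ref.length > wire.size() - ref.offset) {
    return false;  // overflow-safe bounds check (see wasm-module.h below)
  }
  out->assign(wire.begin() + ref.offset,
              wire.begin() + ref.offset + ref.length);
  return true;
}
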
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index d185e67341..ed48532fca 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -13,9 +13,9 @@
#include "src/base/optional.h"
#include "src/base/platform/wrappers.h"
+#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
-#include "src/utils/vector.h"
#include "src/wasm/branch-hint-map.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/struct-types.h"
@@ -30,7 +30,7 @@ class WasmModuleObject;
namespace wasm {
-using WasmName = Vector<const char>;
+using WasmName = base::Vector<const char>;
struct AsmJsOffsets;
class ErrorThrower;
@@ -71,7 +71,7 @@ struct WasmFunction {
struct WasmGlobal {
ValueType type; // type of the global.
bool mutability; // {true} if mutable.
- WasmInitExpr init; // the initialization expression of the global.
+ WireBytesRef init; // the initialization expression of the global.
union {
uint32_t index; // index of imported mutable global.
uint32_t offset; // offset into global memory (if not imported & mutable).
@@ -95,13 +95,13 @@ struct WasmException {
// Static representation of a wasm data segment.
struct WasmDataSegment {
// Construct an active segment.
- explicit WasmDataSegment(WasmInitExpr dest_addr)
+ explicit WasmDataSegment(WireBytesRef dest_addr)
: dest_addr(std::move(dest_addr)), active(true) {}
// Construct a passive segment, which has no dest_addr.
WasmDataSegment() : active(false) {}
- WasmInitExpr dest_addr; // destination memory address of the data.
+ WireBytesRef dest_addr; // destination memory address of the data.
WireBytesRef source; // start offset in the module bytes.
bool active = true; // true if copied automatically during instantiation.
};
@@ -109,7 +109,7 @@ struct WasmDataSegment {
// Static representation of wasm element segment (table initializer).
struct WasmElemSegment {
// Construct an active segment.
- WasmElemSegment(ValueType type, uint32_t table_index, WasmInitExpr offset)
+ WasmElemSegment(ValueType type, uint32_t table_index, WireBytesRef offset)
: type(type),
table_index(table_index),
offset(std::move(offset)),
@@ -134,8 +134,14 @@ struct WasmElemSegment {
ValueType type;
uint32_t table_index;
- WasmInitExpr offset;
- std::vector<WasmInitExpr> entries;
+ WireBytesRef offset;
+ struct Entry {
+ enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
+ uint32_t index;
+ Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
+ Entry() : kind(kRefNullEntry), index(0) {}
+ };
+ std::vector<Entry> entries;
enum Status {
kStatusActive, // copied automatically during instantiation.
kStatusPassive, // copied explicitly after instantiation.
@@ -208,7 +214,7 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
public:
- explicit AsmJsOffsetInformation(Vector<const byte> encoded_offsets);
+ explicit AsmJsOffsetInformation(base::Vector<const byte> encoded_offsets);
// Destructor defined in wasm-module.cc, where the definition of
// {AsmJsOffsets} is available.
@@ -228,7 +234,7 @@ class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
mutable base::Mutex mutex_;
// Holds the encoded offset table bytes.
- OwnedVector<const uint8_t> encoded_offsets_;
+ base::OwnedVector<const uint8_t> encoded_offsets_;
// Holds the decoded offset table.
std::unique_ptr<AsmJsOffsets> decoded_offsets_;
@@ -377,7 +383,7 @@ struct WasmTable {
bool has_maximum_size = false; // true if there is a maximum size.
bool imported = false; // true if imported.
bool exported = false; // true if exported.
- WasmInitExpr initial_value;
+ WireBytesRef initial_value;
};
inline bool is_asmjs_module(const WasmModule* module) {
@@ -414,7 +420,7 @@ int GetNearestWasmFunction(const WasmModule* module, uint32_t byte_offset);
// on module_bytes, as this storage is only guaranteed to be alive as long as
// this struct is alive.
struct V8_EXPORT_PRIVATE ModuleWireBytes {
- explicit ModuleWireBytes(Vector<const byte> module_bytes)
+ explicit ModuleWireBytes(base::Vector<const byte> module_bytes)
: module_bytes_(module_bytes) {}
ModuleWireBytes(const byte* start, const byte* end)
: module_bytes_(start, static_cast<int>(end - start)) {
@@ -434,18 +440,19 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
return ref.offset() <= size && ref.length() <= size - ref.offset();
}
- Vector<const byte> GetFunctionBytes(const WasmFunction* function) const {
+ base::Vector<const byte> GetFunctionBytes(
+ const WasmFunction* function) const {
return module_bytes_.SubVector(function->code.offset(),
function->code.end_offset());
}
- Vector<const byte> module_bytes() const { return module_bytes_; }
+ base::Vector<const byte> module_bytes() const { return module_bytes_; }
const byte* start() const { return module_bytes_.begin(); }
const byte* end() const { return module_bytes_.end(); }
size_t length() const { return module_bytes_.length(); }
private:
- Vector<const byte> module_bytes_;
+ base::Vector<const byte> module_bytes_;
};
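
A note on BoundsCheck above: it is written as `ref.length() <= size - ref.offset()` rather than `ref.offset() + ref.length() <= size` because once `offset <= size` holds the subtraction cannot wrap, whereas the addition can overflow for adversarial refs. As a standalone predicate:

#include <cstddef>
#include <cstdint>

bool BoundsCheck(uint32_t offset, uint32_t length, size_t size) {
  // Overflow-safe: subtract only after offset is known to be in range.
  return offset <= size && length <= size - offset;
}
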
// A helper for printing out the names of functions.
@@ -501,7 +508,7 @@ class TruncatedUserString {
public:
template <typename T>
- explicit TruncatedUserString(Vector<T> name)
+ explicit TruncatedUserString(base::Vector<T> name)
: TruncatedUserString(name.begin(), name.length()) {}
TruncatedUserString(const byte* start, size_t len)
@@ -510,7 +517,7 @@ class TruncatedUserString {
TruncatedUserString(const char* start, size_t len)
: start_(start), length_(std::min(kMaxLen, static_cast<int>(len))) {
if (len > static_cast<size_t>(kMaxLen)) {
- base::Memcpy(buffer_, start, kMaxLen - 3);
+ memcpy(buffer_, start, kMaxLen - 3);
memset(buffer_ + kMaxLen - 3, '.', 3);
start_ = buffer_;
}
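
TruncatedUserString above clips untrusted names to a fixed-size local buffer and overwrites the tail with three dots. A standalone sketch of the same idea:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr int kMaxLen = 50;  // illustrative; the class picks its own cap

void PrintTruncated(const char* start, size_t len) {
  char buffer[kMaxLen];
  const char* p = start;
  int n = static_cast<int>(std::min<size_t>(len, kMaxLen));
  if (len > static_cast<size_t>(kMaxLen)) {
    memcpy(buffer, start, kMaxLen - 3);
    memset(buffer + kMaxLen - 3, '.', 3);  // "...": mark the truncation
    p = buffer;
  }
  printf("%.*s\n", n, p);  // precision-bounded, so no terminator needed
}
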
@@ -530,7 +537,7 @@ class TruncatedUserString {
// between parameter types and return types. If {buffer} is non-empty, it will
// be null-terminated, even if the signature is cut off. Returns the number of
// characters written, excluding the terminating null-byte.
-size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig*,
+size_t PrintSignature(base::Vector<char> buffer, const wasm::FunctionSig*,
char delimiter = ':');
} // namespace wasm
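
The recurring type change in this header — WasmGlobal::init, WasmDataSegment::dest_addr, WasmElemSegment::offset, and WasmTable::initial_value all turn from WasmInitExpr into WireBytesRef — means the decoder no longer materializes initializer expressions eagerly; it records where each expression lives in the wire bytes and decodes it when it is evaluated, and element-segment entries get the same compaction via the new Entry struct. A stand-in for the slice type, to make the shape concrete:

#include <cstdint>

// Hypothetical mirror of wasm::WireBytesRef: an (offset, length) slice
// of the module's wire bytes. Storing the slice keeps the module data
// small and trivially copyable.
struct WireBytesRefSketch {
  uint32_t offset_ = 0;
  uint32_t length_ = 0;
  uint32_t offset() const { return offset_; }
  uint32_t length() const { return length_; }
  uint32_t end_offset() const { return offset_ + length_; }
};
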
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index e102fbd97f..95303ed253 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -9,13 +9,13 @@
#ifndef V8_WASM_WASM_OBJECTS_INL_H_
#define V8_WASM_WASM_OBJECTS_INL_H_
-#include "src/wasm/wasm-objects.h"
+#include <type_traits>
#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/contexts-inl.h"
-#include "src/objects/foreign-inl.h"
-#include "src/objects/heap-number-inl.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/js-objects-inl.h"
@@ -25,6 +25,7 @@
#include "src/roots/roots.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -34,32 +35,23 @@ namespace internal {
#include "torque-generated/src/wasm/wasm-objects-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
-OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, WasmFunctionData)
-OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject)
OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTableObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(AsmWasmData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmFunctionData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTypeInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
-CAST_ACCESSOR(WasmExceptionObject)
-CAST_ACCESSOR(WasmExportedFunctionData)
-CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
-CAST_ACCESSOR(WasmMemoryObject)
-CAST_ACCESSOR(WasmModuleObject)
-CAST_ACCESSOR(WasmTableObject)
-CAST_ACCESSOR(AsmWasmData)
-CAST_ACCESSOR(WasmFunctionData)
-CAST_ACCESSOR(WasmTypeInfo)
-CAST_ACCESSOR(WasmStruct)
-CAST_ACCESSOR(WasmArray)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
DEF_GETTER(holder, has_##name, bool) { \
@@ -96,10 +88,6 @@ CAST_ACCESSOR(WasmArray)
}
// WasmModuleObject
-ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>,
- kNativeModuleOffset)
-ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
-ACCESSORS(WasmModuleObject, script, Script, kScriptOffset)
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module().raw();
}
@@ -117,28 +105,13 @@ bool WasmModuleObject::is_asm_js() {
return asm_js;
}
-// WasmTableObject
-ACCESSORS(WasmTableObject, instance, HeapObject, kInstanceOffset)
-ACCESSORS(WasmTableObject, entries, FixedArray, kEntriesOffset)
-SMI_ACCESSORS(WasmTableObject, current_length, kCurrentLengthOffset)
-ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
-ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
-SMI_ACCESSORS(WasmTableObject, raw_type, kRawTypeOffset)
-
// WasmMemoryObject
-ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
-SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakArrayList, kInstancesOffset)
// WasmGlobalObject
-ACCESSORS(WasmGlobalObject, instance, HeapObject, kInstanceOffset)
ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
kUntaggedBufferOffset)
ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
-SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
-// TODO(7748): Try to come up with some encoding that includes is_mutable?
-SMI_ACCESSORS(WasmGlobalObject, raw_type, kRawTypeOffset)
-SMI_ACCESSORS(WasmGlobalObject, is_mutable, kIsMutableOffset)
wasm::ValueType WasmGlobalObject::type() const {
return wasm::ValueType::FromRawBitField(static_cast<uint32_t>(raw_type()));
@@ -156,19 +129,19 @@ Address WasmGlobalObject::address() const {
}
int32_t WasmGlobalObject::GetI32() {
- return base::ReadLittleEndianValue<int32_t>(address());
+ return base::ReadUnalignedValue<int32_t>(address());
}
int64_t WasmGlobalObject::GetI64() {
- return base::ReadLittleEndianValue<int64_t>(address());
+ return base::ReadUnalignedValue<int64_t>(address());
}
float WasmGlobalObject::GetF32() {
- return base::ReadLittleEndianValue<float>(address());
+ return base::ReadUnalignedValue<float>(address());
}
double WasmGlobalObject::GetF64() {
- return base::ReadLittleEndianValue<double>(address());
+ return base::ReadUnalignedValue<double>(address());
}
Handle<Object> WasmGlobalObject::GetRef() {
@@ -178,19 +151,19 @@ Handle<Object> WasmGlobalObject::GetRef() {
}
void WasmGlobalObject::SetI32(int32_t value) {
- base::WriteLittleEndianValue<int32_t>(address(), value);
+ base::WriteUnalignedValue(address(), value);
}
void WasmGlobalObject::SetI64(int64_t value) {
- base::WriteLittleEndianValue<int64_t>(address(), value);
+ base::WriteUnalignedValue(address(), value);
}
void WasmGlobalObject::SetF32(float value) {
- base::WriteLittleEndianValue<float>(address(), value);
+ base::WriteUnalignedValue(address(), value);
}
void WasmGlobalObject::SetF64(double value) {
- base::WriteLittleEndianValue<double>(address(), value);
+ base::WriteUnalignedValue(address(), value);
}
void WasmGlobalObject::SetExternRef(Handle<Object> value) {
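
The Get*/Set* hunks above replace the little-endian helpers, which byte-swap on big-endian targets, with unaligned native-order accesses: global buffers are now stored in native byte order and remain readable at any alignment. A sketch of what the unaligned helpers do, assuming the usual memcpy-based implementation:

#include <cstdint>
#include <cstring>

// memcpy through a local makes the access legal at any alignment;
// packed wasm globals and struct fields need not be naturally aligned.
template <typename T>
T ReadUnaligned(uintptr_t address) {
  T value;
  memcpy(&value, reinterpret_cast<const void*>(address), sizeof(T));
  return value;
}

template <typename T>
void WriteUnaligned(uintptr_t address, T value) {
  memcpy(reinterpret_cast<void*>(address), &value, sizeof(T));
}
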
@@ -316,11 +289,6 @@ ImportedFunctionEntry::ImportedFunctionEntry(
DCHECK_LT(index, instance->module()->num_imported_functions);
}
-// WasmExceptionObject
-ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
-ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
-
// WasmExceptionPackage
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionPackage, JSReceiver)
CAST_ACCESSOR(WasmExceptionPackage)
@@ -334,15 +302,12 @@ CAST_ACCESSOR(WasmExportedFunction)
// WasmFunctionData
ACCESSORS(WasmFunctionData, ref, Object, kRefOffset)
-// WasmExportedFunctionData
-ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
-ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
- kInstanceOffset)
-SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
-ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
-SMI_ACCESSORS(WasmExportedFunctionData, wrapper_budget, kWrapperBudgetOffset)
-ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
-SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
+DEF_GETTER(WasmFunctionData, wrapper_code, Code) {
+ return FromCodeT(TorqueGeneratedClass::wrapper_code(cage_base));
+}
+void WasmFunctionData::set_wrapper_code(Code code, WriteBarrierMode mode) {
+ TorqueGeneratedClass::set_wrapper_code(ToCodeT(code), mode);
+}
wasm::FunctionSig* WasmExportedFunctionData::sig() const {
return reinterpret_cast<wasm::FunctionSig*>(signature().foreign_address());
@@ -355,18 +320,18 @@ WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, WasmFunctionData)
-CAST_ACCESSOR(WasmJSFunctionData)
-SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
- kSerializedReturnCountOffset)
-SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
- kSerializedParameterCountOffset)
-ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
-ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
-ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData)
+ACCESSORS(WasmJSFunctionData, raw_wasm_to_js_wrapper_code, CodeT,
kWasmToJsWrapperCodeOffset)
+DEF_GETTER(WasmJSFunctionData, wasm_to_js_wrapper_code, Code) {
+ return FromCodeT(raw_wasm_to_js_wrapper_code(cage_base));
+}
+void WasmJSFunctionData::set_wasm_to_js_wrapper_code(Code code,
+ WriteBarrierMode mode) {
+ set_raw_wasm_to_js_wrapper_code(ToCodeT(code), mode);
+}
+
// WasmCapiFunction
WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmCapiFunction(*this));
@@ -380,16 +345,13 @@ WasmExternalFunction::WasmExternalFunction(Address ptr) : JSFunction(ptr) {
CAST_ACCESSOR(WasmExternalFunction)
// WasmIndirectFunctionTable
-OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable, Struct)
-CAST_ACCESSOR(WasmIndirectFunctionTable)
-PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, size, uint32_t, kSizeOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable)
PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, sig_ids, uint32_t*,
kSigIdsOffset)
PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, targets, Address*,
kTargetsOffset)
OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
Foreign, kManagedNativeAllocationsOffset)
-ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
#undef OPTIONAL_ACCESSORS
#undef READ_PRIMITIVE_FIELD
@@ -402,11 +364,169 @@ wasm::ValueType WasmTableObject::type() {
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
-// AsmWasmData
-ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
- kManagedNativeModuleOffset)
-ACCESSORS(AsmWasmData, export_wrappers, FixedArray, kExportWrappersOffset)
-ACCESSORS(AsmWasmData, uses_bitset, HeapNumber, kUsesBitsetOffset)
+// static
+Handle<Object> WasmObject::ReadValueAt(Isolate* isolate, Handle<HeapObject> obj,
+ wasm::ValueType type, uint32_t offset) {
+ Address field_address = obj->GetFieldAddress(offset);
+ switch (type.kind()) {
+ case wasm::kI8: {
+ int8_t value = base::Memory<int8_t>(field_address);
+ return handle(Smi::FromInt(value), isolate);
+ }
+ case wasm::kI16: {
+ int16_t value = base::Memory<int16_t>(field_address);
+ return handle(Smi::FromInt(value), isolate);
+ }
+ case wasm::kI32: {
+ int32_t value = base::Memory<int32_t>(field_address);
+ return isolate->factory()->NewNumberFromInt(value);
+ }
+ case wasm::kI64: {
+ int64_t value = base::ReadUnalignedValue<int64_t>(field_address);
+ return BigInt::FromInt64(isolate, value);
+ }
+ case wasm::kF32: {
+ float value = base::Memory<float>(field_address);
+ return isolate->factory()->NewNumber(value);
+ }
+ case wasm::kF64: {
+ double value = base::ReadUnalignedValue<double>(field_address);
+ return isolate->factory()->NewNumber(value);
+ }
+ case wasm::kS128:
+ // TODO(v8:11804): implement
+ UNREACHABLE();
+
+ case wasm::kRef:
+ case wasm::kOptRef: {
+ ObjectSlot slot(field_address);
+ return handle(slot.load(isolate), isolate);
+ }
+
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+      // Rtt values must not be made available to the JavaScript side.
+ UNREACHABLE();
+
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+}
+
+// static
+MaybeHandle<Object> WasmObject::ToWasmValue(Isolate* isolate,
+ wasm::ValueType type,
+ Handle<Object> value) {
+ switch (type.kind()) {
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kI32:
+ case wasm::kF32:
+ case wasm::kF64:
+ return Object::ToNumber(isolate, value);
+
+ case wasm::kI64:
+ return BigInt::FromObject(isolate, value);
+
+ case wasm::kRef:
+ case wasm::kOptRef: {
+ // TODO(v8:11804): implement ref type check
+ UNREACHABLE();
+ }
+
+ case wasm::kS128:
+ // TODO(v8:11804): implement
+ UNREACHABLE();
+
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+      // Rtt values must not be made available to the JavaScript side.
+ UNREACHABLE();
+
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+}
+
+// Conversions from Numeric objects.
+// static
+template <typename ElementType>
+ElementType WasmObject::FromNumber(Object value) {
+ // The value must already be prepared for storing to numeric fields.
+ DCHECK(value.IsNumber());
+ if (value.IsSmi()) {
+ return static_cast<ElementType>(Smi::ToInt(value));
+
+ } else if (value.IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value).value();
+ if (std::is_same<ElementType, double>::value ||
+ std::is_same<ElementType, float>::value) {
+ return static_cast<ElementType>(double_value);
+ } else {
+ CHECK(std::is_integral<ElementType>::value);
+ return static_cast<ElementType>(DoubleToInt32(double_value));
+ }
+ }
+ UNREACHABLE();
+}
+
+// static
+void WasmObject::WriteValueAt(Isolate* isolate, Handle<HeapObject> obj,
+ wasm::ValueType type, uint32_t offset,
+ Handle<Object> value) {
+ Address field_address = obj->GetFieldAddress(offset);
+ switch (type.kind()) {
+ case wasm::kI8: {
+ auto scalar_value = FromNumber<int8_t>(*value);
+ base::Memory<int8_t>(field_address) = scalar_value;
+ break;
+ }
+ case wasm::kI16: {
+ auto scalar_value = FromNumber<int16_t>(*value);
+ base::Memory<int16_t>(field_address) = scalar_value;
+ break;
+ }
+ case wasm::kI32: {
+ auto scalar_value = FromNumber<int32_t>(*value);
+ base::Memory<int32_t>(field_address) = scalar_value;
+ break;
+ }
+ case wasm::kI64: {
+ int64_t scalar_value = BigInt::cast(*value).AsInt64();
+ base::WriteUnalignedValue<int64_t>(field_address, scalar_value);
+ break;
+ }
+ case wasm::kF32: {
+ auto scalar_value = FromNumber<float>(*value);
+ base::Memory<float>(field_address) = scalar_value;
+ break;
+ }
+ case wasm::kF64: {
+ auto scalar_value = FromNumber<double>(*value);
+ base::WriteUnalignedValue<double>(field_address, scalar_value);
+ break;
+ }
+ case wasm::kRef:
+ case wasm::kOptRef:
+ // TODO(v8:11804): implement
+ UNREACHABLE();
+
+ case wasm::kS128:
+ // TODO(v8:11804): implement
+ UNREACHABLE();
+
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+      // Rtt values must not be made available to the JavaScript side.
+ UNREACHABLE();
+
+ case wasm::kVoid:
+ case wasm::kBottom:
+ UNREACHABLE();
+ }
+}
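
In FromNumber above, integral element types funnel heap numbers through DoubleToInt32, which implements ECMAScript ToInt32 — truncate toward zero, then wrap modulo 2^32 — before the final narrowing cast picks off the low bits. A sketch of that conversion, with one worked value:

#include <cmath>
#include <cstdint>

// ECMAScript ToInt32 (the behavior behind DoubleToInt32):
// NaN and infinities map to 0; everything else wraps modulo 2^32.
int32_t ToInt32Sketch(double d) {
  if (!std::isfinite(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

// Storing 300.0 into an i8 field: ToInt32 gives 300, and the narrowing
// cast keeps the low 8 bits, so the field reads back as 44.
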
wasm::StructType* WasmStruct::type(Map map) {
WasmTypeInfo type_info = map.wasm_type_info();
@@ -439,9 +559,33 @@ int WasmStruct::GcSafeSize(Map map) {
wasm::StructType* WasmStruct::type() const { return type(map()); }
-ObjectSlot WasmStruct::RawField(int raw_offset) {
+Address WasmStruct::RawFieldAddress(int raw_offset) {
int offset = WasmStruct::kHeaderSize + raw_offset;
- return ObjectSlot(FIELD_ADDR(*this, offset));
+ return FIELD_ADDR(*this, offset);
+}
+
+ObjectSlot WasmStruct::RawField(int raw_offset) {
+ return ObjectSlot(RawFieldAddress(raw_offset));
+}
+
+// static
+Handle<Object> WasmStruct::GetField(Isolate* isolate, Handle<WasmStruct> obj,
+ uint32_t field_index) {
+ wasm::StructType* type = obj->type();
+ CHECK_LT(field_index, type->field_count());
+ wasm::ValueType field_type = type->field(field_index);
+ int offset = WasmStruct::kHeaderSize + type->field_offset(field_index);
+ return ReadValueAt(isolate, obj, field_type, offset);
+}
+
+// static
+void WasmStruct::SetField(Isolate* isolate, Handle<WasmStruct> obj,
+ uint32_t field_index, Handle<Object> value) {
+ wasm::StructType* type = obj->type();
+ CHECK_LT(field_index, type->field_count());
+ wasm::ValueType field_type = type->field(field_index);
+ int offset = WasmStruct::kHeaderSize + type->field_offset(field_index);
+ WriteValueAt(isolate, obj, field_type, offset, value);
}
wasm::ArrayType* WasmArray::type(Map map) {
@@ -472,6 +616,18 @@ int WasmArray::GcSafeSizeFor(Map map, int length) {
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
+// static
+Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
+ uint32_t index) {
+ if (index >= array->length()) {
+ return isolate->factory()->undefined_value();
+ }
+ wasm::ValueType element_type = array->type()->element_type();
+ uint32_t offset =
+ WasmArray::kHeaderSize + index * element_type.element_size_bytes();
+ return ReadValueAt(isolate, array, element_type, offset);
+}
+
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
// Due to the type-specific pointer tags for external pointers, we need to
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 718124debf..7b94b60561 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -5,6 +5,7 @@
#include "src/wasm/wasm-objects.h"
#include "src/base/iterator.h"
+#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/wasm-compiler.h"
@@ -16,7 +17,6 @@
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/utils.h"
-#include "src/utils/vector.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -190,24 +190,25 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Handle<String> WasmModuleObject::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmModuleObject> module_object,
wasm::WireBytesRef ref, InternalizeString internalize) {
- Vector<const uint8_t> wire_bytes =
+ base::Vector<const uint8_t> wire_bytes =
module_object->native_module()->wire_bytes();
return ExtractUtf8StringFromModuleBytes(isolate, wire_bytes, ref,
internalize);
}
Handle<String> WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- Isolate* isolate, Vector<const uint8_t> wire_bytes, wasm::WireBytesRef ref,
- InternalizeString internalize) {
- Vector<const uint8_t> name_vec =
+ Isolate* isolate, base::Vector<const uint8_t> wire_bytes,
+ wasm::WireBytesRef ref, InternalizeString internalize) {
+ base::Vector<const uint8_t> name_vec =
wire_bytes.SubVector(ref.offset(), ref.end_offset());
// UTF8 validation happens at decode time.
DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length()));
auto* factory = isolate->factory();
return internalize
? factory->InternalizeUtf8String(
- Vector<const char>::cast(name_vec))
- : factory->NewStringFromUtf8(Vector<const char>::cast(name_vec))
+ base::Vector<const char>::cast(name_vec))
+ : factory
+ ->NewStringFromUtf8(base::Vector<const char>::cast(name_vec))
.ToHandleChecked();
}
@@ -232,9 +233,10 @@ MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
kNoInternalize);
}
-Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(int func_index) {
+base::Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
+ int func_index) {
if (func_index == wasm::kAnonymousFuncIndex) {
- return Vector<const uint8_t>({nullptr, 0});
+ return base::Vector<const uint8_t>({nullptr, 0});
}
DCHECK_GT(module()->functions.size(), func_index);
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
@@ -242,7 +244,7 @@ Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(int func_index) {
module()->lazily_generated_names.LookupFunctionName(wire_bytes,
func_index);
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
- return Vector<const uint8_t>::cast(name);
+ return base::Vector<const uint8_t>::cast(name);
}
Handle<WasmTableObject> WasmTableObject::New(
@@ -614,17 +616,25 @@ void WasmTableObject::UpdateDispatchTables(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- // TODO(jkummerow): Find a way to avoid recompiling wrappers.
wasm::NativeModule* native_module =
instance->module_object().native_module();
- Address host_address = capi_function->GetHostCallTarget();
- wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
- isolate->wasm_engine(), native_module, &sig, host_address);
- isolate->counters()->wasm_generated_code_size()->Increment(
- wasm_code->instructions().length());
- isolate->counters()->wasm_reloc_size()->Increment(
- wasm_code->reloc_info().length());
+ wasm::WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
+ auto kind = compiler::WasmImportCallKind::kWasmToCapi;
+ wasm::WasmCode* wasm_code = cache->MaybeGet(kind, &sig, param_count);
+ if (wasm_code == nullptr) {
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmImportWrapperCache::ModificationScope cache_scope(cache);
+ wasm_code = compiler::CompileWasmCapiCallWrapper(native_module, &sig);
+ wasm::WasmImportWrapperCache::CacheKey key(kind, &sig, param_count);
+ cache_scope[key] = wasm_code;
+ wasm_code->IncRef();
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ wasm_code->instructions().length());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ wasm_code->reloc_info().length());
+ }
+ // There is a cached tuple on the {capi_function}, but it is instance-
+ // independent, so we prefer to allocate a fresh tuple here.
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
instance, capi_function, AllocationType::kOld);
// Note that {SignatureMap::Find} may return {-1} if the signature is
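
This hunk trades the unconditional wrapper recompilation (note the removed TODO) for a lookup in the native module's import wrapper cache: try MaybeGet first, and only on a miss compile the wrapper, insert it under a ModificationScope, and take a reference. The control shape, with a plain map and mutex as stand-ins for the cache types:

#include <map>
#include <memory>
#include <mutex>

struct WrapperCode {};  // stand-in for wasm::WasmCode
using CacheKey = int;   // stand-in for WasmImportWrapperCache::CacheKey

std::shared_ptr<WrapperCode> GetOrCompile(
    std::map<CacheKey, std::shared_ptr<WrapperCode>>* cache,
    std::mutex* mu, CacheKey key) {
  std::lock_guard<std::mutex> lock(*mu);  // plays the ModificationScope role
  auto it = cache->find(key);
  if (it != cache->end()) return it->second;    // MaybeGet() hit
  auto code = std::make_shared<WrapperCode>();  // compile once, on miss
  (*cache)[key] = code;                         // cache_scope[key] = wasm_code
  return code;
}
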
@@ -773,7 +783,8 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin;
bool use_trap_handler =
- instance->module_object().native_module()->use_trap_handler();
+ instance->module_object().native_module()->bounds_checks() ==
+ wasm::kTrapHandler;
// Wasm modules compiled to use the trap handler don't have bounds checks,
// so they must have a memory that has guard regions.
CHECK_IMPLIES(is_wasm_module && use_trap_handler,
@@ -877,7 +888,7 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
? Handle<WeakArrayList>(memory->instances(), isolate)
: handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(),
isolate);
- Handle<WeakArrayList> new_instances = WeakArrayList::AddToEnd(
+ Handle<WeakArrayList> new_instances = WeakArrayList::Append(
isolate, old_instances, MaybeObjectHandle::Weak(instance));
memory->set_instances(*new_instances);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate);
@@ -913,31 +924,43 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// therefore this memory cannot be grown.
if (old_buffer->is_asmjs_memory()) return -1;
- // Checks for maximum memory size.
- uint32_t maximum_pages = wasm::max_mem_pages();
- if (memory_object->has_maximum_pages()) {
- maximum_pages = std::min(
- maximum_pages, static_cast<uint32_t>(memory_object->maximum_pages()));
- }
+ std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
+ if (!backing_store) return -1;
+
+ // Check for maximum memory size.
+ // Note: The {wasm::max_mem_pages()} limit is already checked in
+ // {BackingStore::CopyWasmMemory}, and is irrelevant for
+ // {GrowWasmMemoryInPlace} because memory is never allocated with more
+ // capacity than that limit.
size_t old_size = old_buffer->byte_length();
DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
size_t old_pages = old_size / wasm::kWasmPageSize;
- CHECK_GE(wasm::max_mem_pages(), old_pages);
- if (pages > maximum_pages - old_pages) return -1;
- std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
- if (!backing_store) return -1;
+ uint32_t max_pages = wasm::kSpecMaxMemoryPages;
+ if (memory_object->has_maximum_pages()) {
+ DCHECK_GE(max_pages, memory_object->maximum_pages());
+ max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
+ }
+ DCHECK_GE(max_pages, old_pages);
+ if (pages > max_pages - old_pages) return -1;
- // Try to handle shared memory first.
+ base::Optional<size_t> result_inplace =
+ backing_store->GrowWasmMemoryInPlace(isolate, pages, max_pages);
+ // Handle shared memory first.
if (old_buffer->is_shared()) {
- base::Optional<size_t> result =
- backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
// Shared memories can only be grown in place; no copying.
- if (!result.has_value()) return -1;
+ if (!result_inplace.has_value()) {
+      // Growth limits differ per platform, so crash here (rather than let
+      // results diverge) when the correctness fuzzer is running.
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("could not grow wasm memory");
+ }
+ return -1;
+ }
BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store);
// Broadcasting the update should update this memory object too.
CHECK_NE(*old_buffer, memory_object->array_buffer());
- size_t new_pages = result.value() + pages;
+ size_t new_pages = result_inplace.value() + pages;
// If the allocation succeeded, then this can't possibly overflow:
size_t new_byte_length = new_pages * wasm::kWasmPageSize;
// This is a less than check, as it is not guaranteed that the SAB
@@ -949,13 +972,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// As {old_pages} was read racefully, we return here the synchronized
// value provided by {GrowWasmMemoryInPlace}, to provide the atomic
// read-modify-write behavior required by the spec.
- return static_cast<int32_t>(result.value()); // success
+ return static_cast<int32_t>(result_inplace.value()); // success
}
- base::Optional<size_t> result =
- backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
- // Try to grow non-shared memory in-place.
- if (result.has_value()) {
+ // Check if the non-shared memory could grow in-place.
+ if (result_inplace.has_value()) {
// Detach old and create a new one with the grown backing store.
old_buffer->Detach(true);
Handle<JSArrayBuffer> new_buffer =
@@ -966,11 +987,12 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<Symbol> symbol =
isolate->factory()->array_buffer_wasm_memory_symbol();
JSObject::SetProperty(isolate, new_buffer, symbol, memory_object).Check();
- DCHECK_EQ(result.value(), old_pages);
- return static_cast<int32_t>(result.value()); // success
+ DCHECK_EQ(result_inplace.value(), old_pages);
+ return static_cast<int32_t>(result_inplace.value()); // success
}
size_t new_pages = old_pages + pages;
+ DCHECK_LT(old_pages, new_pages);
// Try allocating a new backing store and copying.
std::unique_ptr<BackingStore> new_backing_store =
backing_store->CopyWasmMemory(isolate, new_pages);
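
The restructured Grow above computes the in-place result once and then branches: shared memories may only grow in place, non-shared memories fall back to copying into a new backing store, and the return value is the old size in pages (memory.grow's spec result) or -1 on failure. A reduced skeleton, with stand-ins for the BackingStore calls:

#include <cstdint>
#include <optional>

// Stand-in: pretend the reservation always has room up to max_pages.
std::optional<uint32_t> GrowInPlace(uint32_t old_pages, uint32_t pages,
                                    uint32_t max_pages) {
  if (pages > max_pages - old_pages) return std::nullopt;
  return old_pages;
}
bool CopyAndSwap(uint32_t /*new_pages*/) { return true; }  // stand-in

int32_t Grow(bool shared, uint32_t old_pages, uint32_t pages,
             uint32_t max_pages) {
  if (pages > max_pages - old_pages) return -1;  // past the declared maximum
  std::optional<uint32_t> in_place = GrowInPlace(old_pages, pages, max_pages);
  if (shared) {
    // Shared memories can only grow in place; the buffer stays shared.
    return in_place ? static_cast<int32_t>(*in_place) : -1;
  }
  if (in_place) return static_cast<int32_t>(*in_place);
  // Non-shared fallback: allocate a fresh store, copy, detach the old one.
  return CopyAndSwap(old_pages + pages) ? static_cast<int32_t>(old_pages) : -1;
}
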
@@ -1292,11 +1314,10 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
// Insert the new instance into the scripts weak list of instances. This list
// is used for breakpoints affecting all instances belonging to the script.
- // TODO(wasm): Allow to reuse holes in the {WeakArrayList} below.
if (module_object->script().type() == Script::TYPE_WASM) {
Handle<WeakArrayList> weak_instance_list(
module_object->script().wasm_weak_instance_list(), isolate);
- weak_instance_list = WeakArrayList::AddToEnd(
+ weak_instance_list = WeakArrayList::Append(
isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
module_object->script().set_wasm_weak_instance_list(*weak_instance_list);
}
@@ -1462,7 +1483,7 @@ WasmInstanceObject::GetOrCreateWasmExternalFunction(
// later use.
wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
isolate, function.sig, instance->module(), function.imported);
- module_object->export_wrappers().set(wrapper_index, *wrapper);
+ module_object->export_wrappers().set(wrapper_index, ToCodeT(*wrapper));
}
result = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
isolate, instance, function_index,
@@ -1523,7 +1544,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
.internal_formal_parameter_count();
}
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- isolate->wasm_engine(), &env, kind, sig, false, expected_arity);
+ &env, kind, sig, false, expected_arity);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1593,7 +1614,7 @@ wasm::WasmValue WasmInstanceObject::GetGlobalValue(
switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
case wasm::valuetype: \
- return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(ptr));
+ return wasm::WasmValue(base::ReadUnalignedValue<ctype>(ptr));
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
default:
@@ -1609,7 +1630,7 @@ wasm::WasmValue WasmStruct::GetFieldValue(uint32_t index) {
switch (field_type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
case wasm::valuetype: \
- return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(field_address));
+ return wasm::WasmValue(base::ReadUnalignedValue<ctype>(field_address));
CASE_TYPE(kI8, int8_t)
CASE_TYPE(kI16, int16_t)
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
@@ -1639,7 +1660,7 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
switch (element_type.kind()) {
#define CASE_TYPE(value_type, ctype) \
case wasm::value_type: \
- return wasm::WasmValue(base::ReadLittleEndianValue<ctype>(element_address));
+ return wasm::WasmValue(base::ReadUnalignedValue<ctype>(element_address));
CASE_TYPE(kI8, int8_t)
CASE_TYPE(kI16, int16_t)
FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
@@ -1660,6 +1681,18 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
}
}
+ObjectSlot WasmArray::ElementSlot(uint32_t index) {
+ DCHECK_LE(index, length());
+ DCHECK(type()->element_type().is_reference());
+ return RawField(kHeaderSize + kTaggedSize * index);
+}
+
+Address WasmArray::ElementAddress(uint32_t index) {
+ DCHECK_LE(index, length());
+ return ptr() + WasmArray::kHeaderSize +
+ index * type()->element_type().element_size_bytes() - kHeapObjectTag;
+}
+
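
ElementAddress above is plain tagged-pointer arithmetic: the payload starts after a fixed header, elements are packed at their wasm element size, and the heap-object tag is subtracted to turn the tagged pointer into a raw address. A sketch with illustrative constants (kHeaderSize here is made up; the tag value 1 matches V8's tagged heap pointers):

#include <cstdint>

constexpr uintptr_t kHeaderSize = 16;    // illustrative only
constexpr uintptr_t kHeapObjectTag = 1;  // low bit set on tagged pointers

uintptr_t ElementAddress(uintptr_t tagged_ptr, uint32_t index,
                         uint32_t element_size_bytes) {
  return tagged_ptr + kHeaderSize +
         uintptr_t{index} * element_size_bytes - kHeapObjectTag;
}
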
// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
@@ -1826,8 +1859,9 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object.IsJSFunction()) return false;
JSFunction js_function = JSFunction::cast(object);
- if (CodeKind::JS_TO_WASM_FUNCTION != js_function.code().kind() &&
- js_function.code().builtin_index() != Builtins::kGenericJSToWasmWrapper) {
+ Code code = js_function.code();
+ if (CodeKind::JS_TO_WASM_FUNCTION != code.kind() &&
+ code.builtin_id() != Builtin::kGenericJSToWasmWrapper) {
return false;
}
DCHECK(js_function.shared().HasWasmExportedFunctionData());
@@ -1853,11 +1887,16 @@ Handle<WasmCapiFunction> WasmCapiFunction::New(
// TODO(jkummerow): Install a JavaScript wrapper. For now, calling
// these functions directly is unsupported; they can only be called
// from Wasm code.
+
+ // To support simulator builds, we potentially have to redirect the
+ // call target (which is an address pointing into the C++ binary).
+ call_target = ExternalReference::Create(call_target).address();
+
Handle<WasmCapiFunctionData> fun_data =
isolate->factory()->NewWasmCapiFunctionData(
call_target, embedder_data,
- isolate->builtins()->builtin_handle(Builtins::kIllegal),
- serialized_signature, AllocationType::kOld);
+ isolate->builtins()->code_handle(Builtin::kIllegal),
+ serialized_signature);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
return Handle<WasmCapiFunction>::cast(
@@ -1876,10 +1915,9 @@ int WasmExportedFunction::function_index() {
Handle<WasmExportedFunction> WasmExportedFunction::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
int arity, Handle<Code> export_wrapper) {
- DCHECK(
- CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() ||
- (export_wrapper->is_builtin() &&
- export_wrapper->builtin_index() == Builtins::kGenericJSToWasmWrapper));
+ DCHECK(CodeKind::JS_TO_WASM_FUNCTION == export_wrapper->kind() ||
+ (export_wrapper->is_builtin() &&
+ export_wrapper->builtin_id() == Builtin::kGenericJSToWasmWrapper));
int num_imported_functions = instance->module()->num_imported_functions;
Handle<Object> ref =
func_index >= num_imported_functions
@@ -1904,11 +1942,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
}
Handle<String> name;
if (!maybe_name.ToHandle(&name)) {
- EmbeddedVector<char, 16> buffer;
+ base::EmbeddedVector<char, 16> buffer;
int length = SNPrintF(buffer, "%d", func_index);
name = factory
->NewStringFromOneByte(
- Vector<uint8_t>::cast(buffer.SubVector(0, length)))
+ base::Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
Handle<Map> function_map;
@@ -1983,7 +2021,7 @@ std::unique_ptr<char[]> WasmExportedFunction::GetDebugName(
constexpr const char kPrefix[] = "js-to-wasm:";
// prefix + parameters + delimiter + returns + zero byte
size_t len = strlen(kPrefix) + sig->all().size() + 2;
- auto buffer = OwnedVector<char>::New(len);
+ auto buffer = base::OwnedVector<char>::New(len);
memcpy(buffer.start(), kPrefix, strlen(kPrefix));
PrintSignature(buffer.as_vector() + strlen(kPrefix), sig);
return buffer.ReleaseData();
@@ -2097,10 +2135,6 @@ bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
return function_data.serialized_signature().matches(expected, sig_size);
}
-Address WasmCapiFunction::GetHostCallTarget() const {
- return shared().wasm_capi_function_data().call_target();
-}
-
PodArray<wasm::ValueType> WasmCapiFunction::GetSerializedSignature() const {
return shared().wasm_capi_function_data().serialized_signature();
}
@@ -2172,18 +2206,20 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
case HeapType::kI31: {
// TODO(7748): Change this when we have a decision on the JS API for
// structs/arrays.
- Handle<Name> key = isolate->factory()->wasm_wrapped_object_symbol();
- LookupIterator it(isolate, value, key,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() != LookupIterator::DATA) {
- *error_message =
- "eqref/dataref/i31ref object must be null (if nullable) or "
- "wrapped with the wasm object wrapper";
- return false;
+ if (!FLAG_wasm_gc_js_interop) {
+ Handle<Name> key = isolate->factory()->wasm_wrapped_object_symbol();
+ LookupIterator it(isolate, value, key,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.state() != LookupIterator::DATA) {
+ *error_message =
+ "eqref/dataref/i31ref object must be null (if nullable) or "
+ "wrapped with the wasm object wrapper";
+ return false;
+ }
+ value = it.GetDataValue();
}
if (expected.is_reference_to(HeapType::kEq)) return true;
- Handle<Object> value = it.GetDataValue();
if (expected.is_reference_to(HeapType::kData)) {
if (value->IsSmi()) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 473c4725cc..1969a3a478 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -17,6 +17,7 @@
#include "src/debug/debug.h"
#include "src/heap/heap.h"
#include "src/objects/js-function.h"
+#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/value-type.h"
@@ -124,13 +125,9 @@ class ImportedFunctionEntry {
enum InternalizeString : bool { kInternalize = true, kNoInternalize = false };
// Representation of a WebAssembly.Module JavaScript-level object.
-class WasmModuleObject : public JSObject {
+class WasmModuleObject
+ : public TorqueGeneratedWasmModuleObject<WasmModuleObject, JSObject> {
public:
- DECL_CAST(WasmModuleObject)
-
- DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
- DECL_ACCESSORS(export_wrappers, FixedArray)
- DECL_ACCESSORS(script, Script)
inline wasm::NativeModule* native_module() const;
inline const std::shared_ptr<wasm::NativeModule>& shared_native_module()
const;
@@ -138,10 +135,6 @@ class WasmModuleObject : public JSObject {
// Dispatched behavior.
DECL_PRINTER(WasmModuleObject)
- DECL_VERIFIER(WasmModuleObject)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_WASM_MODULE_OBJECT_FIELDS)
// Creates a new {WasmModuleObject} for an existing {NativeModule} that is
// reference counted and might be shared between multiple Isolates.
@@ -170,7 +163,7 @@ class WasmModuleObject : public JSObject {
// given index.
// Meant to be used for debugging or frame printing.
// Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(int func_index);
+ base::Vector<const uint8_t> GetRawFunctionName(int func_index);
// Extract a portion of the wire bytes as UTF-8 string, optionally
// internalized. (Prefer to internalize early if the string will be used for a
@@ -179,52 +172,33 @@ class WasmModuleObject : public JSObject {
Isolate*, Handle<WasmModuleObject>, wasm::WireBytesRef,
InternalizeString);
static Handle<String> ExtractUtf8StringFromModuleBytes(
- Isolate*, Vector<const uint8_t> wire_byte, wasm::WireBytesRef,
+      Isolate*, base::Vector<const uint8_t> wire_bytes, wasm::WireBytesRef,
InternalizeString);
- OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(WasmModuleObject)
};
// Representation of a WebAssembly.Table JavaScript-level object.
-class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
+class WasmTableObject
+ : public TorqueGeneratedWasmTableObject<WasmTableObject, JSObject> {
public:
- DECL_CAST(WasmTableObject)
-
- // The instance in which this WasmTableObject is defined.
- // This field is undefined if the global is defined outside any Wasm module,
- // i.e., through the JS API (WebAssembly.Table).
- // Because it might be undefined, we declare it as a HeapObject.
- DECL_ACCESSORS(instance, HeapObject)
- // The entries array is at least as big as {current_length()}, but might be
- // bigger to make future growth more efficient.
- DECL_ACCESSORS(entries, FixedArray)
- DECL_INT_ACCESSORS(current_length)
- // TODO(titzer): introduce DECL_I64_ACCESSORS macro
- DECL_ACCESSORS(maximum_length, Object)
- DECL_ACCESSORS(dispatch_tables, FixedArray)
- DECL_INT_ACCESSORS(raw_type)
-
// Dispatched behavior.
DECL_PRINTER(WasmTableObject)
- DECL_VERIFIER(WasmTableObject)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_WASM_TABLE_OBJECT_FIELDS)
inline wasm::ValueType type();
- static int Grow(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t count, Handle<Object> init_value);
+ V8_EXPORT_PRIVATE static int Grow(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t count, Handle<Object> init_value);
- static Handle<WasmTableObject> New(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- wasm::ValueType type, uint32_t initial,
- bool has_maximum, uint32_t maximum,
- Handle<FixedArray>* entries);
+ V8_EXPORT_PRIVATE static Handle<WasmTableObject> New(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ wasm::ValueType type, uint32_t initial, bool has_maximum,
+ uint32_t maximum, Handle<FixedArray>* entries);
- static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
- Handle<WasmInstanceObject> instance,
- int table_index);
+ V8_EXPORT_PRIVATE static void AddDispatchTable(
+ Isolate* isolate, Handle<WasmTableObject> table,
+ Handle<WasmInstanceObject> instance, int table_index);
static bool IsInBounds(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t entry_index);
@@ -232,14 +206,18 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static bool IsValidElement(Isolate* isolate, Handle<WasmTableObject> table,
Handle<Object> entry);
- static void Set(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t index, Handle<Object> entry);
+ V8_EXPORT_PRIVATE static void Set(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t index, Handle<Object> entry);
- static Handle<Object> Get(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t index);
+ V8_EXPORT_PRIVATE static Handle<Object> Get(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t index);
- static void Fill(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t start, Handle<Object> entry, uint32_t count);
+ V8_EXPORT_PRIVATE static void Fill(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ uint32_t start, Handle<Object> entry,
+ uint32_t count);
// TODO(wasm): Unify these three methods into one.
static void UpdateDispatchTables(Isolate* isolate,
@@ -260,11 +238,9 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
- static void SetFunctionTablePlaceholder(Isolate* isolate,
- Handle<WasmTableObject> table,
- int entry_index,
- Handle<WasmInstanceObject> instance,
- int func_index);
+ V8_EXPORT_PRIVATE static void SetFunctionTablePlaceholder(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ Handle<WasmInstanceObject> instance, int func_index);
// This function reads the content of a function table entry and returns it
// through the out parameters {is_valid}, {is_null}, {instance},
@@ -281,24 +257,17 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
Handle<FixedArray> entries, int entry_index,
Handle<Object> entry);
- OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(WasmTableObject)
};
// Representation of a WebAssembly.Memory JavaScript-level object.
-class WasmMemoryObject : public JSObject {
+class WasmMemoryObject
+ : public TorqueGeneratedWasmMemoryObject<WasmMemoryObject, JSObject> {
public:
- DECL_CAST(WasmMemoryObject)
-
- DECL_ACCESSORS(array_buffer, JSArrayBuffer)
- DECL_INT_ACCESSORS(maximum_pages)
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
// Dispatched behavior.
DECL_PRINTER(WasmMemoryObject)
- DECL_VERIFIER(WasmMemoryObject)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_WASM_MEMORY_OBJECT_FIELDS)
// Add an instance to the internal (weak) list.
V8_EXPORT_PRIVATE static void AddInstance(Isolate* isolate,
@@ -321,34 +290,19 @@ class WasmMemoryObject : public JSObject {
V8_EXPORT_PRIVATE static int32_t Grow(Isolate*, Handle<WasmMemoryObject>,
uint32_t pages);
- OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(WasmMemoryObject)
};
// Representation of a WebAssembly.Global JavaScript-level object.
-class WasmGlobalObject : public JSObject {
+class WasmGlobalObject
+ : public TorqueGeneratedWasmGlobalObject<WasmGlobalObject, JSObject> {
public:
- DECL_CAST(WasmGlobalObject)
-
- // The instance in which this WasmGlobalObject is defined.
- // This field is undefined if the global is defined outside any Wasm module,
- // i.e., through the JS API (WebAssembly.Global).
- // Because it might be undefined, we declare it as a HeapObject.
- DECL_ACCESSORS(instance, HeapObject)
DECL_ACCESSORS(untagged_buffer, JSArrayBuffer)
DECL_ACCESSORS(tagged_buffer, FixedArray)
- DECL_INT32_ACCESSORS(offset)
- DECL_INT_ACCESSORS(raw_type)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
- // TODO(7748): If we encode mutability in raw_type, turn this into a boolean
- // accessor.
- DECL_INT_ACCESSORS(is_mutable)
// Dispatched behavior.
DECL_PRINTER(WasmGlobalObject)
- DECL_VERIFIER(WasmGlobalObject)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FIELDS)
V8_EXPORT_PRIVATE static MaybeHandle<WasmGlobalObject> New(
Isolate* isolate, Handle<WasmInstanceObject> instance,
@@ -377,7 +331,7 @@ class WasmGlobalObject : public JSObject {
// not have a fixed address.
inline Address address() const;
- OBJECT_CONSTRUCTORS(WasmGlobalObject, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(WasmGlobalObject)
};
// Representation of a WebAssembly.Instance JavaScript-level object.
@@ -587,19 +541,11 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
};
// Representation of WebAssembly.Exception JavaScript-level object.
-class WasmExceptionObject : public JSObject {
+class WasmExceptionObject
+ : public TorqueGeneratedWasmExceptionObject<WasmExceptionObject, JSObject> {
public:
- DECL_CAST(WasmExceptionObject)
-
- DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
- DECL_ACCESSORS(exception_tag, HeapObject)
-
// Dispatched behavior.
DECL_PRINTER(WasmExceptionObject)
- DECL_VERIFIER(WasmExceptionObject)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_WASM_EXCEPTION_OBJECT_FIELDS)
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this exception object.
@@ -609,7 +555,7 @@ class WasmExceptionObject : public JSObject {
const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag);
- OBJECT_CONSTRUCTORS(WasmExceptionObject, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(WasmExceptionObject)
};
// A Wasm exception that has been thrown out of Wasm code.
@@ -690,7 +636,6 @@ class WasmCapiFunction : public JSFunction {
Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
Handle<PodArray<wasm::ValueType>> serialized_signature);
- Address GetHostCallTarget() const;
PodArray<wasm::ValueType> GetSerializedSignature() const;
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this C-API function object.
@@ -713,40 +658,33 @@ class WasmExternalFunction : public JSFunction {
OBJECT_CONSTRUCTORS(WasmExternalFunction, JSFunction);
};
-class WasmIndirectFunctionTable : public Struct {
+class WasmIndirectFunctionTable
+ : public TorqueGeneratedWasmIndirectFunctionTable<WasmIndirectFunctionTable,
+ Struct> {
public:
- DECL_PRIMITIVE_ACCESSORS(size, uint32_t)
DECL_PRIMITIVE_ACCESSORS(sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(targets, Address*)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
- DECL_ACCESSORS(refs, FixedArray)
V8_EXPORT_PRIVATE static Handle<WasmIndirectFunctionTable> New(
Isolate* isolate, uint32_t size);
static void Resize(Isolate* isolate, Handle<WasmIndirectFunctionTable> table,
uint32_t new_size);
- DECL_CAST(WasmIndirectFunctionTable)
-
DECL_PRINTER(WasmIndirectFunctionTable)
- DECL_VERIFIER(WasmIndirectFunctionTable)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(
- HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_INDIRECT_FUNCTION_TABLE_FIELDS)
STATIC_ASSERT(kStartOfStrongFieldsOffset == kManagedNativeAllocationsOffset);
using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
- OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
+ TQ_OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable)
};
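
A hedged sketch of the {New}/{Resize} pair declared above; {isolate} is an assumed placeholder:

  // Allocate a table with 4 entries, then grow it to 8.
  Handle<WasmIndirectFunctionTable> table =
      WasmIndirectFunctionTable::New(isolate, 4);
  WasmIndirectFunctionTable::Resize(isolate, table, 8);
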
class WasmFunctionData
: public TorqueGeneratedWasmFunctionData<WasmFunctionData, Foreign> {
public:
DECL_ACCESSORS(ref, Object)
+ DECL_ACCESSORS(wrapper_code, Code)
- DECL_CAST(WasmFunctionData)
DECL_PRINTER(WasmFunctionData)
TQ_OBJECT_CONSTRUCTORS(WasmFunctionData)
@@ -755,61 +693,50 @@ class WasmFunctionData
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
-class WasmExportedFunctionData : public WasmFunctionData {
+class WasmExportedFunctionData
+ : public TorqueGeneratedWasmExportedFunctionData<WasmExportedFunctionData,
+ WasmFunctionData> {
public:
- DECL_ACCESSORS(wrapper_code, Code)
- // This is the instance that exported the function (which in case of
- // imported and re-exported functions is different from the instance
- // where the function is defined -- for the latter see WasmFunctionData::ref).
- DECL_ACCESSORS(instance, WasmInstanceObject)
- DECL_INT_ACCESSORS(function_index)
- DECL_ACCESSORS(signature, Foreign)
- DECL_INT_ACCESSORS(wrapper_budget)
- DECL_ACCESSORS(c_wrapper_code, Object)
- DECL_INT_ACCESSORS(packed_args_size)
-
inline wasm::FunctionSig* sig() const;
- DECL_CAST(WasmExportedFunctionData)
-
// Dispatched behavior.
DECL_PRINTER(WasmExportedFunctionData)
DECL_VERIFIER(WasmExportedFunctionData)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- WasmFunctionData::kSize,
- TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
-
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(WasmExportedFunctionData, WasmFunctionData);
+ TQ_OBJECT_CONSTRUCTORS(WasmExportedFunctionData)
};
// Information for a WasmJSFunction which is referenced as the function data of
// the SharedFunctionInfo underlying the function. For details please see the
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
-class WasmJSFunctionData : public WasmFunctionData {
+class WasmJSFunctionData
+ : public TorqueGeneratedWasmJSFunctionData<WasmJSFunctionData,
+ WasmFunctionData> {
public:
- DECL_INT_ACCESSORS(serialized_return_count)
- DECL_INT_ACCESSORS(serialized_parameter_count)
- DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
- DECL_ACCESSORS(wrapper_code, Code)
DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
- DECL_CAST(WasmJSFunctionData)
-
// Dispatched behavior.
DECL_PRINTER(WasmJSFunctionData)
- DECL_VERIFIER(WasmJSFunctionData)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(WasmFunctionData::kSize,
- TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS)
+ class BodyDescriptor;
+
+ private:
+ DECL_ACCESSORS(raw_wasm_to_js_wrapper_code, CodeT)
+
+ TQ_OBJECT_CONSTRUCTORS(WasmJSFunctionData)
+};
+
+class WasmCapiFunctionData
+ : public TorqueGeneratedWasmCapiFunctionData<WasmCapiFunctionData,
+ WasmFunctionData> {
+ public:
+ DECL_PRINTER(WasmCapiFunctionData)
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(WasmJSFunctionData, WasmFunctionData);
+ TQ_OBJECT_CONSTRUCTORS(WasmCapiFunctionData)
};
class WasmScript : public AllStatic {
@@ -827,6 +754,12 @@ class WasmScript : public AllStatic {
V8_EXPORT_PRIVATE static bool SetBreakPoint(Handle<Script>, int* position,
Handle<BreakPoint> break_point);
+ // Set an "on entry" breakpoint (a.k.a. instrumentation breakpoint) inside
+ // the given module. This will affect all live and future instances of the
+ // module.
+ V8_EXPORT_PRIVATE static void SetBreakPointOnEntry(
+ Handle<Script>, Handle<BreakPoint> break_point);
+
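
A hedged sketch of the new hook; {script} and {break_point} are placeholder handles set up by the debugger:

  // Unlike SetBreakPoint, no byte position is needed: the breakpoint applies
  // module-wide, to all live and future instances.
  WasmScript::SetBreakPointOnEntry(script, break_point);
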
// Set a breakpoint on first breakable position of the given function index
// inside the given module. This will affect all live and future instances of
// the module.
@@ -887,31 +820,21 @@ class WasmExceptionTag
// Data annotated to the asm.js Module function. Used for later instantiation of
// that function.
-class AsmWasmData : public Struct {
+class AsmWasmData : public TorqueGeneratedAsmWasmData<AsmWasmData, Struct> {
public:
static Handle<AsmWasmData> New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<FixedArray> export_wrappers, Handle<HeapNumber> uses_bitset);
- DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
- DECL_ACCESSORS(export_wrappers, FixedArray)
- DECL_ACCESSORS(uses_bitset, HeapNumber)
-
- DECL_CAST(AsmWasmData)
DECL_PRINTER(AsmWasmData)
- DECL_VERIFIER(AsmWasmData)
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_ASM_WASM_DATA_FIELDS)
-
- OBJECT_CONSTRUCTORS(AsmWasmData, Struct);
+ TQ_OBJECT_CONSTRUCTORS(AsmWasmData)
};
class WasmTypeInfo : public TorqueGeneratedWasmTypeInfo<WasmTypeInfo, Foreign> {
public:
inline void clear_foreign_address(Isolate* isolate);
- DECL_CAST(WasmTypeInfo)
DECL_PRINTER(WasmTypeInfo)
class BodyDescriptor;
@@ -919,7 +842,32 @@ class WasmTypeInfo : public TorqueGeneratedWasmTypeInfo<WasmTypeInfo, Foreign> {
TQ_OBJECT_CONSTRUCTORS(WasmTypeInfo)
};
-class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
+class WasmObject : public TorqueGeneratedWasmObject<WasmObject, JSReceiver> {
+ public:
+  // Prepares the given value for storage in a field of the given Wasm type.
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToWasmValue(
+ Isolate* isolate, wasm::ValueType type, Handle<Object> value);
+
+ protected:
+  // Returns the boxed value of the object's field/element with the given
+  // type and offset.
+ static inline Handle<Object> ReadValueAt(Isolate* isolate,
+ Handle<HeapObject> obj,
+ wasm::ValueType type,
+ uint32_t offset);
+
+ static inline void WriteValueAt(Isolate* isolate, Handle<HeapObject> obj,
+ wasm::ValueType type, uint32_t offset,
+ Handle<Object> value);
+
+ private:
+ template <typename ElementType>
+ static ElementType FromNumber(Object value);
+
+ TQ_OBJECT_CONSTRUCTORS(WasmObject)
+};
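
A hedged sketch of the {ToWasmValue} helper; {isolate} and the JS {value} handle are placeholders, and kWasmI32 stands in for the target field type:

  Handle<Object> wasm_ready;
  if (!WasmObject::ToWasmValue(isolate, wasm::kWasmI32, value)
           .ToHandle(&wasm_ready)) {
    // {value} has no representation as an i32 field.
  }
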
+
+class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, WasmObject> {
public:
static inline wasm::StructType* type(Map map);
inline wasm::StructType* type() const;
@@ -927,11 +875,22 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
static inline int Size(const wasm::StructType* type);
static inline int GcSafeSize(Map map);
+  // Returns the address of the field at the given offset.
+  inline Address RawFieldAddress(int raw_offset);
+
+  // Returns the ObjectSlot for the tagged value at the given offset.
inline ObjectSlot RawField(int raw_offset);
wasm::WasmValue GetFieldValue(uint32_t field_index);
- DECL_CAST(WasmStruct)
+  // Returns the boxed value of the object's field.
+ static inline Handle<Object> GetField(Isolate* isolate,
+ Handle<WasmStruct> obj,
+ uint32_t field_index);
+
+ static inline void SetField(Isolate* isolate, Handle<WasmStruct> obj,
+ uint32_t field_index, Handle<Object> value);
+
DECL_PRINTER(WasmStruct)
class BodyDescriptor;
@@ -939,18 +898,28 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
TQ_OBJECT_CONSTRUCTORS(WasmStruct)
};
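
A hedged sketch of the boxed field accessors added above; {isolate} and the Handle<WasmStruct> {obj} are placeholders:

  // Read field 0 as a boxed JS object, then write it back unchanged.
  Handle<Object> boxed = WasmStruct::GetField(isolate, obj, 0);
  WasmStruct::SetField(isolate, obj, 0, boxed);
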
-class WasmArray : public TorqueGeneratedWasmArray<WasmArray, HeapObject> {
+class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
public:
static inline wasm::ArrayType* type(Map map);
inline wasm::ArrayType* type() const;
static inline wasm::ArrayType* GcSafeType(Map map);
+ // Get the {ObjectSlot} corresponding to the element at {index}. Requires that
+ // this is a reference array.
+ ObjectSlot ElementSlot(uint32_t index);
wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
static inline int GcSafeSizeFor(Map map, int length);
- DECL_CAST(WasmArray)
+  // Returns the boxed value of the array's element.
+ static inline Handle<Object> GetElement(Isolate* isolate,
+ Handle<WasmArray> array,
+ uint32_t index);
+
+ // Returns the Address of the element at {index}.
+ Address ElementAddress(uint32_t index);
+
DECL_PRINTER(WasmArray)
class BodyDescriptor;
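
A hedged sketch of the boxed element accessor; {isolate} and {array} are placeholders:

  Handle<Object> element = WasmArray::GetElement(isolate, array, 0);
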
@@ -968,7 +937,7 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
int array_index, MaybeHandle<Map> rtt_parent);
Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
- Handle<Map> parent);
+ Handle<Map> parent, WasmRttSubMode mode);
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
Handle<Object> value, ValueType expected,
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index 13911e590d..cc66d1ebc0 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -21,37 +21,43 @@ extern class WasmFunctionData extends Foreign {
// For imported functions, this value equals the respective entry in
// the module's imported_function_refs array.
ref: WasmInstanceObject|Tuple2;
+ // Used for calling this function from JavaScript.
+ @if(V8_EXTERNAL_CODE_SPACE) wrapper_code: CodeDataContainer;
+ @ifnot(V8_EXTERNAL_CODE_SPACE) wrapper_code: Code;
}
+@generateCppClass
extern class WasmExportedFunctionData extends WasmFunctionData {
- wrapper_code: Code;
+  // This is the instance that exported the function (which in the case of
+ // imported and re-exported functions is different from the instance
+ // where the function is defined -- for the latter see WasmFunctionData::ref).
instance: WasmInstanceObject;
function_index: Smi;
signature: Foreign;
wrapper_budget: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
- c_wrapper_code: Object;
+ @if(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: CodeDataContainer;
+ @ifnot(V8_EXTERNAL_CODE_SPACE) c_wrapper_code: Code;
packed_args_size: Smi;
}
+@generateCppClass
extern class WasmJSFunctionData extends WasmFunctionData {
- wrapper_code: Code;
- wasm_to_js_wrapper_code: Code;
+ @if(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: CodeDataContainer;
+ @ifnot(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
serialized_parameter_count: Smi;
serialized_signature: PodArrayOfWasmValueType;
}
-// TODO(jkummerow): Derive from WasmFunctionData.
-@export
-class WasmCapiFunctionData extends HeapObject {
- call_target: RawPtr;
+@generateCppClass
+extern class WasmCapiFunctionData extends WasmFunctionData {
embedder_data: Foreign; // Managed<wasm::FuncData>
- wrapper_code: Code;
serialized_signature: PodArrayOfWasmValueType;
}
+@generateCppClass
extern class WasmIndirectFunctionTable extends Struct {
size: uint32;
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@@ -70,14 +76,22 @@ extern class WasmExceptionTag extends Struct {
index: Smi;
}
+@generateCppClass
extern class WasmModuleObject extends JSObject {
- native_module: ManagedWasmNativeModule;
+ managed_native_module: ManagedWasmNativeModule;
export_wrappers: FixedArray;
script: Script;
}
+@generateCppClass
extern class WasmTableObject extends JSObject {
+ // The instance in which this WasmTableObject is defined.
+  // This field is undefined if the table is defined outside any Wasm module,
+ // i.e., through the JS API (WebAssembly.Table).
+ // Because it might be undefined, we declare it as a HeapObject.
instance: WasmInstanceObject|Undefined;
+ // The entries array is at least as big as {current_length()}, but might be
+ // bigger to make future growth more efficient.
entries: FixedArray;
current_length: Smi;
maximum_length: Smi|HeapNumber|Undefined;
@@ -85,21 +99,30 @@ extern class WasmTableObject extends JSObject {
raw_type: Smi;
}
+@generateCppClass
extern class WasmMemoryObject extends JSObject {
array_buffer: JSArrayBuffer;
maximum_pages: Smi;
instances: WeakArrayList|Undefined;
}
+@generateCppClass
extern class WasmGlobalObject extends JSObject {
+ // The instance in which this WasmGlobalObject is defined.
+ // This field is undefined if the global is defined outside any Wasm module,
+ // i.e., through the JS API (WebAssembly.Global).
+ // Because it might be undefined, we declare it as a HeapObject.
instance: WasmInstanceObject|Undefined;
untagged_buffer: JSArrayBuffer|Undefined;
tagged_buffer: FixedArray|Undefined;
offset: Smi;
raw_type: Smi;
+ // TODO(7748): If we encode mutability in raw_type, turn this into a boolean
+ // accessor.
is_mutable: Smi;
}
+@generateCppClass
extern class WasmExceptionObject extends JSObject {
serialized_signature: PodArrayOfWasmValueType;
exception_tag: HeapObject;
@@ -107,6 +130,7 @@ extern class WasmExceptionObject extends JSObject {
type WasmExportedFunction extends JSFunction;
+@generateCppClass
extern class AsmWasmData extends Struct {
managed_native_module: ManagedWasmNativeModule;
export_wrappers: FixedArray;
@@ -121,12 +145,20 @@ extern class WasmTypeInfo extends Foreign {
instance_size: Smi;
}
+// WasmObject corresponds to data ref types which are WasmStruct and WasmArray.
+@abstract
+@generateCppClass
+extern class WasmObject extends JSReceiver {
+}
+
@generateCppClass
-extern class WasmStruct extends HeapObject {
+@highestInstanceTypeWithinParentClassRange
+extern class WasmStruct extends WasmObject {
}
@generateCppClass
-extern class WasmArray extends HeapObject {
+@lowestInstanceTypeWithinParentClassRange
+extern class WasmArray extends WasmObject {
length: uint32;
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index bc14a4adef..550d7f4671 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -187,6 +187,8 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_INT_OP(StoreMem16, "store16")
CASE_I64_OP(StoreMem32, "store32")
CASE_S128_OP(StoreMem, "store128")
+ CASE_OP(RefEq, "ref.eq")
+ CASE_OP(Let, "let")
// Exception handling opcodes.
CASE_OP(Try, "try")
@@ -195,7 +197,6 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(Throw, "throw")
CASE_OP(Rethrow, "rethrow")
CASE_OP(CatchAll, "catch-all")
- CASE_OP(Unwind, "unwind")
// asm.js-only opcodes.
CASE_F64_OP(Acos, "acos")
@@ -391,13 +392,16 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ArrayGet, "array.get")
CASE_OP(ArrayGetS, "array.get_s")
CASE_OP(ArrayGetU, "array.get_u")
- CASE_OP(ArrayLen, "array.len")
CASE_OP(ArraySet, "array.set")
+ CASE_OP(ArrayLen, "array.len")
+ CASE_OP(ArrayCopy, "array.copy")
+ CASE_OP(ArrayInit, "array.init")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
CASE_OP(RttCanon, "rtt.canon")
CASE_OP(RttSub, "rtt.sub")
+ CASE_OP(RttFreshSub, "rtt.fresh_sub")
CASE_OP(RefTest, "ref.test")
CASE_OP(RefCast, "ref.cast")
CASE_OP(BrOnCast, "br_on_cast")
@@ -411,8 +415,9 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(BrOnFunc, "br_on_func")
CASE_OP(BrOnData, "br_on_data")
CASE_OP(BrOnI31, "br_on_i31")
- CASE_OP(RefEq, "ref.eq")
- CASE_OP(Let, "let")
+ CASE_OP(BrOnNonFunc, "br_on_non_func")
+ CASE_OP(BrOnNonData, "br_on_non_data")
+ CASE_OP(BrOnNonI31, "br_on_non_i31")
case kNumericPrefix:
case kSimdPrefix:
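
A hedged sketch of the name table in use; kExprRefEq assumes the usual kExpr-prefixed opcode constants generated from these macros:

  const char* name = WasmOpcodes::OpcodeName(kExprRefEq);  // "ref.eq"
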
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 5de6892124..50e813ad02 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -42,7 +42,6 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(Catch, 0x07, _ /* eh_prototype */) \
V(Throw, 0x08, _ /* eh_prototype */) \
V(Rethrow, 0x09, _ /* eh_prototype */) \
- V(Unwind, 0x0a, _ /* eh_prototype */) \
V(End, 0x0b, _) \
V(Br, 0x0c, _) \
V(BrIf, 0x0d, _) \
@@ -649,38 +648,44 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
-#define FOREACH_GC_OPCODE(V) \
- V(StructNewWithRtt, 0xfb01, _) \
- V(StructNewDefault, 0xfb02, _) \
- V(StructGet, 0xfb03, _) \
- V(StructGetS, 0xfb04, _) \
- V(StructGetU, 0xfb05, _) \
- V(StructSet, 0xfb06, _) \
- V(ArrayNewWithRtt, 0xfb11, _) \
- V(ArrayNewDefault, 0xfb12, _) \
- V(ArrayGet, 0xfb13, _) \
- V(ArrayGetS, 0xfb14, _) \
- V(ArrayGetU, 0xfb15, _) \
- V(ArraySet, 0xfb16, _) \
- V(ArrayLen, 0xfb17, _) \
- V(I31New, 0xfb20, _) \
- V(I31GetS, 0xfb21, _) \
- V(I31GetU, 0xfb22, _) \
- V(RttCanon, 0xfb30, _) \
- V(RttSub, 0xfb31, _) \
- V(RefTest, 0xfb40, _) \
- V(RefCast, 0xfb41, _) \
- V(BrOnCast, 0xfb42, _) \
- V(BrOnCastFail, 0xfb43, _) \
- V(RefIsFunc, 0xfb50, _) \
- V(RefIsData, 0xfb51, _) \
- V(RefIsI31, 0xfb52, _) \
- V(RefAsFunc, 0xfb58, _) \
- V(RefAsData, 0xfb59, _) \
- V(RefAsI31, 0xfb5a, _) \
- V(BrOnFunc, 0xfb60, _) \
- V(BrOnData, 0xfb61, _) \
- V(BrOnI31, 0xfb62, _)
+#define FOREACH_GC_OPCODE(V) \
+ V(StructNewWithRtt, 0xfb01, _) \
+ V(StructNewDefault, 0xfb02, _) \
+ V(StructGet, 0xfb03, _) \
+ V(StructGetS, 0xfb04, _) \
+ V(StructGetU, 0xfb05, _) \
+ V(StructSet, 0xfb06, _) \
+ V(ArrayNewWithRtt, 0xfb11, _) \
+ V(ArrayNewDefault, 0xfb12, _) \
+ V(ArrayGet, 0xfb13, _) \
+ V(ArrayGetS, 0xfb14, _) \
+ V(ArrayGetU, 0xfb15, _) \
+ V(ArraySet, 0xfb16, _) \
+ V(ArrayLen, 0xfb17, _) \
+ V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
+ V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
+ V(I31New, 0xfb20, _) \
+ V(I31GetS, 0xfb21, _) \
+ V(I31GetU, 0xfb22, _) \
+ V(RttCanon, 0xfb30, _) \
+ V(RttSub, 0xfb31, _) \
+ V(RttFreshSub, 0xfb32, _) /* not standardized - V8 experimental */ \
+ V(RefTest, 0xfb40, _) \
+ V(RefCast, 0xfb41, _) \
+ V(BrOnCast, 0xfb42, _) \
+ V(BrOnCastFail, 0xfb43, _) \
+ V(RefIsFunc, 0xfb50, _) \
+ V(RefIsData, 0xfb51, _) \
+ V(RefIsI31, 0xfb52, _) \
+ V(RefAsFunc, 0xfb58, _) \
+ V(RefAsData, 0xfb59, _) \
+ V(RefAsI31, 0xfb5a, _) \
+ V(BrOnFunc, 0xfb60, _) \
+ V(BrOnData, 0xfb61, _) \
+ V(BrOnI31, 0xfb62, _) \
+ V(BrOnNonFunc, 0xfb63, _) \
+ V(BrOnNonData, 0xfb64, _) \
+ V(BrOnNonI31, 0xfb65, _)
#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
/* AtomicFence does not target a particular linear memory. */ \
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index fc1104b8d0..5f49e54ddb 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -4,13 +4,13 @@
#include "src/wasm/wasm-result.h"
+#include "src/base/platform/platform.h"
+#include "src/base/strings.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
-#include "src/base/platform/platform.h"
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -28,9 +28,10 @@ void VPrintFToString(std::string* str, size_t str_offset, const char* format,
str->resize(len);
va_list args_copy;
va_copy(args_copy, args);
- int written = VSNPrintF(Vector<char>(&str->front() + str_offset,
- static_cast<int>(len - str_offset)),
- format, args_copy);
+ int written =
+ base::VSNPrintF(base::Vector<char>(&str->front() + str_offset,
+ static_cast<int>(len - str_offset)),
+ format, args_copy);
va_end(args_copy);
if (written < 0) continue; // not enough space.
str->resize(str_offset + written);
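
The loop above grows the target string until vsnprintf fits. The same grow-and-retry idea as a standalone, hypothetical helper (not part of this patch):

  #include <cstdarg>
  #include <cstdio>
  #include <string>

  std::string FormatToString(const char* format, ...) {
    std::string out(16, '\0');
    va_list args;
    va_start(args, format);
    for (;;) {
      va_list copy;
      va_copy(copy, args);
      int written = vsnprintf(&out[0], out.size(), format, copy);
      va_end(copy);
      if (written >= 0 && static_cast<size_t>(written) < out.size()) {
        out.resize(written);  // drop the unused tail
        break;
      }
      out.resize(out.size() * 2);  // not enough space: grow and retry
    }
    va_end(args);
    return out;
  }
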
@@ -127,7 +128,7 @@ Handle<Object> ErrorThrower::Reify() {
break;
}
Handle<String> message = isolate_->factory()
- ->NewStringFromUtf8(VectorOf(error_msg_))
+ ->NewStringFromUtf8(base::VectorOf(error_msg_))
.ToHandleChecked();
Reset();
return isolate_->factory()->NewError(constructor, message);
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index a47e420cb1..4c60d82c1b 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -35,13 +35,13 @@ namespace {
// in Wasm, e.g. StreamProcessor and ZoneBuffer, with these.
class Writer {
public:
- explicit Writer(Vector<byte> buffer)
+ explicit Writer(base::Vector<byte> buffer)
: start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
size_t bytes_written() const { return pos_ - start_; }
byte* current_location() const { return pos_; }
size_t current_size() const { return end_ - pos_; }
- Vector<byte> current_buffer() const {
+ base::Vector<byte> current_buffer() const {
return {current_location(), current_size()};
}
@@ -56,10 +56,10 @@ class Writer {
}
}
- void WriteVector(const Vector<const byte> v) {
+ void WriteVector(const base::Vector<const byte> v) {
DCHECK_GE(current_size(), v.size());
if (v.size() > 0) {
- base::Memcpy(current_location(), v.begin(), v.size());
+ memcpy(current_location(), v.begin(), v.size());
pos_ += v.size();
}
if (FLAG_trace_wasm_serialization) {
@@ -78,13 +78,13 @@ class Writer {
class Reader {
public:
- explicit Reader(Vector<const byte> buffer)
+ explicit Reader(base::Vector<const byte> buffer)
: start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
size_t bytes_read() const { return pos_ - start_; }
const byte* current_location() const { return pos_; }
size_t current_size() const { return end_ - pos_; }
- Vector<const byte> current_buffer() const {
+ base::Vector<const byte> current_buffer() const {
return {current_location(), current_size()};
}
@@ -102,16 +102,16 @@ class Reader {
}
template <typename T>
- Vector<const T> ReadVector(size_t size) {
+ base::Vector<const T> ReadVector(size_t size) {
DCHECK_GE(current_size(), size);
- Vector<const byte> bytes{pos_, size * sizeof(T)};
+ base::Vector<const byte> bytes{pos_, size * sizeof(T)};
pos_ += size * sizeof(T);
if (FLAG_trace_wasm_serialization) {
StdoutStream{} << "read vector of " << size << " elements of size "
<< sizeof(T) << " (total size " << size * sizeof(T) << ")"
<< std::endl;
}
- return Vector<const T>::cast(bytes);
+ return base::Vector<const T>::cast(bytes);
}
void Skip(size_t size) { pos_ += size; }
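
A hedged roundtrip sketch for the Writer/Reader pair above ({byte} is V8's uint8_t alias):

  byte buffer[16];
  Writer writer({buffer, sizeof(buffer)});
  writer.Write(uint32_t{42});
  Reader reader({buffer, sizeof(buffer)});
  uint32_t value = reader.Read<uint32_t>();  // value == 42
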
@@ -275,7 +275,7 @@ static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
- NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModule*, base::Vector<WasmCode* const>);
NativeModuleSerializer(const NativeModuleSerializer&) = delete;
NativeModuleSerializer& operator=(const NativeModuleSerializer&) = delete;
@@ -288,13 +288,13 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
bool WriteCode(const WasmCode*, Writer*);
const NativeModule* const native_module_;
- const Vector<WasmCode* const> code_table_;
+ const base::Vector<WasmCode* const> code_table_;
bool write_called_ = false;
size_t total_written_code_ = 0;
};
NativeModuleSerializer::NativeModuleSerializer(
- const NativeModule* module, Vector<WasmCode* const> code_table)
+ const NativeModule* module, base::Vector<WasmCode* const> code_table)
: native_module_(module), code_table_(code_table) {
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
@@ -352,7 +352,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->code_comments_offset());
writer->Write(code->unpadded_binary_size());
writer->Write(code->stack_slots());
- writer->Write(code->tagged_parameter_slots());
+ writer->Write(code->raw_tagged_parameter_slots_for_serialization());
writer->Write(code->instructions().length());
writer->Write(code->reloc_info().length());
writer->Write(code->source_positions().length());
@@ -383,7 +383,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
code_start = aligned_buffer.get();
}
#endif
- base::Memcpy(code_start, code->instructions().begin(), code_size);
+ memcpy(code_start, code->instructions().begin(), code_size);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL) |
@@ -430,7 +430,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
}
// If we copied to an aligned buffer, copy code into serialized buffer.
if (code_start != serialized_code_start) {
- base::Memcpy(serialized_code_start, code_start, code_size);
+ memcpy(serialized_code_start, code_start, code_size);
}
total_written_code_ += code_size;
return true;
@@ -464,12 +464,14 @@ WasmSerializer::WasmSerializer(NativeModule* native_module)
code_table_(native_module->SnapshotCodeTable()) {}
size_t WasmSerializer::GetSerializedNativeModuleSize() const {
- NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
+ NativeModuleSerializer serializer(native_module_,
+ base::VectorOf(code_table_));
return kHeaderSize + serializer.Measure();
}
-bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
- NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
+bool WasmSerializer::SerializeNativeModule(base::Vector<byte> buffer) const {
+ NativeModuleSerializer serializer(native_module_,
+ base::VectorOf(code_table_));
size_t measured_size = kHeaderSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
@@ -482,7 +484,7 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
}
struct DeserializationUnit {
- Vector<const byte> src_code_buffer;
+ base::Vector<const byte> src_code_buffer;
std::unique_ptr<WasmCode> code;
NativeModule::JumpTablesRef jump_tables;
};
@@ -550,7 +552,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
// Updated in {ReadCode}.
size_t remaining_code_size_ = 0;
- Vector<byte> current_code_space_;
+ base::Vector<byte> current_code_space_;
NativeModule::JumpTablesRef current_jump_tables_;
};
@@ -566,9 +568,7 @@ class CopyAndRelocTask : public JobTask {
publish_handle_(std::move(publish_handle)) {}
void Run(JobDelegate* delegate) override {
- CODE_SPACE_WRITE_SCOPE
- NativeModuleModificationScope native_module_modification_scope(
- deserializer_->native_module_);
+ CodeSpaceWriteScope code_space_write_scope(deserializer_->native_module_);
do {
auto batch = from_queue_->Pop();
if (batch.empty()) break;
@@ -698,7 +698,7 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
int code_comment_offset = reader->Read<int>();
int unpadded_binary_size = reader->Read<int>();
int stack_slot_count = reader->Read<int>();
- int tagged_parameter_slots = reader->Read<int>();
+ uint32_t tagged_parameter_slots = reader->Read<uint32_t>();
int code_size = reader->Read<int>();
int reloc_size = reader->Read<int>();
int source_position_size = reader->Read<int>();
@@ -727,7 +727,8 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
auto protected_instructions =
reader->ReadVector<byte>(protected_instructions_size);
- Vector<uint8_t> instructions = current_code_space_.SubVector(0, code_size);
+ base::Vector<uint8_t> instructions =
+ current_code_space_.SubVector(0, code_size);
current_code_space_ += code_size;
remaining_code_size_ -= code_size;
@@ -742,8 +743,8 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
void NativeModuleDeserializer::CopyAndRelocate(
const DeserializationUnit& unit) {
- base::Memcpy(unit.code->instructions().begin(), unit.src_code_buffer.begin(),
- unit.src_code_buffer.size());
+ memcpy(unit.code->instructions().begin(), unit.src_code_buffer.begin(),
+ unit.src_code_buffer.size());
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -802,14 +803,14 @@ void NativeModuleDeserializer::Publish(std::vector<DeserializationUnit> batch) {
for (auto& unit : batch) {
codes.emplace_back(std::move(unit).code);
}
- auto published_codes = native_module_->PublishCode(VectorOf(codes));
+ auto published_codes = native_module_->PublishCode(base::VectorOf(codes));
for (auto* wasm_code : published_codes) {
wasm_code->MaybePrint();
wasm_code->Validate();
}
}
-bool IsSupportedVersion(Vector<const byte> header) {
+bool IsSupportedVersion(base::Vector<const byte> header) {
if (header.size() < WasmSerializer::kHeaderSize) return false;
byte current_version[WasmSerializer::kHeaderSize];
Writer writer({current_version, WasmSerializer::kHeaderSize});
@@ -819,17 +820,18 @@ bool IsSupportedVersion(Vector<const byte> header) {
}
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
- Isolate* isolate, Vector<const byte> data,
- Vector<const byte> wire_bytes_vec, Vector<const char> source_url) {
+ Isolate* isolate, base::Vector<const byte> data,
+ base::Vector<const byte> wire_bytes_vec,
+ base::Vector<const char> source_url) {
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) return {};
if (!IsSupportedVersion(data)) return {};
// Make the copy of the wire bytes early, so we use the same memory for
// decoding, lookup in the native module cache, and insertion into the cache.
- auto owned_wire_bytes = OwnedVector<uint8_t>::Of(wire_bytes_vec);
+ auto owned_wire_bytes = base::OwnedVector<uint8_t>::Of(wire_bytes_vec);
// TODO(titzer): module features should be part of the serialization format.
- WasmEngine* wasm_engine = isolate->wasm_engine();
+ WasmEngine* wasm_engine = GetWasmEngine();
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
ModuleResult decode_result = DecodeWasmModule(
enabled_features, owned_wire_bytes.start(), owned_wire_bytes.end(), false,
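
A hedged call sketch; {serialized_blob} and {wire_bytes} are placeholder byte containers:

  MaybeHandle<WasmModuleObject> maybe_module = DeserializeNativeModule(
      isolate, base::VectorOf(serialized_blob), base::VectorOf(wire_bytes),
      base::CStrVector("module.wasm"));
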
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index a8aff9a6b8..dc33dcf5cc 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE WasmSerializer {
// Serialize the {NativeModule} into the provided {buffer}. Returns true on
// success and false if the given buffer is too small for serialization.
- bool SerializeNativeModule(Vector<byte> buffer) const;
+ bool SerializeNativeModule(base::Vector<byte> buffer) const;
// The data header consists of uint32_t-sized entries (see {WriteVersion}):
// [0] magic number
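
The intended measure-then-serialize contract, as a hedged sketch ({native_module} is a placeholder):

  WasmSerializer serializer(native_module);
  std::vector<byte> buffer(serializer.GetSerializedNativeModuleSize());
  CHECK(serializer.SerializeNativeModule(base::VectorOf(buffer)));
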
@@ -54,12 +54,12 @@ class V8_EXPORT_PRIVATE WasmSerializer {
// Support for deserializing WebAssembly {NativeModule} objects.
// Checks the version header of the data against the current version.
-bool IsSupportedVersion(Vector<const byte> data);
+bool IsSupportedVersion(base::Vector<const byte> data);
// Deserializes the given data to create a Wasm module object.
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> DeserializeNativeModule(
- Isolate*, Vector<const byte> data, Vector<const byte> wire_bytes,
- Vector<const char> source_url);
+ Isolate*, base::Vector<const byte> data,
+ base::Vector<const byte> wire_bytes, base::Vector<const char> source_url);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index faaad18076..b2e5aca74d 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -42,16 +42,21 @@ class Simd128 {
explicit Simd128(sType val) { \
base::WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
} \
- sType to_##name() { \
+ sType to_##name() const { \
return base::ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
}
FOREACH_SIMD_TYPE(DEFINE_SIMD_TYPE_SPECIFIC_METHODS)
#undef DEFINE_SIMD_TYPE_SPECIFIC_METHODS
+ explicit Simd128(byte* bytes) {
+ memcpy(static_cast<void*>(val_), reinterpret_cast<void*>(bytes),
+ kSimd128Size);
+ }
+
const uint8_t* bytes() { return val_; }
template <typename T>
- inline T to();
+ inline T to() const;
private:
uint8_t val_[16] = {0};
@@ -59,7 +64,7 @@ class Simd128 {
#define DECLARE_CAST(cType, sType, name, size) \
template <> \
- inline sType Simd128::to() { \
+ inline sType Simd128::to() const { \
return to_##name(); \
}
FOREACH_SIMD_TYPE(DECLARE_CAST)
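
A hedged sketch of the new raw-bytes constructor (kSimd128Size is V8's 16-byte constant):

  byte raw[kSimd128Size] = {1};  // first byte 1, rest zero-initialized
  Simd128 s(raw);
  const uint8_t* view = s.bytes();  // view[0] == 1
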
@@ -90,30 +95,38 @@ class WasmValue {
public:
WasmValue() : type_(kWasmVoid), bit_pattern_{} {}
-#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
- static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
- "size too big for WasmValue"); \
- base::WriteLittleEndianValue<ctype>( \
- reinterpret_cast<Address>(bit_pattern_), v); \
- } \
- ctype to_##name() const { \
- DCHECK_EQ(localtype, type_); \
- return to_##name##_unchecked(); \
- } \
- ctype to_##name##_unchecked() const { \
- return base::ReadLittleEndianValue<ctype>( \
- reinterpret_cast<Address>(bit_pattern_)); \
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
+ static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
+ "size too big for WasmValue"); \
+ base::WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), \
+ v); \
+ } \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ return base::ReadUnalignedValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_)); \
}
+
FOREACH_PRIMITIVE_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
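
A hedged sketch of the constructors and getters the macro above generates:

  WasmValue v(int32_t{7});
  int32_t checked = v.to_i32();              // DCHECKs that the type is i32
  int32_t unchecked = v.to_i32_unchecked();  // skips the type check
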
+ WasmValue(byte* raw_bytes, ValueType type) : type_(type), bit_pattern_{} {
+ DCHECK(type_.is_numeric());
+ memcpy(bit_pattern_, raw_bytes, type.element_size_bytes());
+ }
+
WasmValue(Handle<Object> ref, ValueType type) : type_(type), bit_pattern_{} {
static_assert(sizeof(Handle<Object>) <= sizeof(bit_pattern_),
"bit_pattern_ must be large enough to fit a Handle");
+ DCHECK(type.is_reference());
base::WriteUnalignedValue<Handle<Object>>(
reinterpret_cast<Address>(bit_pattern_), ref);
}
+
Handle<Object> to_ref() const {
DCHECK(type_.is_reference());
return base::ReadUnalignedValue<Handle<Object>>(
@@ -125,7 +138,30 @@ class WasmValue {
// Checks equality of type and bit pattern (also for float and double values).
bool operator==(const WasmValue& other) const {
return type_ == other.type_ &&
- !memcmp(bit_pattern_, other.bit_pattern_, 16);
+ !memcmp(bit_pattern_, other.bit_pattern_,
+ type_.is_reference() ? sizeof(Handle<Object>)
+ : type_.element_size_bytes());
+ }
+
+ void CopyTo(byte* to) const {
+ STATIC_ASSERT(sizeof(float) == sizeof(Float32));
+ STATIC_ASSERT(sizeof(double) == sizeof(Float64));
+ DCHECK(type_.is_numeric());
+ memcpy(to, bit_pattern_, type_.element_size_bytes());
+ }
+
+ // If {packed_type.is_packed()}, create a new value of {packed_type()}.
+ // Otherwise, return this object.
+ WasmValue Packed(ValueType packed_type) const {
+ if (packed_type == kWasmI8) {
+ DCHECK_EQ(type_, kWasmI32);
+ return WasmValue(static_cast<int8_t>(to_i32()));
+ }
+ if (packed_type == kWasmI16) {
+ DCHECK_EQ(type_, kWasmI32);
+ return WasmValue(static_cast<int16_t>(to_i32()));
+ }
+ return *this;
}
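
A hedged sketch of {Packed} truncating an i32 into the i8 representation used by packed struct/array fields:

  WasmValue full(int32_t{0x1234});
  WasmValue packed = full.Packed(kWasmI8);
  int8_t low = packed.to_i8();  // 0x34, the low byte
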
template <typename T>
@@ -140,6 +176,40 @@ class WasmValue {
return WasmValue{type{value}};
}
+ inline std::string to_string() const {
+ switch (type_.kind()) {
+ case kI8:
+ return std::to_string(to_i8());
+ case kI16:
+ return std::to_string(to_i16());
+ case kI32:
+ return std::to_string(to_i32());
+ case kI64:
+ return std::to_string(to_i64());
+ case kF32:
+ return std::to_string(to_f32());
+ case kF64:
+ return std::to_string(to_f64());
+ case kS128: {
+ std::stringstream stream;
+ stream << "0x" << std::hex;
+        for (uint8_t byte : bit_pattern_) {
+          // Print as unsigned so std::hex formats hex digits, not raw chars.
+          if (byte < 0x10) stream << '0';
+          stream << static_cast<unsigned>(byte);
+ }
+ return stream.str();
+ }
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ return "Handle [" + std::to_string(to_ref().address()) + "]";
+ case kVoid:
+ case kBottom:
+ UNREACHABLE();
+ }
+ }
+
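
A hedged sketch of the new debugging helper:

  WasmValue v(int32_t{7});
  std::string s = v.to_string();  // "7"
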
private:
ValueType type_;
uint8_t bit_pattern_[16];