path: root/chromium/v8/src/wasm
author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-13 16:23:34 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-14 10:37:21 +0000
commit     38a9a29f4f9436cace7f0e7abf9c586057df8a4e (patch)
tree       c4e8c458dc595bc0ddb435708fa2229edfd00bd4 /chromium/v8/src/wasm
parent     e684a3455bcc29a6e3e66a004e352dea4e1141e7 (diff)
download   qtwebengine-chromium-38a9a29f4f9436cace7f0e7abf9c586057df8a4e.tar.gz
BASELINE: Update Chromium to 73.0.3683.37
Change-Id: I08c9af2948b645f671e5d933aca1f7a90ea372f2
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm')
-rw-r--r--  chromium/v8/src/wasm/OWNERS |    2
-rw-r--r--  chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h |  429
-rw-r--r--  chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h |   23
-rw-r--r--  chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h |  107
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.cc |  498
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.h |   49
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.cc |  335
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.h |    8
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-register.h |   49
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/OWNERS |    3
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h |   59
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/OWNERS |    3
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h |   39
-rw-r--r--  chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h |   14
-rw-r--r--  chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h |   14
-rw-r--r--  chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h |   62
-rw-r--r--  chromium/v8/src/wasm/compilation-environment.h |   32
-rw-r--r--  chromium/v8/src/wasm/decoder.h |   87
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder-impl.h | 1563
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.cc |   12
-rw-r--r--  chromium/v8/src/wasm/function-compiler.cc |  184
-rw-r--r--  chromium/v8/src/wasm/function-compiler.h |   66
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.cc |  258
-rw-r--r--  chromium/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h |   41
-rw-r--r--  chromium/v8/src/wasm/jump-table-assembler.h |    5
-rw-r--r--  chromium/v8/src/wasm/module-compiler.cc | 2037
-rw-r--r--  chromium/v8/src/wasm/module-compiler.h |   23
-rw-r--r--  chromium/v8/src/wasm/module-decoder.cc |  191
-rw-r--r--  chromium/v8/src/wasm/module-instantiate.cc | 1537
-rw-r--r--  chromium/v8/src/wasm/module-instantiate.h |   35
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.cc |   40
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.h |   25
-rw-r--r--  chromium/v8/src/wasm/value-type.h |   10
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.cc |  152
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.h |   44
-rw-r--r--  chromium/v8/src/wasm/wasm-constants.h |   13
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.cc |   51
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.cc |   40
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.h |    9
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.cc |   13
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.h |    4
-rw-r--r--  chromium/v8/src/wasm/wasm-feature-flags.h |    2
-rw-r--r--  chromium/v8/src/wasm/wasm-import-wrapper-cache-inl.h |   11
-rw-r--r--  chromium/v8/src/wasm/wasm-interpreter.cc |   83
-rw-r--r--  chromium/v8/src/wasm/wasm-js.cc |  503
-rw-r--r--  chromium/v8/src/wasm/wasm-linkage.h |    2
-rw-r--r--  chromium/v8/src/wasm/wasm-memory.cc |   28
-rw-r--r--  chromium/v8/src/wasm/wasm-memory.h |   12
-rw-r--r--  chromium/v8/src/wasm/wasm-module.cc |    5
-rw-r--r--  chromium/v8/src/wasm/wasm-module.h |   18
-rw-r--r--  chromium/v8/src/wasm/wasm-objects-inl.h |  134
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.cc |  341
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.h |  306
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.cc |   33
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.h |   38
-rw-r--r--  chromium/v8/src/wasm/wasm-result.cc |    2
-rw-r--r--  chromium/v8/src/wasm/wasm-result.h |  108
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.cc |  150
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.h |    7
-rw-r--r--  chromium/v8/src/wasm/wasm-text.cc |   20
60 files changed, 5815 insertions, 4154 deletions
diff --git a/chromium/v8/src/wasm/OWNERS b/chromium/v8/src/wasm/OWNERS
index 2d8fbeb65be..c9b1aa4d780 100644
--- a/chromium/v8/src/wasm/OWNERS
+++ b/chromium/v8/src/wasm/OWNERS
@@ -8,4 +8,6 @@ gdeepti@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
+per-file wasm-js.*=adamk@chromium.org
+
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index ff7e88ed76c..a4de6ceed7e 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -37,11 +37,12 @@ namespace liftoff {
// | | v
// -----+--------------------+ <-- stack ptr (sp)
//
-static_assert(2 * kPointerSize == LiftoffAssembler::kStackSlotSize,
+static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
"Slot size should be twice the size of the 32 bit pointer.");
-constexpr int32_t kInstanceOffset = 2 * kPointerSize;
-constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + 2 * kPointerSize;
-constexpr int32_t kConstantStackSpace = kPointerSize;
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset =
+ kInstanceOffset + 2 * kSystemPointerSize;
+constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// kPatchInstructionsRequired sets a maximum limit of how many instructions that
// PatchPrepareStackFrame will use in order to increase the stack appropriately.
// Three instructions are required to sub a large constant, movw + movt + sub.
@@ -53,20 +54,14 @@ inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -offset);
}
-inline MemOperand GetHalfStackSlot(uint32_t half_index) {
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset +
- half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
}
-inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
- if (half == kLowWord) {
- return GetHalfStackSlot(2 * index);
- } else {
- return GetHalfStackSlot(2 * index - 1);
- }
-}
-
inline MemOperand GetInstanceOperand() {
return MemOperand(fp, -kInstanceOffset);
}
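
The rewritten GetHalfStackSlot now takes the full slot index plus a RegPairHalf instead of a pre-scaled half-index. The arithmetic is easy to sanity-check standalone; a minimal sketch, assuming the 32-bit ARM constants from the hunk above (4-byte pointers, 8-byte slots, first slot at fp-16; the helper name HalfSlotOffset is illustrative):

    // Standalone check of the new half-slot addressing on 32-bit ARM.
    #include <cassert>
    #include <cstdint>

    constexpr int32_t kSystemPointerSize = 4;
    constexpr int32_t kStackSlotSize = 2 * kSystemPointerSize;       // 8
    constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;      // 8
    constexpr int32_t kFirstStackSlotOffset =
        kInstanceOffset + 2 * kSystemPointerSize;                    // 16

    enum RegPairHalf { kLowWord, kHighWord };

    // fp-relative offset produced by GetHalfStackSlot(index, half).
    int32_t HalfSlotOffset(int32_t index, RegPairHalf half) {
      int32_t half_offset = half == kLowWord ? 0 : kStackSlotSize / 2;
      int32_t offset = kFirstStackSlotOffset + index * kStackSlotSize - half_offset;
      return -offset;
    }

    int main() {
      // Slots grow downwards from fp; the low word of a pair sits at the
      // lower address, matching little-endian i64 layout.
      assert(HalfSlotOffset(0, kLowWord) == -16);
      assert(HalfSlotOffset(0, kHighWord) == -12);
      assert(HalfSlotOffset(1, kLowWord) == -24);
    }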
@@ -144,88 +139,64 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-// safe_amount_reg is the register in which the register holding the shift
-// amount can be held without being clobbered, thus the original register
-// holding the shift amount can be moved into it if required.
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
- Register)>
+ Register),
+ bool is_left_shift>
inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src, Register amount,
- Register safe_amount_reg, LiftoffRegList pinned) {
- DCHECK(safe_amount_reg == dst.low_gp() || safe_amount_reg == dst.high_gp());
- Register other_reg =
- (safe_amount_reg == dst.low_gp()) ? dst.high_gp() : dst.low_gp();
- pinned.set(other_reg);
- pinned.set(src.low_gp());
- pinned.set(src.high_gp());
- Register scratch = assm->GetUnusedRegister(kGpReg, pinned).gp();
- assm->and_(scratch, amount, Operand(0x3F));
- (assm->*op)(dst.low_gp(), dst.high_gp(), src.low_gp(), src.high_gp(),
- scratch);
+ LiftoffRegList pinned) {
+ Register src_low = src.low_gp();
+ Register src_high = src.high_gp();
+ Register dst_low = dst.low_gp();
+ Register dst_high = dst.high_gp();
+ // Left shift writes {dst_high} then {dst_low}, right shifts write {dst_low}
+ // then {dst_high}.
+ Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;
+ pinned.set(clobbered_dst_reg);
+ pinned.set(src);
+ Register amount_capped =
+ pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
+ assm->and_(amount_capped, amount, Operand(0x3F));
+
+ // Ensure that writing the first half of {dst} does not overwrite the still
+ // needed half of {src}.
+ Register* later_src_reg = is_left_shift ? &src_low : &src_high;
+ if (*later_src_reg == clobbered_dst_reg) {
+ *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg);
+ }
+
+ (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
+}
+
+inline FloatRegister GetFloatRegister(DoubleRegister reg) {
+ DCHECK_LT(reg.code(), kDoubleCode_d16);
+ return LowDwVfpRegister::from_code(reg.code()).low();
}
enum class MinOrMax : uint8_t { kMin, kMax };
-inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
- DoubleRegister lhs, DoubleRegister rhs,
+template <typename RegisterType>
+inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
+ RegisterType lhs, RegisterType rhs,
MinOrMax min_or_max) {
- Label is_nan;
- Label lhs_below_rhs;
- Label lhs_above_rhs;
- Label done;
- // Check the easy cases first: nan (e.g. unordered), smaller and greater.
- assm->VFPCompareAndSetFlags(lhs, rhs);
- assm->b(&is_nan, vs);
-
- if (CpuFeatures::IsSupported(ARMv8)) {
- CpuFeatureScope scope(assm, ARMv8);
- if (min_or_max == MinOrMax::kMin) {
- assm->vminnm(dst, lhs, rhs);
- } else {
- assm->vmaxnm(dst, lhs, rhs);
- }
- assm->b(&done);
- assm->bind(&is_nan);
- // Create a NaN output.
- assm->vadd(dst, lhs, rhs);
+ DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8);
+ if (lhs == rhs) {
+ assm->TurboAssembler::Move(dst, lhs);
+ return;
+ }
+ Label done, is_nan;
+ if (min_or_max == MinOrMax::kMin) {
+ assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan);
} else {
- assm->b(&lhs_below_rhs, lt);
- assm->b(&lhs_above_rhs, gt);
-
- UseScratchRegisterScope temps(assm);
- Register scratch = temps.Acquire();
-
- // If we get here, then either
- // a) {lhs == rhs},
- // b) {lhs == -0.0} and {rhs == 0.0}, or
- // c) {lhs == 0.0} and {rhs == -0.0}.
- // For a), it does not matter whether we return {lhs} or {rhs}. Check the
- // sign bit of {rhs} to differentiate b) and c).
- assm->VmovHigh(scratch, lhs);
- assm->cmp(scratch, Operand(0));
- assm->b(&lhs_below_rhs, mi);
- assm->b(&lhs_above_rhs);
- assm->bind(&is_nan);
- // Create a NaN output.
- assm->vadd(dst, lhs, rhs);
-
- assm->b(&done);
- assm->bind(&lhs_below_rhs);
- DoubleRegister lhs_below_rhs_src =
- (min_or_max == MinOrMax::kMin) ? lhs : rhs;
- if (dst != lhs_below_rhs_src) {
- assm->vmov(dst, lhs_below_rhs_src);
- }
- assm->b(&done);
-
- assm->bind(&lhs_above_rhs);
- DoubleRegister lhs_above_rhs_src =
- (min_or_max == MinOrMax::kMin) ? rhs : lhs;
- if (dst != lhs_above_rhs_src) {
- assm->vmov(dst, lhs_above_rhs_src);
- }
+ assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan);
}
+ assm->b(&done);
+ assm->bind(&is_nan);
+ // Create a NaN output.
+ assm->vadd(dst, lhs, rhs);
assm->bind(&done);
}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
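
The new EmitFloatMinOrMax delegates the ordered cases to TurboAssembler::FloatMin/FloatMax and keeps only the NaN fix-up, where vadd of the two inputs produces a quiet NaN. The wasm semantics it implements can be modeled in scalar C++; this is an illustrative reference, not the emitted code:

    // Scalar model of wasm f32.min semantics: any NaN input yields NaN,
    // and -0.0 is treated as smaller than +0.0.
    #include <cmath>
    #include <cstdio>

    float WasmF32Min(float lhs, float rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return lhs + rhs;  // quiet NaN
      if (lhs == rhs) return std::signbit(lhs) ? lhs : rhs;      // -0.0 vs +0.0
      return lhs < rhs ? lhs : rhs;
    }

    int main() {
      std::printf("%f\n", WasmF32Min(-0.0f, 0.0f));  // -0.000000
      std::printf("%f\n", WasmF32Min(1.0f, NAN));    // nan
    }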
@@ -258,7 +229,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
return;
}
#endif
- PatchingAssembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
+ PatchingAssembler patching_assembler(AssemblerOptions{},
+ buffer_start_ + offset,
liftoff::kPatchInstructionsRequired);
patching_assembler.sub(sp, sp, Operand(bytes));
patching_assembler.PadWithNops();
@@ -283,7 +255,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
}
case kWasmF32:
- BAILOUT("Load f32 Constant");
+ vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case kWasmF64: {
Register extra_scratch = GetUnusedRegister(kGpReg).gp();
@@ -303,6 +275,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
ldr(dst, MemOperand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
str(instance, liftoff::GetInstanceOperand());
}
@@ -311,6 +288,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
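
LoadTaggedPointer can simply reuse the integer Load path because, without pointer compression, a tagged V8 value occupies exactly one machine word; the STATIC_ASSERT pins that assumption per architecture (kInt32Size here on arm, kInt64Size in the arm64 version below). A compile-time sketch of the invariant, stated in plain C++:

    // Sketch: without pointer compression, a tagged value is one machine
    // word, so a tagged load reduces to a plain i32 or i64 load.
    #include <cstdint>

    constexpr int kTaggedSize = sizeof(void*);
    static_assert(kTaggedSize == sizeof(int32_t) || kTaggedSize == sizeof(int64_t),
                  "tagged loads are plain word loads");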
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -324,12 +310,22 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
return;
}
UseScratchRegisterScope temps(this);
- if (type.value() == LoadType::kF64Load) {
- // Armv6 is not supported so Neon can be used to avoid alignment issues.
- CpuFeatureScope scope(this, NEON);
+ if (type.value() == LoadType::kF64Load ||
+ type.value() == LoadType::kF32Load) {
Register actual_src_addr = liftoff::CalculateActualAddress(
this, &temps, src_addr, offset_reg, offset_imm);
- vld1(Neon64, NeonListOperand(dst.fp()), NeonMemOperand(actual_src_addr));
+ if (type.value() == LoadType::kF64Load) {
+ // Armv6 is not supported so Neon can be used to avoid alignment issues.
+ CpuFeatureScope scope(this, NEON);
+ vld1(Neon64, NeonListOperand(dst.fp()), NeonMemOperand(actual_src_addr));
+ } else {
+ // TODO(arm): Use vld1 for f32 when implemented in simulator as used for
+ // f64. It supports unaligned access.
+ Register scratch =
+ (actual_src_addr == src_addr) ? temps.Acquire() : actual_src_addr;
+ ldr(scratch, MemOperand(actual_src_addr));
+ vmov(liftoff::GetFloatRegister(dst.fp()), scratch);
+ }
} else {
MemOperand src_op =
liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
@@ -387,9 +383,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
ldr(dst.high_gp(), src_op);
break;
- case LoadType::kF32Load:
- BAILOUT("Load f32");
- break;
default:
UNREACHABLE();
}
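
The f32 load goes through an integer ldr plus vmov because vldr requires an aligned address while wasm permits unaligned memory accesses (and, per the TODO, vld1 is not yet usable for f32 in the simulator). A host-side model of that two-step sequence:

    // Model of the ldr+vmov pair: read 4 possibly-unaligned bytes as an
    // integer, then reinterpret the bits as an f32 (what vmov s<n>, r<n> does).
    #include <cstdint>
    #include <cstring>

    float LoadF32Unaligned(const uint8_t* addr) {
      uint32_t bits;
      std::memcpy(&bits, addr, sizeof(bits));        // the "ldr" step
      float result;
      std::memcpy(&result, &bits, sizeof(result));   // the "vmov" step
      return result;
    }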
@@ -409,11 +402,24 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
UseScratchRegisterScope temps(this);
if (type.value() == StoreType::kF64Store) {
+ Register actual_dst_addr = liftoff::CalculateActualAddress(
+ this, &temps, dst_addr, offset_reg, offset_imm);
// Armv6 is not supported so Neon can be used to avoid alignment issues.
CpuFeatureScope scope(this, NEON);
+ vst1(Neon64, NeonListOperand(src.fp()), NeonMemOperand(actual_dst_addr));
+ } else if (type.value() == StoreType::kF32Store) {
+ // TODO(arm): Use vst1 for f32 when implemented in simulator as used for
+ // f64. It supports unaligned access.
+ // CalculateActualAddress only avoids a scratch register if the following
+ // condition holds; otherwise another register must be retrieved.
+ Register scratch = (offset_reg == no_reg && offset_imm == 0)
+ ? temps.Acquire()
+ : GetUnusedRegister(kGpReg, pinned).gp();
Register actual_dst_addr = liftoff::CalculateActualAddress(
this, &temps, dst_addr, offset_reg, offset_imm);
- vst1(Neon64, NeonListOperand(src.fp()), NeonMemOperand(actual_dst_addr));
+ vmov(scratch, liftoff::GetFloatRegister(src.fp()));
+ str(scratch, MemOperand(actual_dst_addr));
} else {
MemOperand dst_op =
liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
@@ -450,9 +456,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
str(src.high_gp(), dst_op);
break;
- case StoreType::kF32Store:
- BAILOUT("Store f32");
- break;
default:
UNREACHABLE();
}
@@ -473,7 +476,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
ldr(dst.high_gp(), MemOperand(fp, offset + kRegisterSize));
break;
case kWasmF32:
- BAILOUT("Load Caller Frame Slot for f32");
+ vldr(liftoff::GetFloatRegister(dst.fp()), src);
break;
case kWasmF64:
vldr(dst.fp(), src);
@@ -486,7 +489,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_index, src_index);
- LiftoffRegister reg = GetUnusedRegister(kGpReg);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
}
@@ -501,7 +504,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
DCHECK_NE(dst, src);
if (type == kWasmF32) {
- BAILOUT("Move DoubleRegister");
+ vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
} else {
DCHECK_EQ(kWasmF64, type);
vmov(dst, src);
@@ -521,7 +524,7 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
str(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
- BAILOUT("Spill Register f32");
+ vstr(liftoff::GetFloatRegister(reg.fp()), dst);
break;
case kWasmF64:
vstr(reg.fp(), dst);
@@ -574,7 +577,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
- BAILOUT("Fill Register");
+ vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(index));
break;
case kWasmF64:
vldr(reg.fp(), liftoff::GetStackSlot(index));
@@ -584,8 +587,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- ldr(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ ldr(reg, liftoff::GetHalfStackSlot(index, half));
}
#define I32_BINOP(name, instruction) \
@@ -601,6 +605,18 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
and_(scratch, amount, Operand(0x1f)); \
instruction(dst, src, Operand(scratch)); \
}
+#define FP32_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(liftoff::GetFloatRegister(dst), \
+ liftoff::GetFloatRegister(src)); \
+ }
+#define FP32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(liftoff::GetFloatRegister(dst), \
+ liftoff::GetFloatRegister(lhs), \
+ liftoff::GetFloatRegister(rhs)); \
+ }
#define FP64_UNOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst, src); \
@@ -610,15 +626,6 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
DoubleRegister rhs) { \
instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
- DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
- }
-#define UNIMPLEMENTED_FP_UNOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
- }
I32_BINOP(i32_add, add)
I32_BINOP(i32_sub, sub)
@@ -629,16 +636,13 @@ I32_BINOP(i32_xor, eor)
I32_SHIFTOP(i32_shl, lsl)
I32_SHIFTOP(i32_sar, asr)
I32_SHIFTOP(i32_shr, lsr)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_min)
-UNIMPLEMENTED_FP_BINOP(f32_max)
-UNIMPLEMENTED_FP_BINOP(f32_copysign)
-UNIMPLEMENTED_FP_UNOP(f32_abs)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP(f32_sqrt)
+FP32_BINOP(f32_add, vadd)
+FP32_BINOP(f32_sub, vsub)
+FP32_BINOP(f32_mul, vmul)
+FP32_BINOP(f32_div, vdiv)
+FP32_UNOP(f32_abs, vabs)
+FP32_UNOP(f32_neg, vneg)
+FP32_UNOP(f32_sqrt, vsqrt)
FP64_BINOP(f64_add, vadd)
FP64_BINOP(f64_sub, vsub)
FP64_BINOP(f64_mul, vmul)
@@ -649,8 +653,10 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef I32_BINOP
#undef I32_SHIFTOP
-#undef UNIMPLEMENTED_FP_BINOP
-#undef UNIMPLEMENTED_FP_UNOP
+#undef FP32_UNOP
+#undef FP32_BINOP
+#undef FP64_UNOP
+#undef FP64_BINOP
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
@@ -666,13 +672,12 @@ bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
{
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch_2 = GetUnusedRegister(kGpReg, pinned).gp();
// x = x - ((x & (0x55555555 << 1)) >> 1)
and_(scratch, src, Operand(0xaaaaaaaa));
sub(dst, src, Operand(scratch, LSR, 1));
- LiftoffRegList pinned;
- pinned.set(dst);
- Register scratch_2 = GetUnusedRegister(kGpReg, pinned).gp();
// x = (x & 0x33333333) + ((x & (0x33333333 << 2)) >> 2)
mov(scratch, Operand(0x33333333));
and_(scratch_2, dst, Operand(scratch, LSL, 2));
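
The masks 0xaaaaaaaa and 0x33333333 are the first two steps of the classic divide-and-conquer population count; the hunk is cut off here, but the remaining steps presumably follow the standard pattern. A portable reference model of the full algorithm:

    // Reference model of the bit-twiddling popcount emitted above.
    #include <cassert>
    #include <cstdint>

    uint32_t Popcnt32(uint32_t x) {
      x = x - ((x & 0xaaaaaaaa) >> 1);                 // sum bit pairs
      x = (x & 0x33333333) + ((x >> 2) & 0x33333333);  // sum nibbles
      x = (x + (x >> 4)) & 0x0f0f0f0f;                 // sum bytes
      return (x * 0x01010101) >> 24;                   // horizontal byte sum
    }

    int main() { assert(Popcnt32(0xFF) == 8); }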
@@ -837,45 +842,87 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned) {
- liftoff::I64Shiftop<&TurboAssembler::LslPair>(this, dst, src, amount,
- dst.low_gp(), pinned);
+ liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount,
+ pinned);
}
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned) {
- liftoff::I64Shiftop<&TurboAssembler::AsrPair>(this, dst, src, amount,
- dst.high_gp(), pinned);
+ liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount,
+ pinned);
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned) {
- liftoff::I64Shiftop<&TurboAssembler::LsrPair>(this, dst, src, amount,
- dst.high_gp(), pinned);
+ liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount,
+ pinned);
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
int amount) {
DCHECK(is_uint6(amount));
- LsrPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src.high_gp(), amount);
+ UseScratchRegisterScope temps(this);
+ Register src_high = src.high_gp();
+ // {src.high_gp()} will still be needed after writing {dst.low_gp()}.
+ if (src_high == dst.low_gp()) {
+ src_high = GetUnusedRegister(kGpReg).gp();
+ TurboAssembler::Move(src_high, dst.low_gp());
+ }
+
+ LsrPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, amount);
}
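
Shifts on an i64 register pair mask the amount to six bits and must not clobber a source half that is still needed, which is exactly what the src_high copy above guards against. A portable model of the pair shift's semantics (an illustration, not V8's LsrPair implementation):

    // Model of a logical right shift on an i64 held as two i32 halves.
    #include <cstdint>

    void LsrPairModel(uint32_t& dst_low, uint32_t& dst_high, uint32_t src_low,
                      uint32_t src_high, uint32_t amount) {
      amount &= 0x3F;  // wasm shift amounts wrap modulo 64
      if (amount == 0) {
        dst_low = src_low;
        dst_high = src_high;
      } else if (amount < 32) {
        // dst_low is written first; src_high must still be readable here,
        // which is why the assembler copies it aside if it aliases dst_low.
        dst_low = (src_low >> amount) | (src_high << (32 - amount));
        dst_high = src_high >> amount;
      } else {
        dst_low = src_high >> (amount - 32);
        dst_high = 0;
      }
    }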
bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintp(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
return false;
}
bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintm(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
return false;
}
bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintz(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
return false;
}
bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
DoubleRegister src) {
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(this, ARMv8);
+ vrintn(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
+ return true;
+ }
return false;
}
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(
+ this, liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs), liftoff::MinOrMax::kMin);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatMinOrMax(
+ this, liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs), liftoff::MinOrMax::kMax);
+}
+
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(this, ARMv8);
@@ -927,14 +974,31 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
// This is a nop on arm.
}
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
+ UseScratchRegisterScope temps(this);
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 = temps.Acquire();
+ VmovLow(scratch, lhs);
+ // Clear sign bit in {scratch}.
+ bic(scratch, scratch, Operand(kF32SignBit));
+ VmovLow(scratch2, rhs);
+ // Isolate sign bit in {scratch2}.
+ and_(scratch2, scratch2, Operand(kF32SignBit));
+ // Combine {scratch2} into {scratch}.
+ orr(scratch, scratch, scratch2);
+ VmovLow(dst, scratch);
+}
+
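
emit_f32_copysign works purely on the bit pattern: bic clears bit 31 of lhs, and/orr transplant bit 31 of rhs. The same transplant in portable C++:

    // Bit-level model of the bic/and/orr sign transplant above.
    #include <cstdint>
    #include <cstring>

    float F32CopysignModel(float lhs, float rhs) {
      constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
      uint32_t l, r;
      std::memcpy(&l, &lhs, 4);
      std::memcpy(&r, &rhs, 4);
      uint32_t bits = (l & ~kF32SignBit)   // "bic": clear sign of lhs
                    | (r & kF32SignBit);   // "and"+"orr": take sign of rhs
      float result;
      std::memcpy(&result, &bits, 4);
      return result;
    }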
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF64SignBitHighWord = uint32_t{1} << 31;
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Register scratch2 = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.
bic(scratch, scratch, Operand(kF64SignBitHighWord));
@@ -955,11 +1019,35 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::Move(dst.gp(), src.low_gp());
return true;
case kExprI32SConvertF32: {
- BAILOUT("kExprI32SConvertF32");
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ vmov(scratch_f, Float32(static_cast<float>(INT32_MIN)));
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(src.fp()), scratch_f);
+ b(trap, lt);
+ // Check overflow.
+ cmp(dst.gp(), Operand(-1));
+ b(trap, vs);
return true;
}
case kExprI32UConvertF32: {
- BAILOUT("kExprI32UConvertF32");
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ // Check underflow and NaN.
+ vmov(scratch_f, Float32(-1.0f));
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(src.fp()), scratch_f);
+ b(trap, le);
+ // Check overflow.
+ cmp(dst.gp(), Operand(-1));
+ b(trap, eq);
return true;
}
case kExprI32SConvertF64: {
@@ -995,7 +1083,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
}
case kExprI32ReinterpretF32:
- BAILOUT("kExprI32ReinterpretF32");
+ vmov(dst.gp(), liftoff::GetFloatRegister(src.fp()));
return true;
case kExprI64SConvertI32:
if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
@@ -1008,34 +1096,36 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI64ReinterpretF64:
vmov(dst.low_gp(), dst.high_gp(), src.fp());
return true;
- case kExprF32SConvertI32:
- BAILOUT("kExprF32SConvertI32");
+ case kExprF32SConvertI32: {
+ SwVfpRegister dst_float = liftoff::GetFloatRegister(dst.fp());
+ vmov(dst_float, src.gp());
+ vcvt_f32_s32(dst_float, dst_float);
return true;
- case kExprF32UConvertI32:
- BAILOUT("kExprF32UConvertI32");
+ }
+ case kExprF32UConvertI32: {
+ SwVfpRegister dst_float = liftoff::GetFloatRegister(dst.fp());
+ vmov(dst_float, src.gp());
+ vcvt_f32_u32(dst_float, dst_float);
return true;
+ }
case kExprF32ConvertF64:
- BAILOUT("kExprF32ConvertF64");
+ vcvt_f32_f64(liftoff::GetFloatRegister(dst.fp()), src.fp());
return true;
case kExprF32ReinterpretI32:
- BAILOUT("kExprF32ReinterpretI32");
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
return true;
case kExprF64SConvertI32: {
- UseScratchRegisterScope temps(this);
- SwVfpRegister scratch = temps.AcquireS();
- vmov(scratch, src.gp());
- vcvt_f64_s32(dst.fp(), scratch);
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
+ vcvt_f64_s32(dst.fp(), liftoff::GetFloatRegister(dst.fp()));
return true;
}
case kExprF64UConvertI32: {
- UseScratchRegisterScope temps(this);
- SwVfpRegister scratch = temps.AcquireS();
- vmov(scratch, src.gp());
- vcvt_f64_u32(dst.fp(), scratch);
+ vmov(liftoff::GetFloatRegister(dst.fp()), src.gp());
+ vcvt_f64_u32(dst.fp(), liftoff::GetFloatRegister(dst.fp()));
return true;
}
case kExprF64ConvertF32:
- BAILOUT("kExprF64ConvertF32");
+ vcvt_f64_f32(dst.fp(), liftoff::GetFloatRegister(src.fp()));
return true;
case kExprF64ReinterpretI64:
vmov(dst.fp(), src.low_gp(), src.high_gp());
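
The new float-to-integer conversions above rely on vcvt rounding toward zero and saturating out-of-range inputs: NaNs and underflow are caught afterwards by an unordered compare against the range boundary, and saturation is caught by the overflow check on the integer result, branching to the trap label in either case. A semantic model of the signed f32 case (illustrative; the emitted code works on condition flags rather than exceptions):

    // Semantic model of the trapping f32 -> i32 (signed) conversion.
    #include <cmath>
    #include <cstdint>
    #include <stdexcept>

    int32_t I32SConvertF32(float input) {
      if (std::isnan(input)) throw std::runtime_error("trap: invalid conversion");
      float truncated = std::trunc(input);  // vcvt rounds toward zero
      // -2^31 and 2^31 are exactly representable in f32, so the range
      // check is half-open at the top.
      if (truncated < -2147483648.0f || truncated >= 2147483648.0f)
        throw std::runtime_error("trap: integer overflow");
      return static_cast<int32_t>(truncated);
    }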
@@ -1160,7 +1250,14 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ VFPCompareAndSetFlags(liftoff::GetFloatRegister(lhs),
+ liftoff::GetFloatRegister(rhs));
+ mov(dst, Operand(0), LeaveCC);
+ mov(dst, Operand(1), LeaveCC, cond);
+ if (cond != ne) {
+ // If V flag set, at least one of the arguments was a NaN -> false.
+ mov(dst, Operand(0), LeaveCC, vs);
+ }
}
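
emit_f32_set_cond has to respect wasm's NaN semantics on top of the VFP flags: an unordered compare sets the V flag, and every comparison with a NaN operand must produce 0, except ne, which must produce 1 (and which the plain condition already handles). A scalar truth model:

    // Model of wasm f32 comparison semantics with NaN inputs.
    #include <cmath>

    bool F32Eq(float a, float b) { return a == b; }     // false if either is NaN
    bool F32Ne(float a, float b) { return !(a == b); }  // true if either is NaN
    bool F32Lt(float a, float b) {
      // Unordered (NaN) compares are false; the emitted code forces dst to 0
      // when the V flag is set, for every condition except "ne".
      return !(std::isnan(a) || std::isnan(b)) && a < b;
    }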
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
@@ -1249,7 +1346,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
ExternalReference ext_ref) {
// Arguments are passed by pushing them all to the stack and then passing
// a pointer to them.
- DCHECK_EQ(stack_bytes % kPointerSize, 0);
+ DCHECK(IsAligned(stack_bytes, kSystemPointerSize));
// Reserve space in the stack.
sub(sp, sp, Operand(stack_bytes));
@@ -1264,7 +1361,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
str(args->high_gp(), MemOperand(sp, arg_bytes + kRegisterSize));
break;
case kWasmF32:
- BAILOUT("Call C for f32 parameter");
+ vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
break;
case kWasmF64:
vstr(args->fp(), MemOperand(sp, arg_bytes));
@@ -1304,10 +1401,10 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
break;
case kWasmI64:
ldr(result_reg->low_gp(), MemOperand(sp));
- ldr(result_reg->high_gp(), MemOperand(sp, kPointerSize));
+ ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
break;
case kWasmF32:
- BAILOUT("Call C for f32 parameter");
+ vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
break;
case kWasmF64:
vldr(result_reg->fp(), MemOperand(sp));
@@ -1354,16 +1451,14 @@ void LiftoffStackSlots::Construct() {
// i32 and i64 can be treated as similar cases, i64 being previously
// split into two i32 registers
case kWasmI32:
- case kWasmI64: {
+ case kWasmI64:
+ case kWasmF32: {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->Push(scratch);
} break;
- case kWasmF32:
- asm_->BAILOUT("Construct f32 from kStack");
- break;
case kWasmF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
@@ -1386,7 +1481,7 @@ void LiftoffStackSlots::Construct() {
asm_->push(src.reg().gp());
break;
case kWasmF32:
- asm_->BAILOUT("Construct f32 from kRegister");
+ asm_->vpush(liftoff::GetFloatRegister(src.reg().fp()));
break;
case kWasmF64:
asm_->vpush(src.reg().fp());
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 0be8a0455cb..d85b9b268be 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -41,8 +41,8 @@ namespace liftoff {
// -----+--------------------+ <-- stack ptr (sp)
//
-constexpr int32_t kInstanceOffset = 2 * kPointerSize;
-constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kPointerSize;
+constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
inline MemOperand GetStackSlot(uint32_t index) {
@@ -148,7 +148,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
return;
}
#endif
- PatchingAssembler patching_assembler(AssemblerOptions{}, buffer_ + offset, 1);
+ PatchingAssembler patching_assembler(AssemblerOptions{},
+ buffer_start_ + offset, 1);
patching_assembler.PatchSubSp(bytes);
}
@@ -188,6 +189,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
@@ -196,6 +202,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI64Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -349,7 +364,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
Ldr(liftoff::GetRegFromType(reg, type), src);
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 351c0a8b76a..067c79be321 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -34,8 +34,10 @@ inline Operand GetStackSlot(uint32_t index) {
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
-inline Operand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize - half_offset;
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
@@ -132,6 +134,19 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
}
+inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
+ const Operand& dst) {
+ if (assm->cache_state()->has_unused_register(kGpReg)) {
+ Register tmp = assm->cache_state()->unused_register(kGpReg).gp();
+ assm->mov(tmp, src);
+ assm->mov(dst, tmp);
+ } else {
+ // No free register, move via the stack.
+ assm->push(src);
+ assm->pop(dst);
+ }
+}
+
constexpr DoubleRegister kScratchDoubleReg = xmm7;
constexpr int kSubSpSize = 6; // 6 bytes for "sub esp, <imm32>"
@@ -152,8 +167,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
- kAvailableSpace);
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
#if V8_OS_WIN
constexpr int kPageSize = 4 * 1024;
if (bytes > kPageSize) {
@@ -222,6 +238,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
mov(dst, Operand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
@@ -230,6 +251,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -379,19 +409,22 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- liftoff::Load(this, dst, ebp, kPointerSize * (caller_slot_idx + 1), type);
+ liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
+ type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register(kGpReg)) {
- LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index, type);
- Spill(dst_index, reg, type);
+ if (needs_reg_pair(type)) {
+ liftoff::MoveStackValue(this,
+ liftoff::GetHalfStackSlot(src_index, kLowWord),
+ liftoff::GetHalfStackSlot(dst_index, kLowWord));
+ liftoff::MoveStackValue(this,
+ liftoff::GetHalfStackSlot(src_index, kHighWord),
+ liftoff::GetHalfStackSlot(dst_index, kHighWord));
} else {
- push(liftoff::GetStackSlot(src_index));
- pop(liftoff::GetStackSlot(dst_index));
+ liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_index),
+ liftoff::GetStackSlot(dst_index));
}
}
@@ -421,8 +454,8 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
mov(dst, reg.gp());
break;
case kWasmI64:
- mov(dst, reg.low_gp());
- mov(liftoff::GetHalfStackSlot(2 * index - 1), reg.high_gp());
+ mov(liftoff::GetHalfStackSlot(index, kLowWord), reg.low_gp());
+ mov(liftoff::GetHalfStackSlot(index, kHighWord), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
@@ -445,8 +478,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
- mov(dst, Immediate(low_word));
- mov(liftoff::GetHalfStackSlot(2 * index - 1), Immediate(high_word));
+ mov(liftoff::GetHalfStackSlot(index, kLowWord), Immediate(low_word));
+ mov(liftoff::GetHalfStackSlot(index, kHighWord), Immediate(high_word));
break;
}
default:
@@ -463,8 +496,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
mov(reg.gp(), src);
break;
case kWasmI64:
- mov(reg.low_gp(), src);
- mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index - 1));
+ mov(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
movss(reg.fp(), src);
@@ -477,8 +510,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- mov(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ mov(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -490,12 +524,17 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
- if (dst == rhs) {
- neg(dst);
- add(dst, lhs);
- } else {
+ if (dst != rhs) {
+ // Default path.
if (dst != lhs) mov(dst, lhs);
sub(dst, rhs);
+ } else if (lhs == rhs) {
+ // Degenerate case.
+ xor_(dst, dst);
+ } else {
+ // Emit {dst = lhs + -rhs} if dst == rhs.
+ neg(dst);
+ add(dst, lhs);
}
}
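
The reordered emit_i32_sub handles dst == rhs by computing lhs - rhs as (-rhs) + lhs, negating the subtrahend in place. The identity holds under two's-complement wraparound, which a small self-check demonstrates:

    // Check of the identity used when dst aliases rhs: lhs - rhs == -rhs + lhs.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t lhs : {0, 1, -1, 42, INT32_MIN, INT32_MAX}) {
        for (int32_t rhs : {0, 1, -7, INT32_MIN, INT32_MAX}) {
          // Unsigned arithmetic gives well-defined wraparound, matching the
          // neg/add instructions on ia32.
          uint32_t expected = uint32_t(lhs) - uint32_t(rhs);
          uint32_t dst = uint32_t(rhs);
          dst = ~dst + 1;        // neg(dst)
          dst += uint32_t(lhs);  // add(dst, lhs)
          assert(dst == expected);
        }
      }
    }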
@@ -855,7 +894,9 @@ inline void Emit64BitShiftOperation(
// Temporary registers cannot overlap with {dst}.
pinned.set(dst);
- std::vector<LiftoffAssembler::ParallelRegisterMoveTuple> reg_moves;
+ constexpr size_t kMaxRegMoves = 3;
+ base::SmallVector<LiftoffAssembler::ParallelRegisterMoveTuple, kMaxRegMoves>
+ reg_moves;
// If {dst} contains {ecx}, replace it by an unused register, which is then
// moved to {ecx} in the end.
@@ -875,7 +916,7 @@ inline void Emit64BitShiftOperation(
reg_moves.emplace_back(dst, src, kWasmI64);
reg_moves.emplace_back(ecx, amount, kWasmI32);
- assm->ParallelRegisterMove({reg_moves.data(), reg_moves.size()});
+ assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
(assm->*emit_shift)(dst.high_gp(), dst.low_gp());
@@ -1413,7 +1454,9 @@ void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- movsx_b(dst.low_gp(), src.low_gp());
+ Register byte_reg = liftoff::GetTmpByteRegister(this, src.low_gp());
+ if (byte_reg != src.low_gp()) mov(byte_reg, src.low_gp());
+ movsx_b(dst.low_gp(), byte_reg);
liftoff::SignExtendI32ToI64(this, dst);
}
@@ -1634,8 +1677,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- ret(static_cast<int>(num_stack_slots * kPointerSize));
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -1723,10 +1767,9 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
- asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+ asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
}
- asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ -
- (slot.half_ == kLowWord ? 0 : 1)));
+ asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index 0c2dad6cb28..e7415e20799 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -7,10 +7,13 @@
#include <sstream>
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/macro-assembler-inl.h"
+#include "src/ostreams.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
@@ -30,47 +33,44 @@ namespace {
class StackTransferRecipe {
struct RegisterMove {
- LiftoffRegister dst;
LiftoffRegister src;
ValueType type;
- constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
- ValueType type)
- : dst(dst), src(src), type(type) {}
+ constexpr RegisterMove(LiftoffRegister src, ValueType type)
+ : src(src), type(type) {}
};
+
struct RegisterLoad {
enum LoadKind : uint8_t {
- kConstant, // load a constant value into a register.
- kStack, // fill a register from a stack slot.
- kHalfStack // fill one half of a register pair from half a stack slot.
+ kConstant, // load a constant value into a register.
+ kStack, // fill a register from a stack slot.
+ kLowHalfStack, // fill a register from the low half of a stack slot.
+ kHighHalfStack // fill a register from the high half of a stack slot.
};
- LiftoffRegister dst;
LoadKind kind;
ValueType type;
int32_t value; // i32 constant value or stack index, depending on kind.
// Named constructors.
- static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
+ static RegisterLoad Const(WasmValue constant) {
if (constant.type() == kWasmI32) {
- return {dst, kConstant, kWasmI32, constant.to_i32()};
+ return {kConstant, kWasmI32, constant.to_i32()};
}
DCHECK_EQ(kWasmI64, constant.type());
DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
- return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
+ return {kConstant, kWasmI64, constant.to_i32_unchecked()};
}
- static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
- ValueType type) {
- return {dst, kStack, type, stack_index};
+ static RegisterLoad Stack(int32_t stack_index, ValueType type) {
+ return {kStack, type, stack_index};
}
- static RegisterLoad HalfStack(LiftoffRegister dst,
- int32_t half_stack_index) {
- return {dst, kHalfStack, kWasmI32, half_stack_index};
+ static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) {
+ return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
+ stack_index};
}
private:
- RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
- int32_t value)
- : dst(dst), kind(kind), type(type), value(value) {}
+ RegisterLoad(LoadKind kind, ValueType type, int32_t value)
+ : kind(kind), type(type), value(value) {}
};
public:
@@ -80,81 +80,18 @@ class StackTransferRecipe {
void Execute() {
// First, execute register moves. Then load constants and stack values into
// registers.
-
- if ((move_dst_regs_ & move_src_regs_).is_empty()) {
- // No overlap in src and dst registers. Just execute the moves in any
- // order.
- for (RegisterMove& rm : register_moves_) {
- asm_->Move(rm.dst, rm.src, rm.type);
- }
- register_moves_.clear();
- } else {
- // Keep use counters of src registers.
- uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
- for (RegisterMove& rm : register_moves_) {
- ++src_reg_use_count[rm.src.liftoff_code()];
- }
- // Now repeatedly iterate the list of register moves, and execute those
- // whose dst register does not appear as src any more. The remaining moves
- // are compacted during this iteration.
- // If no more moves can be executed (because of a cycle), spill one
- // register to the stack, add a RegisterLoad to reload it later, and
- // continue.
- uint32_t next_spill_slot = asm_->cache_state()->stack_height();
- while (!register_moves_.empty()) {
- int executed_moves = 0;
- for (auto& rm : register_moves_) {
- if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
- asm_->Move(rm.dst, rm.src, rm.type);
- ++executed_moves;
- DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
- --src_reg_use_count[rm.src.liftoff_code()];
- } else if (executed_moves) {
- // Compaction: Move not-executed moves to the beginning of the list.
- (&rm)[-executed_moves] = rm;
- }
- }
- if (executed_moves == 0) {
- // There is a cycle. Spill one register, then continue.
- // TODO(clemensh): Use an unused register if available.
- RegisterMove& rm = register_moves_.back();
- LiftoffRegister spill_reg = rm.src;
- asm_->Spill(next_spill_slot, spill_reg, rm.type);
- // Remember to reload into the destination register later.
- LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
- DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
- src_reg_use_count[spill_reg.liftoff_code()] = 0;
- ++next_spill_slot;
- executed_moves = 1;
- }
- register_moves_.erase(register_moves_.end() - executed_moves,
- register_moves_.end());
- }
- }
-
- for (RegisterLoad& rl : register_loads_) {
- switch (rl.kind) {
- case RegisterLoad::kConstant:
- asm_->LoadConstant(rl.dst, rl.type == kWasmI64
- ? WasmValue(int64_t{rl.value})
- : WasmValue(int32_t{rl.value}));
- break;
- case RegisterLoad::kStack:
- asm_->Fill(rl.dst, rl.value, rl.type);
- break;
- case RegisterLoad::kHalfStack:
- // As half of a register pair, {rl.dst} must be a gp register.
- asm_->FillI64Half(rl.dst.gp(), rl.value);
- break;
- }
- }
- register_loads_.clear();
+ ExecuteMoves();
+ DCHECK(move_dst_regs_.is_empty());
+ ExecuteLoads();
+ DCHECK(load_dst_regs_.is_empty());
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
- uint32_t dst_index, uint32_t src_index) {
+ uint32_t dst_index,
+ const LiftoffAssembler::CacheState& src_state,
+ uint32_t src_index) {
const VarState& dst = dst_state.stack_state[dst_index];
- const VarState& src = __ cache_state()->stack_state[src_index];
+ const VarState& src = src_state.stack_state[src_index];
DCHECK_EQ(dst.type(), src.type());
switch (dst.loc()) {
case VarState::kStack:
@@ -206,7 +143,7 @@ class StackTransferRecipe {
DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) {
case VarState::kStack:
- LoadI64HalfStackSlot(dst, 2 * index - (half == kLowWord ? 0 : 1));
+ LoadI64HalfStackSlot(dst, index, half);
break;
case VarState::kRegister: {
LiftoffRegister src_half =
@@ -234,99 +171,302 @@ class StackTransferRecipe {
MoveRegister(dst.high(), src.high(), kWasmI32);
return;
}
- DCHECK(!move_dst_regs_.has(dst));
+ if (move_dst_regs_.has(dst)) {
+ DCHECK_EQ(register_move(dst)->src, src);
+ // Non-fp registers can only occur with the exact same type.
+ DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type);
+ // It can happen that one fp register holds both the f32 zero and the f64
+ // zero, as the initial value for local variables. Move the value as f64
+ // in that case.
+ if (type == kWasmF64) register_move(dst)->type = kWasmF64;
+ return;
+ }
move_dst_regs_.set(dst);
- move_src_regs_.set(src);
- register_moves_.emplace_back(dst, src, type);
+ ++*src_reg_use_count(src);
+ *register_move(dst) = {src, type};
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
- register_loads_.push_back(RegisterLoad::Const(dst, value));
+ DCHECK(!load_dst_regs_.has(dst));
+ load_dst_regs_.set(dst);
+ if (dst.is_pair()) {
+ DCHECK_EQ(kWasmI64, value.type());
+ int64_t i64 = value.to_i64();
+ *register_load(dst.low()) =
+ RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
+ *register_load(dst.high()) =
+ RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64 >> 32)));
+ } else {
+ *register_load(dst) = RegisterLoad::Const(value);
+ }
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
- register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
+ if (load_dst_regs_.has(dst)) {
+ // It can happen that we spilled the same register to different stack
+ // slots, and then we reload them later into the same dst register.
+ // In that case, it is enough to load one of the stack slots.
+ return;
+ }
+ load_dst_regs_.set(dst);
+ if (dst.is_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ *register_load(dst.low()) =
+ RegisterLoad::HalfStack(stack_index, kLowWord);
+ *register_load(dst.high()) =
+ RegisterLoad::HalfStack(stack_index, kHighWord);
+ } else {
+ *register_load(dst) = RegisterLoad::Stack(stack_index, type);
+ }
}
- void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
- register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
+ void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t stack_index,
+ RegPairHalf half) {
+ if (load_dst_regs_.has(dst)) {
+ // It can happen that we spilled the same register to different stack
+ // slots, and then we reload them later into the same dst register.
+ // In that case, it is enough to load one of the stack slots.
+ return;
+ }
+ load_dst_regs_.set(dst);
+ *register_load(dst) = RegisterLoad::HalfStack(stack_index, half);
}
private:
- // TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves_;
- std::vector<RegisterLoad> register_loads_;
+ using MovesStorage =
+ std::aligned_storage<kAfterMaxLiftoffRegCode * sizeof(RegisterMove),
+ alignof(RegisterMove)>::type;
+ using LoadsStorage =
+ std::aligned_storage<kAfterMaxLiftoffRegCode * sizeof(RegisterLoad),
+ alignof(RegisterLoad)>::type;
+
+ ASSERT_TRIVIALLY_COPYABLE(RegisterMove);
+ ASSERT_TRIVIALLY_COPYABLE(RegisterLoad);
+
+ MovesStorage register_moves_; // uninitialized
+ LoadsStorage register_loads_; // uninitialized
+ int src_reg_use_count_[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList move_dst_regs_;
- LiftoffRegList move_src_regs_;
+ LiftoffRegList load_dst_regs_;
LiftoffAssembler* const asm_;
+
+ RegisterMove* register_move(LiftoffRegister reg) {
+ return reinterpret_cast<RegisterMove*>(&register_moves_) +
+ reg.liftoff_code();
+ }
+ RegisterLoad* register_load(LiftoffRegister reg) {
+ return reinterpret_cast<RegisterLoad*>(&register_loads_) +
+ reg.liftoff_code();
+ }
+ int* src_reg_use_count(LiftoffRegister reg) {
+ return src_reg_use_count_ + reg.liftoff_code();
+ }
+
+ void ExecuteMove(LiftoffRegister dst) {
+ RegisterMove* move = register_move(dst);
+ DCHECK_EQ(0, *src_reg_use_count(dst));
+ asm_->Move(dst, move->src, move->type);
+ ClearExecutedMove(dst);
+ }
+
+ void ClearExecutedMove(LiftoffRegister dst) {
+ DCHECK(move_dst_regs_.has(dst));
+ move_dst_regs_.clear(dst);
+ RegisterMove* move = register_move(dst);
+ DCHECK_LT(0, *src_reg_use_count(move->src));
+ if (--*src_reg_use_count(move->src)) return;
+ // src count dropped to zero. If this is a destination register, execute
+ // that move now.
+ if (!move_dst_regs_.has(move->src)) return;
+ ExecuteMove(move->src);
+ }
+
+ void ExecuteMoves() {
+ // Execute all moves whose {dst} is not being used as src in another move.
+ // If any src count drops to zero, also (transitively) execute the
+ // corresponding move to that register.
+ for (LiftoffRegister dst : move_dst_regs_) {
+ // Check if already handled via transitivity in {ClearExecutedMove}.
+ if (!move_dst_regs_.has(dst)) continue;
+ if (*src_reg_use_count(dst)) continue;
+ ExecuteMove(dst);
+ }
+
+ // All remaining moves are parts of a cycle. Just spill the first one, then
+ // process all remaining moves in that cycle. Repeat for all cycles.
+ uint32_t next_spill_slot = asm_->cache_state()->stack_height();
+ while (!move_dst_regs_.is_empty()) {
+ // TODO(clemensh): Use an unused register if available.
+ LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
+ RegisterMove* move = register_move(dst);
+ LiftoffRegister spill_reg = move->src;
+ asm_->Spill(next_spill_slot, spill_reg, move->type);
+ // Remember to reload into the destination register later.
+ LoadStackSlot(dst, next_spill_slot, move->type);
+ ++next_spill_slot;
+ ClearExecutedMove(dst);
+ }
+ }
+
+ void ExecuteLoads() {
+ for (LiftoffRegister dst : load_dst_regs_) {
+ RegisterLoad* load = register_load(dst);
+ switch (load->kind) {
+ case RegisterLoad::kConstant:
+ asm_->LoadConstant(dst, load->type == kWasmI64
+ ? WasmValue(int64_t{load->value})
+ : WasmValue(int32_t{load->value}));
+ break;
+ case RegisterLoad::kStack:
+ asm_->Fill(dst, load->value, load->type);
+ break;
+ case RegisterLoad::kLowHalfStack:
+ // Half of a register pair, {dst} must be a gp register.
+ asm_->FillI64Half(dst.gp(), load->value, kLowWord);
+ break;
+ case RegisterLoad::kHighHalfStack:
+ // Half of a register pair, {dst} must be a gp register.
+ asm_->FillI64Half(dst.gp(), load->value, kHighWord);
+ break;
+ }
+ }
+ load_dst_regs_ = {};
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
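
The rewritten recipe replaces the O(n²) move list with one slot per register plus a source use count: a move executes as soon as its destination register is no longer needed as a source, executed moves transitively unblock others via ClearExecutedMove, and anything left over forms cycles that are broken by spilling one source to a fresh stack slot (reloaded later as a RegisterLoad). A compact standalone sketch of that scheduling on plain integer register codes:

    // Standalone sketch of parallel register moves with cycle breaking.
    #include <cstdio>
    #include <map>

    void ExecuteParallelMoves(std::map<int, int> moves /* dst -> src */) {
      std::map<int, int> use_count;  // how often a register is a pending source
      for (auto& [dst, src] : moves) ++use_count[src];
      while (!moves.empty()) {
        // Phase 1: execute every move whose destination is no longer needed
        // as a source; each executed move may unblock further moves.
        bool progress = true;
        while (progress) {
          progress = false;
          for (auto it = moves.begin(); it != moves.end();) {
            int dst = it->first, src = it->second;
            if (use_count[dst] == 0) {
              std::printf("mov r%d <- r%d\n", dst, src);
              --use_count[src];
              it = moves.erase(it);
              progress = true;
            } else {
              ++it;
            }
          }
        }
        // Phase 2: anything left is a cycle; break it by spilling one source
        // and scheduling a reload of its destination from the spill slot.
        if (!moves.empty()) {
          auto [dst, src] = *moves.begin();
          std::printf("spill r%d; later fill r%d from the spill slot\n", src, dst);
          --use_count[src];
          moves.erase(moves.begin());
        }
      }
    }

    int main() {
      // r1 <-> r2 is a cycle; r3 <- r1 is a plain chain.
      ExecuteParallelMoves({{1, 2}, {2, 1}, {3, 1}});
    }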
+class RegisterReuseMap {
+ public:
+ void Add(LiftoffRegister src, LiftoffRegister dst) {
+ if (auto previous = Lookup(src)) {
+ DCHECK_EQ(previous, dst);
+ return;
+ }
+ map_.emplace_back(src);
+ map_.emplace_back(dst);
+ }
+
+ base::Optional<LiftoffRegister> Lookup(LiftoffRegister src) {
+ for (auto it = map_.begin(), end = map_.end(); it != end; it += 2) {
+ if (it->is_pair() == src.is_pair() && *it == src) return *(it + 1);
+ }
+ return {};
+ }
+
+ private:
+ // {map_} holds pairs of <src, dst>.
+ base::SmallVector<LiftoffRegister, 8> map_;
+};
+
+enum MergeKeepStackSlots : bool {
+ kKeepStackSlots = true,
+ kTurnStackSlotsIntoRegisters = false
+};
+enum MergeAllowConstants : bool {
+ kConstantsAllowed = true,
+ kConstantsNotAllowed = false
+};
+enum ReuseRegisters : bool {
+ kReuseRegisters = true,
+ kNoReuseRegisters = false
+};
+void InitMergeRegion(LiftoffAssembler::CacheState* state,
+ const VarState* source, VarState* target, uint32_t count,
+ MergeKeepStackSlots keep_stack_slots,
+ MergeAllowConstants allow_constants,
+ ReuseRegisters reuse_registers, LiftoffRegList used_regs) {
+ RegisterReuseMap register_reuse_map;
+ for (const VarState* source_end = source + count; source < source_end;
+ ++source, ++target) {
+ if ((source->is_stack() && keep_stack_slots) ||
+ (source->is_const() && allow_constants)) {
+ *target = *source;
+ continue;
+ }
+ base::Optional<LiftoffRegister> reg;
+ // First try: Keep the same register, if it's free.
+ if (source->is_reg() && state->is_free(source->reg())) {
+ reg = source->reg();
+ }
+ // Second try: Use the same register we used before (if we reuse registers).
+ if (!reg && reuse_registers) {
+ reg = register_reuse_map.Lookup(source->reg());
+ }
+ // Third try: Use any free register.
+ RegClass rc = reg_class_for(source->type());
+ if (!reg && state->has_unused_register(rc, used_regs)) {
+ reg = state->unused_register(rc, used_regs);
+ }
+ if (!reg) {
+ // No free register; make this a stack slot.
+ *target = VarState(source->type());
+ continue;
+ }
+ if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
+ state->inc_used(*reg);
+ *target = VarState(source->type(), *reg);
+ }
+}
+
} // namespace
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
uint32_t num_locals,
- uint32_t arity) {
+ uint32_t arity,
+ uint32_t stack_depth) {
+ // |------locals------|---(in between)----|--(discarded)--|----merge----|
+ // <-- num_locals --> <-- stack_depth -->^stack_base <-- arity -->
+
+ uint32_t stack_base = stack_depth + num_locals;
+ uint32_t target_height = stack_base + arity;
+ uint32_t discarded = source.stack_height() - target_height;
DCHECK(stack_state.empty());
+
DCHECK_GE(source.stack_height(), stack_base);
- stack_state.resize(stack_base + arity, VarState(kWasmStmt));
-
- // |------locals------|--(in between)--|--(discarded)--|----merge----|
- // <-- num_locals --> ^stack_base <-- arity -->
-
- // First, initialize merge slots and locals. Keep them in the registers which
- // are being used in {source}, but avoid using a register multiple times. Use
- // unused registers where necessary and possible.
- for (int range = 0; range < 2; ++range) {
- auto src_idx = range ? 0 : source.stack_state.size() - arity;
- auto src_end = range ? num_locals : source.stack_state.size();
- auto dst_idx = range ? 0 : stack_state.size() - arity;
- for (; src_idx < src_end; ++src_idx, ++dst_idx) {
- auto& dst = stack_state[dst_idx];
- auto& src = source.stack_state[src_idx];
- // Just initialize to any register; will be overwritten before use.
- LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
- RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
- if (src.is_reg() && is_free(src.reg())) {
- reg = src.reg();
- } else if (has_unused_register(rc)) {
- reg = unused_register(rc);
- } else {
- // Make this a stack slot.
- dst = VarState(src.type());
- continue;
- }
- dst = VarState(src.type(), reg);
- inc_used(reg);
- }
+ stack_state.resize_no_init(target_height);
+
+ const VarState* source_begin = source.stack_state.data();
+ VarState* target_begin = stack_state.data();
+
+ // Try to keep locals and the merge region in their registers. Registers used
+ // multiple times need to be copied to another free register. Compute the list
+ // of used registers.
+ LiftoffRegList used_regs;
+ for (auto& src : VectorOf(source_begin, num_locals)) {
+ if (src.is_reg()) used_regs.set(src.reg());
+ }
+ for (auto& src : VectorOf(source_begin + stack_base + discarded, arity)) {
+ if (src.is_reg()) used_regs.set(src.reg());
}
+
+ // Initialize the merge region. If this region moves, try to turn stack slots
+ // into registers since we need to load the value anyway.
+ MergeKeepStackSlots keep_merge_stack_slots =
+ discarded == 0 ? kKeepStackSlots : kTurnStackSlotsIntoRegisters;
+ InitMergeRegion(this, source_begin + stack_base + discarded,
+ target_begin + stack_base, arity, keep_merge_stack_slots,
+ kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+
+ // Initialize the locals region. Here, stack slots stay stack slots (because
+ // they do not move). Try to keep registers in registers, but avoid duplicates.
+ InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
+ kConstantsNotAllowed, kNoReuseRegisters, used_regs);
+ // Sanity check: All the {used_regs} are really in use now.
+ DCHECK_EQ(used_regs, used_registers & used_regs);
+
// Last, initialize the section in between. Here, constants are allowed, but
// registers which are already used for the merge region or locals must be
- // spilled.
- for (uint32_t i = num_locals; i < stack_base; ++i) {
- auto& dst = stack_state[i];
- auto& src = source.stack_state[i];
- if (src.is_reg()) {
- if (is_used(src.reg())) {
- // Make this a stack slot.
- dst = VarState(src.type());
- } else {
- dst = VarState(src.type(), src.reg());
- inc_used(src.reg());
- }
- } else if (src.is_const()) {
- dst = src;
- } else {
- DCHECK(src.is_stack());
- // Make this a stack slot.
- dst = VarState(src.type());
- }
- }
- last_spilled_regs = source.last_spilled_regs;
+ // moved to other registers or spilled. If a register appears twice in the
+ // source region, be sure to use the same register twice in the target region.
+ InitMergeRegion(this, source_begin + num_locals, target_begin + num_locals,
+ stack_depth, kKeepStackSlots, kConstantsAllowed,
+ kReuseRegisters, used_regs);
}
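// A worked instance of the region diagram above, with hypothetical numbers:
// for num_locals = 3, stack_depth = 2, arity = 1 and a source stack of
// height 8, we get stack_base = 5, target_height = 6 and discarded = 2.
// Target slots [0, 3) form the locals region, [3, 5) the in-between region,
// and target slot 5 is initialized from source slot 7 (the merge region);
// source slots 5 and 6 are the discarded part.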
-void LiftoffAssembler::CacheState::Steal(CacheState& source) {
+void LiftoffAssembler::CacheState::Steal(const CacheState& source) {
 // Just use the assignment operator.
*this = std::move(source);
}
@@ -346,9 +486,9 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
-LiftoffAssembler::LiftoffAssembler()
- : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
- CodeObjectRequired::kNo) {
+LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer)
+ : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo,
+ std::move(buffer)) {
set_abort_hard(true); // Avoid calls to Abort.
}
@@ -383,18 +523,20 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
UNREACHABLE();
}
-void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
- DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
+void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
+ const CacheState& source) {
+ DCHECK_EQ(source.stack_height(), target.stack_height());
// TODO(clemensh): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
- for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
- transfers.TransferStackSlot(target, i, i);
+ for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
+ transfers.TransferStackSlot(target, i, source, i);
}
}
-void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
- // Before: ----------------|------ pop_count -----|--- arity ---|
+void LiftoffAssembler::MergeStackWith(const CacheState& target,
+ uint32_t arity) {
+ // Before: ----------------|----- (discarded) ----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
// ^ ^target_stack_height
@@ -407,10 +549,11 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
for (uint32_t i = 0; i < target_stack_base; ++i) {
- transfers.TransferStackSlot(target, i, i);
+ transfers.TransferStackSlot(target, i, cache_state_, i);
}
for (uint32_t i = 0; i < arity; ++i) {
- transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
+ transfers.TransferStackSlot(target, target_stack_base + i, cache_state_,
+ stack_base + i);
}
}
@@ -556,8 +699,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
stack_transfers.Execute();
// Pop parameters from the value stack.
- auto stack_end = cache_state_.stack_state.end();
- cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+ cache_state_.stack_state.pop_back(num_params);
// Reset register use counters.
cache_state_.reset_used_registers();
@@ -620,6 +762,22 @@ void LiftoffAssembler::ParallelRegisterMove(
}
}
+void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
+ // We do not support multi-value yet.
+ DCHECK_EQ(1, sig->return_count());
+ ValueType return_type = sig->GetReturn(0);
+ StackTransferRecipe stack_transfers(this);
+ LiftoffRegister return_reg =
+ needs_reg_pair(return_type)
+ ? LiftoffRegister::ForPair(kGpReturnRegisters[0],
+ kGpReturnRegisters[1])
+ : reg_class_for(return_type) == kGpReg
+ ? LiftoffRegister(kGpReturnRegisters[0])
+ : LiftoffRegister(kFpReturnRegisters[0]);
+ stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
+ cache_state_.stack_height() - 1);
+}
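// The nested ternary above, spelled out: on 32-bit targets an i64 result
// needs the register pair {kGpReturnRegisters[0], kGpReturnRegisters[1]};
// otherwise a gp-class result (i32, or i64 on 64-bit) goes to
// kGpReturnRegisters[0] and an fp-class result (f32/f64) to
// kFpReturnRegisters[0].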
+
#ifdef ENABLE_SLOW_DCHECKS
bool LiftoffAssembler::ValidateCacheState() const {
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
@@ -645,7 +803,7 @@ bool LiftoffAssembler::ValidateCacheState() const {
<< PrintCollection(register_use_count) << "\n";
os << "found: used_regs " << cache_state_.used_registers << ", counts "
<< PrintCollection(cache_state_.register_use_count) << "\n";
- os << "Use --trace-liftoff to debug.";
+ os << "Use --trace-wasm-decoder and --trace-liftoff to debug.";
FATAL("%s", os.str().c_str());
}
#endif
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 6476f0c22d3..3ff60a42ab0 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -9,6 +9,7 @@
#include <memory>
#include "src/base/bits.h"
+#include "src/base/small-vector.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
@@ -35,7 +36,7 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr uint32_t kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr =
- kPointerSize == 8 ? kWasmI64 : kWasmI32;
+ kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
class VarState {
public:
@@ -113,16 +114,13 @@ class LiftoffAssembler : public TurboAssembler {
struct CacheState {
// Allow default construction, move construction, and move assignment.
CacheState() = default;
- CacheState(CacheState&&) = default;
- CacheState& operator=(CacheState&&) = default;
+ CacheState(CacheState&&) V8_NOEXCEPT = default;
+ CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
- // TODO(clemensh): Improve memory management here; avoid std::vector.
- std::vector<VarState> stack_state;
+ base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
- // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
- uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
@@ -232,9 +230,9 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
- uint32_t arity);
+ uint32_t arity, uint32_t stack_depth);
- void Steal(CacheState& source);
+ void Steal(const CacheState& source);
void Split(const CacheState& source);
@@ -244,12 +242,12 @@ class LiftoffAssembler : public TurboAssembler {
private:
// Make the copy assignment operator private (to be used from {Split()}).
- CacheState& operator=(const CacheState&) = default;
+ CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
// Disallow copy construction.
CacheState(const CacheState&) = delete;
};
- LiftoffAssembler();
+ explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
~LiftoffAssembler() override;
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
@@ -300,8 +298,8 @@ class LiftoffAssembler : public TurboAssembler {
return SpillOneRegister(candidates, pinned);
}
- void MergeFullStackWith(CacheState&);
- void MergeStackWith(CacheState&, uint32_t arity);
+ void MergeFullStackWith(const CacheState& target, const CacheState& source);
+ void MergeStackWith(const CacheState& target, uint32_t arity);
void Spill(uint32_t index);
void SpillLocals();
@@ -338,6 +336,8 @@ class LiftoffAssembler : public TurboAssembler {
};
void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
+ void MoveToReturnRegisters(FunctionSig*);
+
#ifdef ENABLE_SLOW_DCHECKS
// Validate that the register use counts reflect the state of the cache.
bool ValidateCacheState() const;
@@ -359,8 +359,12 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromInstance(Register dst, uint32_t offset, int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
+ inline void LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegList pinned);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
@@ -380,9 +384,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
- // 4 bytes on the stack holding half of a 64-bit value. The two half_indexes
- // corresponding to slot {index} are {2*index} and {2*index-1}.
- inline void FillI64Half(Register, uint32_t half_index);
+ // 4 bytes on the stack holding half of a 64-bit value.
+ inline void FillI64Half(Register, uint32_t index, RegPairHalf);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -447,7 +450,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_to_intptr(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -455,7 +458,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -463,7 +466,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
LiftoffRegister(rhs));
} else {
@@ -471,7 +474,7 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
} else {
emit_i32_shr(dst, src, amount);
@@ -713,7 +716,7 @@ class LiftoffStackSlots {
private:
struct Slot {
// Allow move construction.
- Slot(Slot&&) = default;
+ Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half)
: src_(src), src_index_(src_index), half_(half) {}
@@ -725,8 +728,10 @@ class LiftoffStackSlots {
RegPairHalf half_;
};
- std::vector<Slot> slots_;
+ base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index d15e0c4ecff..8c5203479ef 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -10,9 +10,13 @@
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
+#include "src/interface-descriptors.h"
+#include "src/log.h"
#include "src/macro-assembler-inl.h"
#include "src/objects/smi.h"
+#include "src/ostreams.h"
#include "src/tracing/trace-event.h"
+#include "src/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
@@ -40,7 +44,7 @@ namespace {
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
-#define WASM_INSTANCE_OBJECT_OFFSET(name) \
+#define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
template <int expected_size, int actual_size>
@@ -50,14 +54,19 @@ struct assert_field_size {
static constexpr int size = actual_size;
};
-#define WASM_INSTANCE_OBJECT_SIZE(name) \
- (WasmInstanceObject::k##name##OffsetEnd - \
- WasmInstanceObject::k##name##Offset + 1) // NOLINT(whitespace/indent)
+#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
+ FIELD_SIZE(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
- __ LoadFromInstance( \
- dst, WASM_INSTANCE_OBJECT_OFFSET(name), \
- assert_field_size<WASM_INSTANCE_OBJECT_SIZE(name), load_size>::size);
+#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
+ __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
+ assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
+ load_size>::size);
+
+#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
+ static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
+ "field in WasmInstance does not have the expected size"); \
+ __ LoadTaggedPointerFromInstance(dst, \
+ WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef DEBUG
#define DEBUG_CODE_COMMENT(str) \
@@ -69,7 +78,7 @@ struct assert_field_size {
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
- kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+ kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
@@ -100,8 +109,9 @@ class MovableLabel {
compiler::CallDescriptor* GetLoweredCallDescriptor(
Zone* zone, compiler::CallDescriptor* call_desc) {
- return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
- : call_desc;
+ return kSystemPointerSize == 4
+ ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
+ : call_desc;
}
constexpr ValueType kSupportedTypesArr[] = {kWasmI32, kWasmI64, kWasmF32,
@@ -121,12 +131,16 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
- struct Control : public ControlWithNamedConstructors<Control, Value> {
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
-
+ struct Control : public ControlBase<Value> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
+
+ template <typename... Args>
+ explicit Control(Args&&... args) V8_NOEXCEPT
+ : ControlBase(std::forward<Args>(args)...) {}
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
@@ -151,8 +165,10 @@ class LiftoffCompiler {
};
LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
- CompilationEnv* env, Zone* compilation_zone)
- : descriptor_(
+ CompilationEnv* env, Zone* compilation_zone,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : asm_(std::move(buffer)),
+ descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
compilation_zone_(compilation_zone),
@@ -181,7 +197,8 @@ class LiftoffCompiler {
void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
- decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
+ decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
+ reason);
BindUnboundLabels(decoder);
}
@@ -229,7 +246,7 @@ class LiftoffCompiler {
}
void StartFunction(FullDecoder* decoder) {
- int num_locals = decoder->NumLocals();
+ int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
__ set_local_type(i, decoder->GetLocalType(i));
@@ -298,7 +315,7 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
Register limit_address = __ GetUnusedRegister(kGpReg).gp();
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kPointerSize);
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
}
@@ -370,7 +387,6 @@ class LiftoffCompiler {
UNIMPLEMENTED();
}
}
- block->label_state.stack_base = __ num_locals();
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
@@ -409,7 +425,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool.position), false);
__ CallRuntimeStub(ool.stub);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
@@ -445,13 +461,9 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
- void Block(FullDecoder* decoder, Control* block) {
- block->label_state.stack_base = __ cache_state()->stack_height();
- }
+ void Block(FullDecoder* decoder, Control* block) {}
void Loop(FullDecoder* decoder, Control* loop) {
- loop->label_state.stack_base = __ cache_state()->stack_height();
-
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
@@ -473,6 +485,10 @@ class LiftoffCompiler {
unsupported(decoder, "try");
}
+ void Catch(FullDecoder* decoder, Control* block, Value* exception) {
+ unsupported(decoder, "catch");
+ }
+
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -488,31 +504,69 @@ class LiftoffCompiler {
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
value);
- if_block->label_state.stack_base = __ cache_state()->stack_height();
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
}
void FallThruTo(FullDecoder* decoder, Control* c) {
if (c->end_merge.reached) {
- __ MergeFullStackWith(c->label_state);
- } else if (c->is_onearmed_if()) {
- c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- c->br_merge()->arity);
- __ MergeFullStackWith(c->label_state);
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
} else {
c->label_state.Split(*__ cache_state());
}
TraceCacheState(decoder);
}
- void PopControl(FullDecoder* decoder, Control* c) {
- if (!c->is_loop() && c->end_merge.reached) {
+ void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
+ DCHECK(c->is_onearmed_if());
+ if (c->end_merge.reached) {
+ // Someone already merged to the end of the if. Merge both arms into that.
+ if (c->reachable()) {
+ // Merge the if state into the end state.
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ }
+ // Merge the else state into the end state.
+ __ bind(c->else_state->label.get());
+ __ MergeFullStackWith(c->label_state, c->else_state->state);
+ __ cache_state()->Steal(c->label_state);
+ } else if (c->reachable()) {
+ // No merge yet at the end of the if, but we need to create a merge for
+ // both arms of this if. Thus, initialize the merge point from the else
+ // state, then merge the if state into that.
+ DCHECK_EQ(0, c->end_merge.arity);
+ c->label_state.InitMerge(c->else_state->state, __ num_locals(), 0,
+ c->stack_depth);
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ // Merge the else state into the end state.
+ __ bind(c->else_state->label.get());
+ __ MergeFullStackWith(c->label_state, c->else_state->state);
__ cache_state()->Steal(c->label_state);
+ } else {
+ // No merge needed, just continue with the else state.
+ __ bind(c->else_state->label.get());
+ __ cache_state()->Steal(c->else_state->state);
}
- if (!c->label.get()->is_bound()) {
- __ bind(c->label.get());
+ }
+
+ void PopControl(FullDecoder* decoder, Control* c) {
+ if (c->is_loop()) return; // A loop just falls through.
+ if (c->is_onearmed_if()) {
+ // Special handling for one-armed ifs.
+ FinishOneArmedIf(decoder, c);
+ } else if (c->end_merge.reached) {
+ // There is a merge already. Merge our state into that, then continue with
+ // that state.
+ if (c->reachable()) {
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ }
+ __ cache_state()->Steal(c->label_state);
+ } else {
+ // No merge, just continue with our current state.
}
+
+ if (!c->label.get()->is_bound()) __ bind(c->label.get());
}
void EndControl(FullDecoder* decoder, Control* c) {}
@@ -614,8 +668,8 @@ class LiftoffCompiler {
__ PushRegister(dst_type, dst);
}
- void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
- const Value& value, Value* result) {
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
+ Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
EmitUnOp<kWasmI32, kWasmI32>( \
@@ -768,8 +822,8 @@ class LiftoffCompiler {
}
}
- void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
- const Value& lhs, const Value& rhs, Value* result) {
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
+ const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32, kWasmI32>( \
@@ -1059,30 +1113,19 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back();
}
- void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
- if (implicit) {
- DCHECK_EQ(1, decoder->control_depth());
- Control* func_block = decoder->control_at(0);
- __ bind(func_block->label.get());
- __ cache_state()->Steal(func_block->label_state);
- }
- if (!values.is_empty()) {
- if (values.size() > 1) return unsupported(decoder, "multi-return");
- LiftoffRegister reg = __ PopToRegister();
- LiftoffRegister return_reg =
- kNeedI64RegPair && values[0].type == kWasmI64
- ? LiftoffRegister::ForPair(kGpReturnRegisters[0],
- kGpReturnRegisters[1])
- : reg_class_for(values[0].type) == kGpReg
- ? LiftoffRegister(kGpReturnRegisters[0])
- : LiftoffRegister(kFpReturnRegisters[0]);
- if (reg != return_reg) __ Move(return_reg, reg, values[0].type);
- }
+ void ReturnImpl(FullDecoder* decoder) {
+ size_t num_returns = decoder->sig_->return_count();
+ if (num_returns > 1) return unsupported(decoder, "multi-return");
+ if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
+ void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
+ ReturnImpl(decoder);
+ }
+
void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
@@ -1160,12 +1203,12 @@ class LiftoffCompiler {
LiftoffRegList& pinned, uint32_t* offset) {
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
- LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerSize);
+ LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
global->index * sizeof(Address), kPointerLoadType, pinned);
*offset = 0;
} else {
- LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerSize);
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
*offset = global->offset;
}
return addr;
@@ -1231,23 +1274,32 @@ class LiftoffCompiler {
__ bind(&cont);
}
- void Br(Control* target) {
+ void BrImpl(Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
- target->br_merge()->arity);
+ target->br_merge()->arity,
+ target->stack_depth);
}
__ MergeStackWith(target->label_state, target->br_merge()->arity);
__ jmp(target->label.get());
}
- void Br(FullDecoder* decoder, Control* target) { Br(target); }
+ void Br(FullDecoder* decoder, Control* target) { BrImpl(target); }
- void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
+ void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ if (depth == decoder->control_depth() - 1) {
+ ReturnImpl(decoder);
+ } else {
+ BrImpl(decoder->control_at(depth));
+ }
+ }
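// Note on the depth check above: a branch whose depth targets the outermost
// control (the function block) is effectively a return, so it is routed
// through ReturnImpl() rather than through a merge with a branch target.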
+
+ void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
Label cont_false;
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
- Br(target);
+ BrOrRet(decoder, depth);
__ bind(&cont_false);
}
@@ -1260,7 +1312,7 @@ class LiftoffCompiler {
__ jmp(label.get());
} else {
__ bind(label.get());
- Br(decoder->control_at(br_depth));
+ BrOrRet(decoder, br_depth);
}
}
@@ -1317,10 +1369,17 @@ class LiftoffCompiler {
DCHECK(!table_iterator.has_next());
}
- void Else(FullDecoder* decoder, Control* if_block) {
- if (if_block->reachable()) __ emit_jump(if_block->label.get());
- __ bind(if_block->else_state->label.get());
- __ cache_state()->Steal(if_block->else_state->state);
+ void Else(FullDecoder* decoder, Control* c) {
+ if (c->reachable()) {
+ if (!c->end_merge.reached) {
+ c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
+ c->end_merge.arity, c->stack_depth);
+ }
+ __ MergeFullStackWith(c->label_state, *__ cache_state());
+ __ emit_jump(c->label.get());
+ }
+ __ bind(c->else_state->label.get());
+ __ cache_state()->Steal(c->else_state->state);
}
Label* AddOutOfLineTrap(WasmCodePosition position,
@@ -1339,8 +1398,8 @@ class LiftoffCompiler {
// (a jump to the trap was generated then); return false otherwise.
bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
uint32_t offset, Register index, LiftoffRegList pinned) {
- const bool statically_oob = access_size > env_->max_memory_size ||
- offset > env_->max_memory_size - access_size;
+ const bool statically_oob =
+ !IsInBounds(offset, access_size, env_->max_memory_size);
if (!statically_oob &&
(FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
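// The IsInBounds() predicate above (pulled in via src/utils.h) condenses the
// two hand-written comparisons. A minimal sketch of the overflow-safe shape
// such a check needs, using the same parameter order as the call site (the
// name is hypothetical, to avoid clashing with the real helper):
inline bool IsInBoundsSketch(uint32_t offset, uint32_t size, uint32_t max) {
  // Ordered so that offset + size is never computed directly, which could
  // overflow; size <= max - offset is equivalent once offset <= max holds.
  return offset <= max && size <= max - offset;
}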
@@ -1374,9 +1433,9 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
Register mem_size = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
} else {
__ LoadConstant(end_offset_reg,
@@ -1464,9 +1523,9 @@ class LiftoffCompiler {
__ TurboAssembler::Move(kContextRegister,
Smi::FromInt(Context::kNoContext));
Register centry = kJavaScriptCallCodeStartRegister;
- LOAD_INSTANCE_FIELD(centry, CEntryStub, kPointerSize);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
__ CallRuntimeWithCEntry(runtime_function, centry);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
}
@@ -1486,7 +1545,7 @@ class LiftoffCompiler {
Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
__ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
__ emit_ptrsize_add(index, index, tmp);
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kPointerSize);
+ LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
return index;
@@ -1507,7 +1566,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
RegClass rc = reg_class_for(value_type);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
@@ -1541,7 +1600,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, pinned);
DEBUG_CODE_COMMENT("Store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
uint32_t protected_store_pc = 0;
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
@@ -1560,7 +1619,7 @@ class LiftoffCompiler {
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg).gp();
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shr(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
}
@@ -1587,7 +1646,7 @@ class LiftoffCompiler {
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
if (kReturnRegister0 != result.gp()) {
@@ -1606,7 +1665,6 @@ class LiftoffCompiler {
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return"))
return;
- if (DidAssemblerBailout(decoder)) return;
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -1621,17 +1679,17 @@ class LiftoffCompiler {
Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
- kPointerSize);
+ kSystemPointerSize);
__ Load(LiftoffRegister(target), imported_targets, no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
Register imported_function_refs = tmp;
- LOAD_INSTANCE_FIELD(imported_function_refs, ImportedFunctionRefs,
- kPointerSize);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
+ ImportedFunctionRefs);
Register imported_function_ref = tmp;
- __ Load(LiftoffRegister(imported_function_ref), imported_function_refs,
- no_reg, ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index),
- kPointerLoadType, pinned);
+ __ LoadTaggedPointer(
+ imported_function_ref, imported_function_refs, no_reg,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
@@ -1640,7 +1698,7 @@ class LiftoffCompiler {
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1655,7 +1713,7 @@ class LiftoffCompiler {
Address addr = static_cast<Address>(imm.index);
__ CallNativeWasmCode(addr);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1730,7 +1788,7 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
__ LoadConstant(LiftoffRegister(tmp_const),
WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
// TODO(wasm): use a emit_i32_shli() instead of a multiply.
@@ -1748,24 +1806,27 @@ class LiftoffCompiler {
LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
DEBUG_CODE_COMMENT("Execute indirect call");
- if (kPointerSize == 8) {
+ if (kSystemPointerSize == 8) {
// {index} has already been multiplied by 4. Multiply by another 2.
__ LoadConstant(LiftoffRegister(tmp_const), WasmValue(2));
__ emit_i32_mul(index, index, tmp_const);
}
+ // Load the instance from {instance->ift_instances[key]}
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
+ // {index} has already been multiplied by kSystemPointerSizeLog2.
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ __ LoadTaggedPointer(tmp_const, table, index,
+ ObjectAccess::ElementOffsetInTaggedFixedArray(0),
+ pinned);
+ Register* explicit_instance = &tmp_const;
+
// Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
+ kSystemPointerSize);
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
- // Load the instance from {instance->ift_instances[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableRefs, kPointerSize);
- __ Load(LiftoffRegister(tmp_const), table, index,
- ObjectAccess::ElementOffsetInTaggedFixedArray(0), kPointerLoadType,
- pinned);
- Register* explicit_instance = &tmp_const;
-
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -1778,7 +1839,7 @@ class LiftoffCompiler {
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
@@ -1808,24 +1869,21 @@ class LiftoffCompiler {
const Vector<Value>& args) {
unsupported(decoder, "throw");
}
- void Rethrow(FullDecoder* decoder, Control* block) {
+ void Rethrow(FullDecoder* decoder, const Value& exception) {
unsupported(decoder, "rethrow");
}
- void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> caught_values) {
- unsupported(decoder, "catch");
- }
- void CatchAll(FullDecoder* decoder, Control* block) {
- unsupported(decoder, "catch-all");
+ void BrOnException(FullDecoder* decoder, const Value& exception,
+ const ExceptionIndexImmediate<validate>& imm,
+ uint32_t depth, Vector<Value> values) {
+ unsupported(decoder, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
}
void MemoryInit(FullDecoder* decoder,
- const MemoryInitImmediate<validate>& imm,
- Vector<Value> args) {
+ const MemoryInitImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
unsupported(decoder, "memory.init");
}
void MemoryDrop(FullDecoder* decoder,
@@ -1833,13 +1891,13 @@ class LiftoffCompiler {
unsupported(decoder, "memory.drop");
}
void MemoryCopy(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- Vector<Value> args) {
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
unsupported(decoder, "memory.copy");
}
void MemoryFill(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- Vector<Value> args) {
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& value, const Value& size) {
unsupported(decoder, "memory.fill");
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
@@ -1894,10 +1952,9 @@ class LiftoffCompiler {
} // namespace
-bool LiftoffCompilationUnit::ExecuteCompilation(CompilationEnv* env,
- const FunctionBody& func_body,
- Counters* counters,
- WasmFeatures* detected) {
+WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
+ CompilationEnv* env, const FunctionBody& func_body, Counters* counters,
+ WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1910,17 +1967,19 @@ bool LiftoffCompilationUnit::ExecuteCompilation(CompilationEnv* env,
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, counters->liftoff_compile_time());
+ std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
+ wasm::WasmInstructionBuffer::New();
WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
- &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
- func_body, call_descriptor, env, &zone);
+ &zone, module, env->enabled_features, detected, func_body,
+ call_descriptor, env, &zone, instruction_buffer->CreateView());
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) return false; // validation error
+ if (decoder.failed()) return WasmCompilationResult{decoder.error()};
if (!compiler->ok()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
- return false;
+ return WasmCompilationResult{WasmError{0, "Liftoff bailout"}};
}
counters->liftoff_compiled_functions()->Increment();
@@ -1933,28 +1992,24 @@ bool LiftoffCompilationUnit::ExecuteCompilation(CompilationEnv* env,
static_cast<unsigned>(func_body.end - func_body.start), compile_ms);
}
- CodeDesc desc;
- compiler->GetCode(&desc);
- OwnedVector<byte> source_positions = compiler->GetSourcePositionTable();
- OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions =
- compiler->GetProtectedInstructions();
- uint32_t frame_slot_count = compiler->GetTotalFrameSlotCount();
- int safepoint_table_offset = compiler->GetSafepointTableOffset();
-
- WasmCode* code = wasm_unit_->native_module_->AddCode(
- wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
- 0, std::move(protected_instructions), std::move(source_positions),
- WasmCode::kFunction, WasmCode::kLiftoff);
- wasm_unit_->SetResult(code, counters);
+ WasmCompilationResult result;
+ compiler->GetCode(&result.code_desc);
+ result.instr_buffer = instruction_buffer->ReleaseBuffer();
+ result.source_positions = compiler->GetSourcePositionTable();
+ result.protected_instructions = compiler->GetProtectedInstructions();
+ result.frame_slot_count = compiler->GetTotalFrameSlotCount();
+ result.safepoint_table_offset = compiler->GetSafepointTableOffset();
- return true;
+ DCHECK(result.succeeded());
+ return result;
}
#undef __
#undef TRACE
-#undef WASM_INSTANCE_OBJECT_OFFSET
-#undef WASM_INSTANCE_OBJECT_SIZE
+#undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
+#undef WASM_INSTANCE_OBJECT_FIELD_SIZE
#undef LOAD_INSTANCE_FIELD
+#undef LOAD_TAGGED_PTR_INSTANCE_FIELD
#undef DEBUG_CODE_COMMENT
} // namespace wasm
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.h b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
index 9aeed016287..e1fb79138f7 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
#include "src/base/macros.h"
+#include "src/wasm/function-compiler.h"
namespace v8 {
namespace internal {
@@ -16,7 +17,7 @@ namespace wasm {
struct CompilationEnv;
struct FunctionBody;
-class WasmCompilationUnit;
+class NativeModule;
struct WasmFeatures;
class LiftoffCompilationUnit final {
@@ -24,8 +25,9 @@ class LiftoffCompilationUnit final {
explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
: wasm_unit_(wasm_unit) {}
- bool ExecuteCompilation(CompilationEnv*, const FunctionBody&, Counters*,
- WasmFeatures* detected);
+ WasmCompilationResult ExecuteCompilation(CompilationEnv*, const FunctionBody&,
+ Counters*,
+ WasmFeatures* detected_features);
private:
WasmCompilationUnit* const wasm_unit_;
diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h
index da0f00ab51c..267a0055470 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-register.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-register.h
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace wasm {
-static constexpr bool kNeedI64RegPair = kPointerSize == 4;
+static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
enum RegClass : uint8_t {
kGpReg,
@@ -26,7 +26,7 @@ enum RegClass : uint8_t {
kNoReg = kGpRegPair + kNeedI64RegPair
};
-enum RegPairHalf : uint8_t { kLowWord, kHighWord };
+enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
static inline constexpr bool needs_reg_pair(ValueType type) {
return kNeedI64RegPair && type == kWasmI64;
@@ -92,7 +92,7 @@ class LiftoffRegister {
DCHECK_EQ(reg, fp());
}
- static LiftoffRegister from_liftoff_code(int code) {
+ static LiftoffRegister from_liftoff_code(uint32_t code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
DCHECK_EQ(code, static_cast<storage_t>(code));
@@ -153,7 +153,7 @@ class LiftoffRegister {
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- uint32_t liftoff_code() const {
+ int liftoff_code() const {
DCHECK(is_gp() || is_fp());
return code_;
}
@@ -195,6 +195,8 @@ inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
class LiftoffRegList {
public:
+ class Iterator;
+
static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
using storage_t = std::conditional<
@@ -265,13 +267,13 @@ class LiftoffRegList {
LiftoffRegister GetFirstRegSet() const {
DCHECK(!is_empty());
- unsigned first_code = base::bits::CountTrailingZeros(regs_);
+ int first_code = base::bits::CountTrailingZeros(regs_);
return LiftoffRegister::from_liftoff_code(first_code);
}
LiftoffRegister GetLastRegSet() const {
DCHECK(!is_empty());
- unsigned last_code =
+ int last_code =
8 * sizeof(regs_) - 1 - base::bits::CountLeadingZeros(regs_);
return LiftoffRegister::from_liftoff_code(last_code);
}
@@ -282,6 +284,12 @@ class LiftoffRegList {
return FromBits(regs_ & ~mask.regs_);
}
+ RegList GetGpList() { return regs_ & kGpMask; }
+ RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }
+
+ inline Iterator begin() const;
+ inline Iterator end() const;
+
static LiftoffRegList FromBits(storage_t bits) {
DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
return LiftoffRegList(bits);
@@ -300,9 +308,6 @@ class LiftoffRegList {
return list;
}
- RegList GetGpList() { return regs_ & kGpMask; }
- RegList GetFpList() { return (regs_ & kFpMask) >> kAfterMaxLiftoffGpRegCode; }
-
private:
storage_t regs_ = 0;
@@ -316,8 +321,32 @@ static constexpr LiftoffRegList kGpCacheRegList =
static constexpr LiftoffRegList kFpCacheRegList =
LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();
+class LiftoffRegList::Iterator {
+ public:
+ LiftoffRegister operator*() { return remaining_.GetFirstRegSet(); }
+ Iterator& operator++() {
+ remaining_.clear(remaining_.GetFirstRegSet());
+ return *this;
+ }
+ bool operator==(Iterator other) { return remaining_ == other.remaining_; }
+ bool operator!=(Iterator other) { return remaining_ != other.remaining_; }
+
+ private:
+ explicit Iterator(LiftoffRegList remaining) : remaining_(remaining) {}
+ friend class LiftoffRegList;
+
+ LiftoffRegList remaining_;
+};
+
+LiftoffRegList::Iterator LiftoffRegList::begin() const {
+ return Iterator{*this};
+}
+LiftoffRegList::Iterator LiftoffRegList::end() const {
+ return Iterator{LiftoffRegList{}};
+}
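// Usage: with begin()/end() defined, a LiftoffRegList can be traversed with a
// range-based for loop, as the assembler code above already does, e.g.
//
//   for (LiftoffRegister reg : load_dst_regs_) { ... }
//
// Each increment clears the lowest set bit, so registers are visited in
// ascending liftoff-code order and iteration ends once the list is empty.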
+
static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
- return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+ return rc == kFpReg ? kFpCacheRegList : kGpCacheRegList;
}
inline std::ostream& operator<<(std::ostream& os, LiftoffRegList reglist) {
diff --git a/chromium/v8/src/wasm/baseline/mips/OWNERS b/chromium/v8/src/wasm/baseline/mips/OWNERS
index c653ce404d6..b455d9ef29d 100644
--- a/chromium/v8/src/wasm/baseline/mips/OWNERS
+++ b/chromium/v8/src/wasm/baseline/mips/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
+arikalo@wavecomp.com
+prudic@wavecomp.com
skovacevic@wavecomp.com
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 268e6b56a1d..cb66406de4a 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -34,8 +34,10 @@ inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
-inline MemOperand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset;
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
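// Offset arithmetic above, worked through with kStackSlotSize == 8: for
// index 3, the low word is at fp - kFirstStackSlotOffset - 24 and the high
// word at fp - kFirstStackSlotOffset - 28, the same bytes the old
// half_index scheme addressed as indices 6 and 7.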
@@ -255,9 +257,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
 // We can't run out of space; just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
- buffer_ + offset, kAvailableSpace,
- CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
 // If {bytes} fits into 16 bits, an addiu will be generated and the two
 // nops will stay untouched. Otherwise, a lui-ori sequence will load it into
 // a register and, as a third instruction, an addu will be generated.
@@ -301,6 +303,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
lw(dst, MemOperand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
@@ -309,6 +316,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -467,7 +483,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- int32_t offset = kPointerSize * (caller_slot_idx + 1);
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
liftoff::Load(this, dst, fp, offset, type);
}
@@ -499,8 +515,8 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
sw(reg.gp(), dst);
break;
case kWasmI64:
- sw(reg.low_gp(), dst);
- sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
swc1(reg.fp(), dst);
@@ -531,8 +547,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word));
- sw(tmp.low_gp(), dst);
- sw(tmp.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
}
default:
@@ -550,8 +566,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
lw(reg.gp(), src);
break;
case kWasmI64:
- lw(reg.low_gp(), src);
- lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
lwc1(reg.fp(), src);
@@ -564,8 +580,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- lw(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ lw(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -1306,11 +1323,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
- unsigned offset = num_gp_regs * kPointerSize;
+ unsigned offset = num_gp_regs * kSystemPointerSize;
addiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
- offset -= kPointerSize;
+ offset -= kSystemPointerSize;
sw(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
@@ -1347,13 +1364,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
lw(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
- gp_offset += kPointerSize;
+ gp_offset += kSystemPointerSize;
}
addiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
@@ -1438,12 +1456,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+ liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
asm_->push(kScratchReg);
}
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ +
- (slot.half_ == kLowWord ? 0 : 1)));
+ liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->push(kScratchReg);
break;
}
diff --git a/chromium/v8/src/wasm/baseline/mips64/OWNERS b/chromium/v8/src/wasm/baseline/mips64/OWNERS
index c653ce404d6..b455d9ef29d 100644
--- a/chromium/v8/src/wasm/baseline/mips64/OWNERS
+++ b/chromium/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,2 +1,3 @@
-ibogosavljevic@wavecomp.com
+arikalo@wavecomp.com
+prudic@wavecomp.com
skovacevic@wavecomp.com
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 1ca6d7b2a12..6f9de8189c5 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -72,18 +72,18 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) {
case kWasmI32:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
case kWasmI64:
assm->push(reg.gp());
break;
case kWasmF32:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
case kWasmF64:
- assm->daddiu(sp, sp, -kPointerSize);
+ assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
default:
@@ -222,9 +222,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
 // We can't run out of space; just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
- buffer_ + offset, kAvailableSpace,
- CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
 // If {bytes} fits into 16 bits, a daddiu will be generated and the two
 // nops will stay untouched. Otherwise, a lui-ori sequence will load it into
 // a register and, as a third instruction, a daddu will be generated.
@@ -267,6 +267,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
sd(instance, liftoff::GetInstanceOperand());
}
@@ -275,6 +280,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
ld(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI64Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -392,7 +406,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
@@ -482,7 +496,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
@@ -1158,11 +1172,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
- unsigned offset = num_gp_regs * kPointerSize;
+ unsigned offset = num_gp_regs * kSystemPointerSize;
daddiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
- offset -= kPointerSize;
+ offset -= kSystemPointerSize;
sd(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
@@ -1199,13 +1213,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
ld(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
- gp_offset += kPointerSize;
+ gp_offset += kSystemPointerSize;
}
daddiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16-bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 7c77fa4a2b4..d6c372e80f1 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -37,6 +37,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
BAILOUT("LoadFromInstance");
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ BAILOUT("LoadTaggedPointerFromInstance");
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
@@ -45,6 +50,13 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ BAILOUT("LoadTaggedPointer");
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -93,7 +105,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
BAILOUT("Fill");
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index b58d1c47dd5..9680d9664fb 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -37,6 +37,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
BAILOUT("LoadFromInstance");
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ BAILOUT("LoadTaggedPointerFromInstance");
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
BAILOUT("SpillInstance");
}
@@ -45,6 +50,13 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
BAILOUT("FillInstanceInto");
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ BAILOUT("LoadTaggedPointer");
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -93,7 +105,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
BAILOUT("Fill");
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index f32761186e0..35a2e855f13 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
- assm->subp(rsp, Immediate(kPointerSize));
+ assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
- assm->subp(rsp, Immediate(kPointerSize));
+ assm->subp(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
@@ -146,8 +146,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space; just pass anything big enough to keep the
// assembler from trying to grow the buffer.
constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
- kAvailableSpace);
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
patching_assembler.sub_sp_32(bytes);
}
@@ -195,6 +196,13 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ DCHECK_LE(offset, kMaxInt);
+ movp(dst, liftoff::GetInstanceOperand());
+ LoadTaggedPointerField(dst, Operand(dst, offset));
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
movp(liftoff::GetInstanceOperand(), instance);
}
@@ -203,6 +211,17 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
movp(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ LoadTaggedPointerField(dst, src_op);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -293,19 +312,22 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
+ Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_index, src_index);
- if (cache_state_.has_unused_register(kGpReg)) {
- Fill(LiftoffRegister{kScratchRegister}, src_index, type);
- Spill(dst_index, LiftoffRegister{kScratchRegister}, type);
+ Operand src = liftoff::GetStackSlot(src_index);
+ Operand dst = liftoff::GetStackSlot(dst_index);
+ if (ValueTypes::ElementSizeLog2Of(type) == 2) {
+ movl(kScratchRegister, src);
+ movl(dst, kScratchRegister);
} else {
- pushq(liftoff::GetStackSlot(src_index));
- popq(liftoff::GetStackSlot(dst_index));
+ DCHECK_EQ(3, ValueTypes::ElementSizeLog2Of(type));
+ movq(kScratchRegister, src);
+ movq(dst, kScratchRegister);
}
}
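The rewritten MoveStackValue always bounces the value through the scratch register, picking a 32-bit move for 4-byte types and a 64-bit move for 8-byte types. A self-contained model of that size dispatch (a sketch, not V8 source; the flat frame buffer is an assumption):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    // size_log_2 mirrors ValueTypes::ElementSizeLog2Of: 2 for i32/f32,
    // 3 for i64/f64.
    void MoveStackSlot(unsigned char* frame, size_t dst_off, size_t src_off,
                       int size_log_2) {
      if (size_log_2 == 2) {
        uint32_t scratch;  // movl kScratchRegister, src / movl dst, scratch
        std::memcpy(&scratch, frame + src_off, sizeof scratch);
        std::memcpy(frame + dst_off, &scratch, sizeof scratch);
      } else {
        uint64_t scratch;  // movq kScratchRegister, src / movq dst, scratch
        std::memcpy(&scratch, frame + src_off, sizeof scratch);
        std::memcpy(frame + dst_off, &scratch, sizeof scratch);
      }
    }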
@@ -400,7 +422,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
@@ -413,12 +435,17 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
- if (dst == rhs) {
- negl(dst);
- addl(dst, lhs);
- } else {
+ if (dst != rhs) {
+ // Default path.
if (dst != lhs) movl(dst, lhs);
subl(dst, rhs);
+ } else if (lhs == rhs) {
+ // Degenerate case.
+ xorl(dst, dst);
+ } else {
+ // Emit {dst = lhs + -rhs} if dst == rhs.
+ negl(dst);
+ addl(dst, lhs);
}
}
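The dst == rhs branch relies on the identity lhs - rhs == lhs + (-rhs): negating in place frees the register that aliases rhs before lhs is added. A standalone check of the identity under hardware-style modulo-2^32 wrapping (a sketch, not V8 code):

    #include <cstdint>
    int32_t SubWhenDstAliasesRhs(int32_t lhs, int32_t rhs) {
      uint32_t dst = static_cast<uint32_t>(rhs);
      dst = 0u - dst;                     // negl dst: dst = -rhs (mod 2^32)
      dst += static_cast<uint32_t>(lhs);  // addl dst, lhs
      return static_cast<int32_t>(dst);   // == lhs - rhs for all inputs
    }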
@@ -1439,8 +1466,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- ret(static_cast<int>(num_stack_slots * kPointerSize));
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16-bit immediate
+ ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
diff --git a/chromium/v8/src/wasm/compilation-environment.h b/chromium/v8/src/wasm/compilation-environment.h
index e66929d4e30..c6bed6c2e4b 100644
--- a/chromium/v8/src/wasm/compilation-environment.h
+++ b/chromium/v8/src/wasm/compilation-environment.h
@@ -5,15 +5,17 @@
#ifndef V8_WASM_COMPILATION_ENVIRONMENT_H_
#define V8_WASM_COMPILATION_ENVIRONMENT_H_
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-tier.h"
namespace v8 {
namespace internal {
namespace wasm {
class NativeModule;
-class ResultBase;
+class WasmError;
enum RuntimeExceptionSupport : bool {
kRuntimeExceptionSupport = true,
@@ -47,11 +49,15 @@ struct CompilationEnv {
// bytes.
const uint64_t max_memory_size;
+ // Features enabled for this compilation.
+ const WasmFeatures enabled_features;
+
const LowerSimd lower_simd;
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
RuntimeExceptionSupport runtime_exception_support,
+ const WasmFeatures& enabled_features,
LowerSimd lower_simd = kNoLowerSimd)
: module(module),
use_trap_handler(use_trap_handler),
@@ -62,6 +68,7 @@ struct CompilationEnv {
? module->maximum_pages
: kV8MaxWasmMemoryPages) *
uint64_t{kWasmPageSize}),
+ enabled_features(enabled_features),
lower_simd(lower_simd) {}
};
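Call sites now supply the feature set alongside the trap-handler and exception-support flags. A construction sketch; kNoTrapHandler, kNoRuntimeExceptionSupport, and WasmFeaturesFromFlags are assumed from the surrounding wasm headers rather than shown in this diff:

    CompilationEnv env(module, kNoTrapHandler, kNoRuntimeExceptionSupport,
                       WasmFeaturesFromFlags());  // lower_simd defaults to
                                                  // kNoLowerSimd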
@@ -73,22 +80,41 @@ class WireBytesStorage {
virtual Vector<const uint8_t> GetCode(WireBytesRef) const = 0;
};
+// Callbacks will receive either {kFailedCompilation} or both
+// {kFinishedBaselineCompilation} and {kFinishedTopTierCompilation}, in that
+// order. If tier-up is disabled, both events are delivered back-to-back.
+enum class CompilationEvent : uint8_t {
+ kFinishedBaselineCompilation,
+ kFinishedTopTierCompilation,
+ kFailedCompilation,
+
+ // Marker:
+ // After an event >= kFirstFinalEvent, no further events are generated.
+ kFirstFinalEvent = kFinishedTopTierCompilation
+};
+
// The implementation of {CompilationState} lives in module-compiler.cc.
// This is the PIMPL interface to that private class.
class CompilationState {
public:
+ using callback_t = std::function<void(CompilationEvent, const WasmError*)>;
~CompilationState();
void CancelAndWait();
- void SetError(uint32_t func_index, const ResultBase& error_result);
+ void SetError(uint32_t func_index, const WasmError& error);
void SetWireBytesStorage(std::shared_ptr<WireBytesStorage>);
- std::shared_ptr<WireBytesStorage> GetWireBytesStorage();
+ std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const;
+
+ void AddCallback(callback_t);
+
+ bool failed() const;
private:
friend class NativeModule;
+ friend class WasmCompilationUnit;
CompilationState() = delete;
static std::unique_ptr<CompilationState> New(Isolate*, NativeModule*);
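Given the event ordering documented above, a callback registered through the new AddCallback hook only needs to react to the failure event and the final success event. A usage sketch, assuming a CompilationState* named compilation_state and two hypothetical application hooks:

    void HandleFailure(const WasmError&);  // hypothetical hooks, not V8 API
    void HandleSuccess();

    compilation_state->AddCallback(
        [](CompilationEvent event, const WasmError* error) {
          switch (event) {
            case CompilationEvent::kFailedCompilation:
              HandleFailure(*error);  // {error} assumed non-null on failure
              break;
            case CompilationEvent::kFinishedTopTierCompilation:
              HandleSuccess();        // final event; no further callbacks
              break;
            default:
              break;  // kFinishedBaselineCompilation: keep waiting
          }
        });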
diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h
index 1b023a955e6..2c555bb4138 100644
--- a/chromium/v8/src/wasm/decoder.h
+++ b/chromium/v8/src/wasm/decoder.h
@@ -173,30 +173,30 @@ class Decoder {
return true;
}
- void error(const char* msg) { errorf(pc_, "%s", msg); }
+ // Do not inline error methods. This has a measurable impact on validation
+ // time; see https://crbug.com/910432.
+ void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
+ void V8_NOINLINE error(const uint8_t* pc, const char* msg) {
+ errorf(pc_offset(pc), "%s", msg);
+ }
+ void V8_NOINLINE error(uint32_t offset, const char* msg) {
+ errorf(offset, "%s", msg);
+ }
- void error(const byte* pc, const char* msg) { errorf(pc, "%s", msg); }
+ void V8_NOINLINE PRINTF_FORMAT(3, 4)
+ errorf(uint32_t offset, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(offset, format, args);
+ va_end(args);
+ }
- // Sets internal error state.
- void PRINTF_FORMAT(3, 4) errorf(const byte* pc, const char* format, ...) {
- // Only report the first error.
- if (!ok()) return;
-#if DEBUG
- if (FLAG_wasm_break_on_decoder_error) {
- base::OS::DebugBreak();
- }
-#endif
- constexpr int kMaxErrorMsg = 256;
- EmbeddedVector<char, kMaxErrorMsg> buffer;
- va_list arguments;
- va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- error_msg_.assign(buffer.start(), len);
- DCHECK_GE(pc, start_);
- error_offset_ = static_cast<uint32_t>(pc - start_) + buffer_offset_;
- onFirstError();
+ void V8_NOINLINE PRINTF_FORMAT(3, 4)
+ errorf(const uint8_t* pc, const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(pc_offset(pc), format, args);
+ va_end(args);
}
// Behavior triggered on first error, overridden in subclasses.
@@ -218,10 +218,10 @@ class Decoder {
template <typename T, typename U = typename std::remove_reference<T>::type>
Result<U> toResult(T&& val) {
if (failed()) {
- TRACE("Result error: %s\n", error_msg_.c_str());
- return Result<U>::Error(error_offset_, std::move(error_msg_));
+ TRACE("Result error: %s\n", error_.message().c_str());
+ return Result<U>{error_};
}
- return Result<U>(std::forward<T>(val));
+ return Result<U>{std::forward<T>(val)};
}
// Resets the boundaries of this decoder.
@@ -232,24 +232,30 @@ class Decoder {
pc_ = start;
end_ = end;
buffer_offset_ = buffer_offset;
- error_offset_ = 0;
- error_msg_.clear();
+ error_ = {};
}
void Reset(Vector<const uint8_t> bytes, uint32_t buffer_offset = 0) {
Reset(bytes.begin(), bytes.end(), buffer_offset);
}
- bool ok() const { return error_msg_.empty(); }
+ bool ok() const { return error_.empty(); }
bool failed() const { return !ok(); }
bool more() const { return pc_ < end_; }
+ const WasmError& error() const { return error_; }
const byte* start() const { return start_; }
const byte* pc() const { return pc_; }
- uint32_t position() const { return static_cast<uint32_t>(pc_ - start_); }
- uint32_t pc_offset() const {
- return static_cast<uint32_t>(pc_ - start_) + buffer_offset_;
+ uint32_t V8_INLINE position() const {
+ return static_cast<uint32_t>(pc_ - start_);
}
+ // This needs to be inlined for performance (see https://crbug.com/910432).
+ uint32_t V8_INLINE pc_offset(const uint8_t* pc) const {
+ DCHECK_LE(start_, pc);
+ DCHECK_GE(kMaxUInt32 - buffer_offset_, pc - start_);
+ return static_cast<uint32_t>(pc - start_) + buffer_offset_;
+ }
+ uint32_t pc_offset() const { return pc_offset(pc_); }
uint32_t buffer_offset() const { return buffer_offset_; }
// Takes an offset relative to the module start and returns an offset relative
// to the current buffer of the decoder.
@@ -265,10 +271,25 @@ class Decoder {
const byte* end_;
// The offset of the current buffer in the module. Needed for streaming.
uint32_t buffer_offset_;
- uint32_t error_offset_ = 0;
- std::string error_msg_;
+ WasmError error_;
private:
+ void verrorf(uint32_t offset, const char* format, va_list args) {
+ // Only report the first error.
+ if (!ok()) return;
+#if DEBUG
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
+ }
+#endif
+ constexpr int kMaxErrorMsg = 256;
+ EmbeddedVector<char, kMaxErrorMsg> buffer;
+ int len = VSNPrintF(buffer, format, args);
+ CHECK_LT(0, len);
+ error_ = {offset, {buffer.start(), static_cast<size_t>(len)}};
+ onFirstError();
+ }
+
template <typename IntType, bool validate>
inline IntType read_little_endian(const byte* pc, const char* msg) {
if (!validate) {
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 8fe829723ab..578a0ff5b7d 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -63,6 +63,7 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(AtomicWake, Uint32) \
V(I32AtomicWait, Uint32) \
+ V(I64AtomicWait, Uint32) \
V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \
@@ -287,11 +288,11 @@ struct BlockTypeImmediate {
};
template <Decoder::ValidateFlag validate>
-struct BreakDepthImmediate {
+struct BranchDepthImmediate {
uint32_t depth;
uint32_t length;
- inline BreakDepthImmediate(Decoder* decoder, const byte* pc) {
- depth = decoder->read_u32v<validate>(pc + 1, &length, "break depth");
+ inline BranchDepthImmediate(Decoder* decoder, const byte* pc) {
+ depth = decoder->read_u32v<validate>(pc + 1, &length, "branch depth");
}
};
@@ -513,22 +514,19 @@ struct TableDropImmediate {
// An entry on the value stack.
struct ValueBase {
- const byte* pc;
- ValueType type;
-
- // Named constructors.
- static ValueBase Unreachable(const byte* pc) { return {pc, kWasmVar}; }
+ const byte* pc = nullptr;
+ ValueType type = kWasmStmt;
- static ValueBase New(const byte* pc, ValueType type) { return {pc, type}; }
+ ValueBase(const byte* pc, ValueType type) : pc(pc), type(type) {}
};
template <typename Value>
struct Merge {
- uint32_t arity;
- union {
+ uint32_t arity = 0;
+ union { // Either multiple values or a single value.
Value* array;
Value first;
- } vals; // Either multiple values or a single value.
+ } vals = {nullptr}; // Initialize {array} with {nullptr}.
// Tracks whether this merge was ever reached. Uses precise reachability, like
// Reachability::kReachable.
@@ -548,8 +546,7 @@ enum ControlKind : uint8_t {
kControlBlock,
kControlLoop,
kControlTry,
- kControlTryCatch,
- kControlTryCatchAll
+ kControlTryCatch
};
enum Reachability : uint8_t {
@@ -564,18 +561,24 @@ enum Reachability : uint8_t {
// An entry on the control stack (i.e. if, block, loop, or try).
template <typename Value>
struct ControlBase {
- ControlKind kind;
- uint32_t stack_depth; // stack height at the beginning of the construct.
- const byte* pc;
+ ControlKind kind = kControlBlock;
+ uint32_t stack_depth = 0; // stack height at the beginning of the construct.
+ const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
Merge<Value> start_merge;
Merge<Value> end_merge;
- ControlBase() = default;
- ControlBase(ControlKind kind, uint32_t stack_depth, const byte* pc)
- : kind(kind), stack_depth(stack_depth), pc(pc) {}
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
+
+ ControlBase(ControlKind kind, uint32_t stack_depth, const uint8_t* pc,
+ Reachability reachability)
+ : kind(kind),
+ stack_depth(stack_depth),
+ pc(pc),
+ reachability(reachability),
+ start_merge(reachability == kReachable) {}
// Check whether the current block is reachable.
bool reachable() const { return reachability == kReachable; }
@@ -598,143 +601,91 @@ struct ControlBase {
bool is_loop() const { return kind == kControlLoop; }
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
- bool is_try_catchall() const { return kind == kControlTryCatchAll; }
- bool is_try() const {
- return is_incomplete_try() || is_try_catch() || is_try_catchall();
- }
+ bool is_try() const { return is_incomplete_try() || is_try_catch(); }
inline Merge<Value>* br_merge() {
return is_loop() ? &this->start_merge : &this->end_merge;
}
-
- // Named constructors.
- static ControlBase Block(const byte* pc, uint32_t stack_depth) {
- return {kControlBlock, stack_depth, pc};
- }
-
- static ControlBase If(const byte* pc, uint32_t stack_depth) {
- return {kControlIf, stack_depth, pc};
- }
-
- static ControlBase Loop(const byte* pc, uint32_t stack_depth) {
- return {kControlLoop, stack_depth, pc};
- }
-
- static ControlBase Try(const byte* pc, uint32_t stack_depth) {
- return {kControlTry, stack_depth, pc};
- }
-};
-
-#define CONCRETE_NAMED_CONSTRUCTOR(concrete_type, abstract_type, name) \
- template <typename... Args> \
- static concrete_type name(Args&&... args) { \
- concrete_type val; \
- static_cast<abstract_type&>(val) = \
- abstract_type::name(std::forward<Args>(args)...); \
- return val; \
- }
-
-// Provide the default named constructors, which default-initialize the
-// ConcreteType and the initialize the fields of ValueBase correctly.
-// Use like this:
-// struct Value : public ValueWithNamedConstructors<Value> { int new_field; };
-template <typename ConcreteType>
-struct ValueWithNamedConstructors : public ValueBase {
- // Named constructors.
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, Unreachable)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ValueBase, New)
-};
-
-// Provide the default named constructors, which default-initialize the
-// ConcreteType and the initialize the fields of ControlBase correctly.
-// Use like this:
-// struct Control : public ControlWithNamedConstructors<Control, Value> {
-// int my_uninitialized_field;
-// char* other_field = nullptr;
-// };
-template <typename ConcreteType, typename Value>
-struct ControlWithNamedConstructors : public ControlBase<Value> {
- // Named constructors.
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Block)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, If)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Loop)
- CONCRETE_NAMED_CONSTRUCTOR(ConcreteType, ControlBase<Value>, Try)
};
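With the named constructors gone, every control entry now goes through the single four-argument constructor, with reachability supplied up front so {start_merge} can be initialized consistently. A sketch of how a decoder's PushControl might construct an entry after this change (illustrative, pieced together from this hunk):

    // Inside the decoder: inherit reachability from the enclosing block.
    Reachability reachability =
        control_.empty() ? kReachable : control_.back().innerReachability();
    control_.emplace_back(kControlBlock, static_cast<uint32_t>(stack_.size()),
                          this->pc_, reachability);
    Control* c = &control_.back();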
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
-#define INTERFACE_FUNCTIONS(F) \
- /* General: */ \
- F(StartFunction) \
- F(StartFunctionBody, Control* block) \
- F(FinishFunction) \
- F(OnFirstError) \
- F(NextInstruction, WasmOpcode) \
- /* Control: */ \
- F(Block, Control* block) \
- F(Loop, Control* block) \
- F(Try, Control* block) \
- F(If, const Value& cond, Control* if_block) \
- F(FallThruTo, Control* c) \
- F(PopControl, Control* block) \
- F(EndControl, Control* block) \
- /* Instructions: */ \
- F(UnOp, WasmOpcode opcode, FunctionSig*, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, FunctionSig*, const Value& lhs, \
- const Value& rhs, Value* result) \
- F(I32Const, Value* result, int32_t value) \
- F(I64Const, Value* result, int64_t value) \
- F(F32Const, Value* result, float value) \
- F(F64Const, Value* result, double value) \
- F(RefNull, Value* result) \
- F(Drop, const Value& value) \
- F(DoReturn, Vector<Value> values, bool implicit) \
- F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(TeeLocal, const Value& value, Value* result, \
- const LocalIndexImmediate<validate>& imm) \
- F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(Unreachable) \
- F(Select, const Value& cond, const Value& fval, const Value& tval, \
- Value* result) \
- F(Br, Control* target) \
- F(BrIf, const Value& cond, Control* target) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
- F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value) \
- F(CurrentMemoryPages, Value* result) \
- F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
- Value returns[]) \
- F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
- const Vector<Value> inputs, Value* result) \
- F(SimdShiftOp, WasmOpcode opcode, const SimdShiftImmediate<validate>& imm, \
- const Value& input, Value* result) \
- F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>& imm, \
- const Vector<Value>& args) \
- F(Rethrow, Control* block) \
- F(CatchException, const ExceptionIndexImmediate<validate>& imm, \
- Control* block, Vector<Value> caught_values) \
- F(CatchAll, Control* block) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, Vector<Value> args) \
- F(MemoryDrop, const MemoryDropImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryIndexImmediate<validate>& imm, Vector<Value> args) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, Vector<Value> args) \
- F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
- F(TableDrop, const TableDropImmediate<validate>& imm) \
+#define INTERFACE_FUNCTIONS(F) \
+ /* General: */ \
+ F(StartFunction) \
+ F(StartFunctionBody, Control* block) \
+ F(FinishFunction) \
+ F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
+ /* Control: */ \
+ F(Block, Control* block) \
+ F(Loop, Control* block) \
+ F(Try, Control* block) \
+ F(Catch, Control* block, Value* exception) \
+ F(If, const Value& cond, Control* if_block) \
+ F(FallThruTo, Control* c) \
+ F(PopControl, Control* block) \
+ F(EndControl, Control* block) \
+ /* Instructions: */ \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
+ F(I32Const, Value* result, int32_t value) \
+ F(I64Const, Value* result, int64_t value) \
+ F(F32Const, Value* result, float value) \
+ F(F64Const, Value* result, double value) \
+ F(RefNull, Value* result) \
+ F(Drop, const Value& value) \
+ F(DoReturn, Vector<Value> values) \
+ F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
+ F(SetLocal, const Value& value, const LocalIndexImmediate<validate>& imm) \
+ F(TeeLocal, const Value& value, Value* result, \
+ const LocalIndexImmediate<validate>& imm) \
+ F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(Unreachable) \
+ F(Select, const Value& cond, const Value& fval, const Value& tval, \
+ Value* result) \
+ F(Br, Control* target) \
+ F(BrIf, const Value& cond, uint32_t depth) \
+ F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(Else, Control* if_block) \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value) \
+ F(CurrentMemoryPages, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
+ F(CallDirect, const CallFunctionImmediate<validate>& imm, \
+ const Value args[], Value returns[]) \
+ F(CallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[], \
+ Value returns[]) \
+ F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ const Vector<Value> inputs, Value* result) \
+ F(SimdShiftOp, WasmOpcode opcode, const SimdShiftImmediate<validate>& imm, \
+ const Value& input, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
+ const Value& input0, const Value& input1, Value* result) \
+ F(Throw, const ExceptionIndexImmediate<validate>& imm, \
+ const Vector<Value>& args) \
+ F(Rethrow, const Value& exception) \
+ F(BrOnException, const Value& exception, \
+ const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
+ Vector<Value> values) \
+ F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryDrop, const MemoryDropImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
+ F(TableDrop, const TableDropImmediate<validate>& imm) \
F(TableCopy, const TableIndexImmediate<validate>& imm, Vector<Value> args)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
@@ -896,16 +847,23 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
imm.index < module_->exceptions.size())) {
- errorf(pc + 1, "Invalid exception index: %u", imm.index);
return false;
}
imm.exception = &module_->exceptions[imm.index];
return true;
}
+ inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ if (!Complete(pc, imm)) {
+ errorf(pc + 1, "Invalid exception index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr && imm.index < module_->globals.size())) {
errorf(pc + 1, "invalid global index: %u", imm.index);
@@ -954,10 +912,10 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Validate(const byte* pc, BreakDepthImmediate<validate>& imm,
+ inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
- errorf(pc + 1, "invalid break depth: %u", imm.depth);
+ errorf(pc + 1, "invalid branch depth: %u", imm.depth);
return false;
}
return true;
@@ -1076,12 +1034,21 @@ class WasmDecoder : public Decoder {
inline bool Validate(MemoryInitImmediate<validate>& imm) {
if (!Validate(imm.memory)) return false;
- // TODO(binji): validate imm.data_segment_index
+ if (!VALIDATE(module_ != nullptr &&
+ imm.data_segment_index <
+ module_->num_declared_data_segments)) {
+ errorf(pc_ + 2, "invalid data segment index: %u", imm.data_segment_index);
+ return false;
+ }
return true;
}
inline bool Validate(MemoryDropImmediate<validate>& imm) {
- // TODO(binji): validate imm.data_segment_index
+ if (!VALIDATE(module_ != nullptr &&
+ imm.index < module_->num_declared_data_segments)) {
+ errorf(pc_ + 2, "invalid data segment index: %u", imm.index);
+ return false;
+ }
return true;
}
@@ -1096,7 +1063,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(TableInitImmediate<validate>& imm) {
if (!Validate(pc_ + 1, imm.table)) return false;
if (!VALIDATE(module_ != nullptr &&
- imm.elem_segment_index < module_->table_inits.size())) {
+ imm.elem_segment_index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u",
imm.elem_segment_index);
return false;
@@ -1106,7 +1073,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(TableDropImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->table_inits.size())) {
+ imm.index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u", imm.index);
return false;
}
@@ -1124,10 +1091,9 @@ class WasmDecoder : public Decoder {
MemoryAccessImmediate<validate> imm(decoder, pc, UINT32_MAX);
return 1 + imm.length;
}
- case kExprRethrow:
case kExprBr:
case kExprBrIf: {
- BreakDepthImmediate<validate> imm(decoder, pc);
+ BranchDepthImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
case kExprSetGlobal:
@@ -1153,12 +1119,18 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
- case kExprThrow:
- case kExprCatch: {
+ case kExprThrow: {
ExceptionIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
+ case kExprBrOnExn: {
+ BranchDepthImmediate<validate> imm_br(decoder, pc);
+ if (!VALIDATE(decoder->ok())) return 1 + imm_br.length;
+ ExceptionIndexImmediate<validate> imm_idx(decoder, pc + imm_br.length);
+ return 1 + imm_br.length + imm_idx.length;
+ }
+
case kExprSetLocal:
case kExprTeeLocal:
case kExprGetLocal: {
@@ -1193,6 +1165,7 @@ class WasmDecoder : public Decoder {
case kNumericPrefix: {
byte numeric_index =
decoder->read_u8<validate>(pc + 1, "numeric_index");
+ if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
switch (opcode) {
@@ -1237,6 +1210,7 @@ class WasmDecoder : public Decoder {
}
case kSimdPrefix: {
byte simd_index = decoder->read_u8<validate>(pc + 1, "simd_index");
+ if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
switch (opcode) {
@@ -1265,6 +1239,7 @@ class WasmDecoder : public Decoder {
}
case kAtomicPrefix: {
byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
+ if (!VALIDATE(decoder->ok())) return 2;
WasmOpcode opcode =
static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
switch (opcode) {
@@ -1309,7 +1284,9 @@ class WasmDecoder : public Decoder {
case kExprBrIf:
case kExprBrTable:
case kExprIf:
+ case kExprRethrow:
return {1, 0};
+ case kExprCatch:
case kExprGetLocal:
case kExprGetGlobal:
case kExprI32Const:
@@ -1331,11 +1308,19 @@ class WasmDecoder : public Decoder {
return {imm.sig->parameter_count() + 1,
imm.sig->return_count()};
}
+ case kExprThrow: {
+ ExceptionIndexImmediate<validate> imm(this, pc);
+ CHECK(Complete(pc, imm));
+ DCHECK_EQ(0, imm.exception->sig->return_count());
+ return {imm.exception->sig->parameter_count(), 0};
+ }
case kExprBr:
case kExprBlock:
case kExprLoop:
case kExprEnd:
case kExprElse:
+ case kExprTry:
+ case kExprBrOnExn:
case kExprNop:
case kExprReturn:
case kExprUnreachable:
@@ -1408,8 +1393,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
local_type_vec_(zone),
stack_(zone),
control_(zone),
- args_(zone),
- last_end_found_(false) {
+ args_(zone) {
this->local_types_ = &local_type_vec_;
}
@@ -1436,24 +1420,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DecodeFunctionBody();
if (!this->failed()) CALL_INTERFACE(FinishFunction);
- if (this->failed()) return this->TraceFailed();
-
- if (!control_.empty()) {
- // Generate a better error message whether the unterminated control
- // structure is the function body block or an innner structure.
- if (control_.size() > 1) {
- this->error(control_.back().pc, "unterminated control structure");
- } else {
- this->error("function body must end with \"end\" opcode");
- }
- return TraceFailed();
- }
-
- if (!last_end_found_) {
+ // Generate a better error message depending on whether the unterminated
+ // control structure is the function body block or an inner structure.
+ if (control_.size() > 1) {
+ this->error(control_.back().pc, "unterminated control structure");
+ } else if (control_.size() == 1) {
this->error("function body must end with \"end\" opcode");
- return false;
}
+ if (this->failed()) return this->TraceFailed();
+
if (FLAG_trace_wasm_decode_time) {
double ms = decode_timer.Elapsed().InMillisecondsF();
PrintF("wasm-decode %s (%0.3f ms)\n\n",
@@ -1466,9 +1442,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_offset_,
- this->GetBufferRelativeOffset(this->error_offset_),
- this->error_msg_.c_str());
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
+ this->GetBufferRelativeOffset(this->error_.offset()),
+ this->error_.message().c_str());
return false;
}
@@ -1479,7 +1455,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
inline Zone* zone() const { return zone_; }
- inline uint32_t NumLocals() {
+ inline uint32_t num_locals() const {
return static_cast<uint32_t>(local_type_vec_.size());
}
@@ -1508,21 +1484,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline Value* stack_value(uint32_t depth) {
- DCHECK_GT(stack_.size(), depth);
- return &stack_[stack_.size() - depth - 1];
- }
-
- inline Value& GetMergeValueFromStack(
- Control* c, Merge<Value>* merge, uint32_t i) {
- DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_GT(merge->arity, i);
- DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
- return stack_[stack_.size() - merge->arity + i];
+ DCHECK_LT(0, depth);
+ DCHECK_GE(stack_.size(), depth);
+ return &*(stack_.end() - depth);
}
private:
- static constexpr size_t kErrorMsgSize = 128;
-
Zone* zone_;
Interface interface_;
@@ -1531,7 +1498,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
ZoneVector<Value> args_; // parameters of current block or call
- bool last_end_found_;
+
+ static Value UnreachableValue(const uint8_t* pc) {
+ return Value{pc, kWasmVar};
+ }
bool CheckHasMemory() {
if (!VALIDATE(this->module_->has_memory)) {
@@ -1584,12 +1554,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Set up initial function block.
{
- auto* c = PushBlock();
+ auto* c = PushControl(kControlBlock);
InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
InitMerge(&c->end_merge,
static_cast<uint32_t>(this->sig_->return_count()),
- [&] (uint32_t i) {
- return Value::New(this->pc_, this->sig_->GetReturn(i)); });
+ [&](uint32_t i) {
+ return Value{this->pc_, this->sig_->GetReturn(i)};
+ });
CALL_INTERFACE(StartFunctionBody, c);
}
@@ -1610,559 +1581,543 @@ class WasmFullDecoder : public WasmDecoder<validate> {
#define TRACE_PART(...)
#endif
- FunctionSig* sig = const_cast<FunctionSig*>(kSimpleOpcodeSigs[opcode]);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- } else {
- // Complex bytecode.
- switch (opcode) {
- case kExprNop:
- break;
- case kExprBlock: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* block = PushBlock();
- SetBlockType(block, imm);
- CALL_INTERFACE_IF_REACHABLE(Block, block);
- PushMergeValues(block, &block->start_merge);
- len = 1 + imm.length;
+ switch (opcode) {
+#define BUILD_SIMPLE_OPCODE(op, _, sig) \
+ case kExpr##op: \
+ BuildSimpleOperator_##sig(opcode); \
+ break;
+ FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+#undef BUILD_SIMPLE_OPCODE
+ case kExprNop:
+ break;
+ case kExprBlock: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* block = PushControl(kControlBlock);
+ SetBlockType(block, imm);
+ CALL_INTERFACE_IF_REACHABLE(Block, block);
+ PushMergeValues(block, &block->start_merge);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRethrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ auto exception = Pop(0, kWasmExceptRef);
+ CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
+ EndControl();
+ break;
+ }
+ case kExprThrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ PopArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args_));
+ EndControl();
+ break;
+ }
+ case kExprTry: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* try_block = PushControl(kControlTry);
+ SetBlockType(try_block, imm);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ PushMergeValues(try_block, &try_block->start_merge);
+ break;
+ }
+ case kExprCatch: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ if (!VALIDATE(!control_.empty())) {
+ this->error("catch does not match any try");
break;
}
- case kExprRethrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BreakDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- if (!VALIDATE(c->is_try_catchall() || c->is_try_catch())) {
- this->error("rethrow not targeting catch or catch-all");
- break;
- }
- CALL_INTERFACE_IF_REACHABLE(Rethrow, c);
- len = 1 + imm.length;
- EndControl();
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_try())) {
+ this->error("catch does not match any try");
break;
}
- case kExprThrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args_));
- EndControl();
+ if (!VALIDATE(c->is_incomplete_try())) {
+ this->error("catch already present for try");
break;
}
- case kExprTry: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* try_block = PushTry();
- SetBlockType(try_block, imm);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Try, try_block);
- PushMergeValues(try_block, &try_block->start_merge);
- break;
+ c->kind = kControlTryCatch;
+ FallThruTo(c);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
+ c->reachability = control_at(1)->innerReachability();
+ auto* exception = Push(kWasmExceptRef);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
+ break;
+ }
+ case kExprBrOnExn: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BranchDepthImmediate<validate> imm_br(this, this->pc_);
+ if (!this->Validate(this->pc_, imm_br, control_.size())) break;
+ ExceptionIndexImmediate<validate> imm_idx(this,
+ this->pc_ + imm_br.length);
+ if (!this->Validate(this->pc_ + imm_br.length, imm_idx)) break;
+ Control* c = control_at(imm_br.depth);
+ auto exception = Pop(0, kWasmExceptRef);
+ const WasmExceptionSig* sig = imm_idx.exception->sig;
+ size_t value_count = sig->parameter_count();
+ // TODO(mstarzinger): This operand stack mutation is an ugly hack to make
+ // both type checking here and environment merging in the graph builder
+ // interface work out of the box. We should introduce special handling for
+ // both and do minimal/no stack mutation here.
+ for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ Vector<Value> values(stack_.data() + c->stack_depth, value_count);
+ if (!TypeCheckBranch(c)) break;
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrOnException, exception, imm_idx, imm_br.depth,
+ values);
+ c->br_merge()->reached = true;
}
- case kExprCatch: {
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- len = 1 + imm.length;
- if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
- break;
- }
- if (!VALIDATE(!c->is_try_catchall())) {
- this->error("catch after catch-all for try");
- break;
- }
- c->kind = kControlTryCatch;
- FallThruTo(c);
- stack_.resize(c->stack_depth);
- const WasmExceptionSig* sig = imm.exception->sig;
- for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(sig->GetParam(i));
- }
- Vector<Value> values(stack_.data() + c->stack_depth,
- sig->parameter_count());
- c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchException, imm, c, values);
+ len = 1 + imm_br.length + imm_idx.length;
+ for (size_t i = 0; i < value_count; ++i) Pop();
+ auto* pexception = Push(kWasmExceptRef);
+ *pexception = exception;
+ break;
+ }
+ case kExprLoop: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ PopArgs(imm.sig);
+ auto* block = PushControl(kControlLoop);
+ SetBlockType(&control_.back(), imm);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ PushMergeValues(block, &block->start_merge);
+ break;
+ }
+ case kExprIf: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ auto cond = Pop(0, kWasmI32);
+ PopArgs(imm.sig);
+ if (!VALIDATE(this->ok())) break;
+ auto* if_block = PushControl(kControlIf);
+ SetBlockType(if_block, imm);
+ CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
+ len = 1 + imm.length;
+ PushMergeValues(if_block, &if_block->start_merge);
+ break;
+ }
+ case kExprElse: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("else does not match any if");
break;
}
- case kExprCatchAll: {
- CHECK_PROTOTYPE_OPCODE(eh);
- if (!VALIDATE(!control_.empty())) {
- this->error("catch-all does not match any try");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch-all does not match any try");
- break;
- }
- if (!VALIDATE(!c->is_try_catchall())) {
- this->error("catch-all already present for try");
- break;
- }
- c->kind = kControlTryCatchAll;
- FallThruTo(c);
- stack_.resize(c->stack_depth);
- c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchAll, c);
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_if())) {
+ this->error(this->pc_, "else does not match an if");
break;
}
- case kExprLoop: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
- auto* block = PushLoop();
- SetBlockType(&control_.back(), imm);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Loop, block);
- PushMergeValues(block, &block->start_merge);
+ if (c->is_if_else()) {
+ this->error(this->pc_, "else already present for if");
break;
}
- case kExprIf: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto cond = Pop(0, kWasmI32);
- PopArgs(imm.sig);
- if (!VALIDATE(this->ok())) break;
- auto* if_block = PushIf();
- SetBlockType(if_block, imm);
- CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
- len = 1 + imm.length;
- PushMergeValues(if_block, &if_block->start_merge);
+ if (!TypeCheckFallThru(c)) break;
+ c->kind = kControlIfElse;
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ if (c->reachable()) c->end_merge.reached = true;
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
+ break;
+ }
+ case kExprEnd: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("end does not match any if, try, or block");
break;
}
- case kExprElse: {
- if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
- break;
- }
- if (c->is_if_else()) {
- this->error(this->pc_, "else already present for if");
- break;
- }
- FallThruTo(c);
- c->kind = kControlIfElse;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
+ Control* c = &control_.back();
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->error(this->pc_, "missing catch or catch-all in try");
break;
}
- case kExprEnd: {
- if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
- return;
- }
- Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
+ this->error(
+ c->pc,
+ "start-arity and end-arity of one-armed if must match");
break;
}
- if (c->is_onearmed_if()) {
- // Emulate empty else arm.
- FallThruTo(c);
- if (this->failed()) break;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
- }
- if (c->is_try_catch()) {
- // Emulate catch-all + re-throw.
- FallThruTo(c);
- c->reachability = control_at(1)->innerReachability();
- CALL_INTERFACE_IF_PARENT_REACHABLE(CatchAll, c);
- CALL_INTERFACE_IF_REACHABLE(Rethrow, c);
- EndControl();
- }
-
- FallThruTo(c);
- // A loop just leaves the values on the stack.
- if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
-
- if (control_.size() == 1) {
- // If at the last (implicit) control, check we are at end.
- if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
- break;
- }
- last_end_found_ = true;
- // The result of the block is the return value.
- TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
- "(implicit) return");
- DoReturn(c, true);
- }
-
- PopControl(c);
- break;
- }
- case kExprSelect: {
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop();
- auto tval = Pop(0, fval.type);
- auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- break;
- }
- case kExprBr: {
- BreakDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- if (imm.depth == control_.size() - 1) {
- DoReturn(c, false);
- } else {
- if (!TypeCheckBreak(c)) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- }
- len = 1 + imm.length;
- EndControl();
- break;
- }
- case kExprBrIf: {
- BreakDepthImmediate<validate> imm(this, this->pc_);
- auto cond = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- if (!TypeCheckBreak(c)) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrIf, cond, c);
- c->br_merge()->reached = true;
- }
- len = 1 + imm.length;
- break;
}
- case kExprBrTable: {
- BranchTableImmediate<validate> imm(this, this->pc_);
- BranchTableIterator<validate> iterator(this, imm);
- auto key = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- uint32_t br_arity = 0;
- std::vector<bool> br_targets(control_.size());
- while (iterator.has_next()) {
- const uint32_t i = iterator.cur_index();
- const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->errorf(pos,
- "improper branch in br_table target %u (depth %u)",
- i, target);
- break;
- }
- // Avoid redundant break target checks.
- if (br_targets[target]) continue;
- br_targets[target] = true;
- // Check that label types match up.
- Control* c = control_at(target);
- uint32_t arity = c->br_merge()->arity;
- if (i == 0) {
- br_arity = arity;
- } else if (!VALIDATE(br_arity == arity)) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u"
- " (previous was %u, this one %u)",
- i, br_arity, arity);
- }
- if (!TypeCheckBreak(c)) break;
- }
- if (this->failed()) break;
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrTable, imm, key);
+ if (!TypeCheckFallThru(c)) break;
- for (uint32_t depth = control_depth(); depth-- > 0;) {
- if (!br_targets[depth]) continue;
- control_at(depth)->br_merge()->reached = true;
- }
+ if (control_.size() == 1) {
+ // If at the last (implicit) control, check we are at end.
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
+ this->error(this->pc_ + 1, "trailing code after function end");
+ break;
}
-
- len = 1 + iterator.length();
- EndControl();
- break;
- }
- case kExprReturn: {
- DoReturn(&control_.back(), false);
+ // The result of the block is the return value.
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ control_.clear();
break;
}
- case kExprUnreachable: {
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI64);
- CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF32);
- CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF64);
- CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- auto* value = Push(kWasmAnyRef);
- CALL_INTERFACE_IF_REACHABLE(RefNull, value);
- len = 1;
- break;
- }
- case kExprGetLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprSetLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprTeeLocal: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- auto value = Pop();
- CALL_INTERFACE_IF_REACHABLE(Drop, value);
- break;
+
+ PopControl(c);
+ break;
+ }
+ case kExprSelect: {
+ auto cond = Pop(2, kWasmI32);
+ auto fval = Pop();
+ auto tval = Pop(0, fval.type);
+ auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ break;
+ }
+ case kExprBr: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ if (!TypeCheckBranch(c)) break;
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else if (control_.back().reachable()) {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
}
- case kExprGetGlobal: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
- break;
+ len = 1 + imm.length;
+ EndControl();
+ break;
+ }
+ case kExprBrIf: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ auto cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ if (!TypeCheckBranch(c)) break;
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
+ c->br_merge()->reached = true;
}
- case kExprSetGlobal: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableImmediate<validate> imm(this, this->pc_);
+ BranchTableIterator<validate> iterator(this, imm);
+ auto key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ uint32_t br_arity = 0;
+ std::vector<bool> br_targets(control_.size());
+ while (iterator.has_next()) {
+ const uint32_t i = iterator.cur_index();
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (!VALIDATE(target < control_.size())) {
+ this->errorf(pos,
+ "improper branch in br_table target %u (depth %u)",
+ i, target);
break;
}
- auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
- break;
- }
- case kExprI32LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
- break;
- case kExprI32LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
- break;
- case kExprI32LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
- break;
- case kExprI32LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
- break;
- case kExprI32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI32Load);
- break;
- case kExprI64LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
- break;
- case kExprI64LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
- break;
- case kExprI64LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
- break;
- case kExprI64LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
- break;
- case kExprI64LoadMem32S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
- break;
- case kExprI64LoadMem32U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
- break;
- case kExprI64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI64Load);
- break;
- case kExprF32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF32Load);
- break;
- case kExprF64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF64Load);
- break;
- case kExprI32StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI32Store8);
- break;
- case kExprI32StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI32Store16);
- break;
- case kExprI32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI32Store);
- break;
- case kExprI64StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI64Store8);
- break;
- case kExprI64StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI64Store16);
- break;
- case kExprI64StoreMem32:
- len = 1 + DecodeStoreMem(StoreType::kI64Store32);
- break;
- case kExprI64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI64Store);
- break;
- case kExprF32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF32Store);
- break;
- case kExprF64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF64Store);
- break;
- case kExprMemoryGrow: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- DCHECK_NOT_NULL(this->module_);
- if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
- break;
+ // Avoid redundant branch target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
+ // Check that label types match up.
+ Control* c = control_at(target);
+ uint32_t arity = c->br_merge()->arity;
+ if (i == 0) {
+ br_arity = arity;
+ } else if (!VALIDATE(br_arity == arity)) {
+ this->errorf(pos,
+ "inconsistent arity in br_table target %u"
+ " (previous was %u, this one %u)",
+ i, br_arity, arity);
}
- auto value = Pop(0, kWasmI32);
- auto* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
- break;
- }
- case kExprMemorySize: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- auto* result = Push(kWasmI32);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
- break;
- }
- case kExprCallFunction: {
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- // TODO(clemensh): Better memory management.
- PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args_.data(), returns);
- break;
+ if (!TypeCheckBranch(c)) break;
}
- case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto index = Pop(0, kWasmI32);
- PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args_.data(),
- returns);
- break;
- }
- case kNumericPrefix: {
- ++len;
- byte numeric_index = this->template read_u8<validate>(
- this->pc_ + 1, "numeric index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
- if (opcode < kExprMemoryInit) {
- CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
- } else {
- CHECK_PROTOTYPE_OPCODE(bulk_memory);
+ if (this->failed()) break;
+
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrTable, imm, key);
+
+ for (uint32_t depth = control_depth(); depth-- > 0;) {
+ if (!br_targets[depth]) continue;
+ control_at(depth)->br_merge()->reached = true;
}
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeNumericOpcode(opcode);
- break;
}
- case kSimdPrefix: {
- CHECK_PROTOTYPE_OPCODE(simd);
- len++;
- byte simd_index =
- this->template read_u8<validate>(this->pc_ + 1, "simd index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeSimdOpcode(opcode);
+
+ len = 1 + iterator.length();
+ EndControl();
+ break;
+ }
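Every target of a single br_table must expect the same branch arity, because one shared set of stack values feeds whichever target is taken; that is what the br_arity bookkeeping above enforces. A standalone sketch of the rule with hypothetical names, detached from the decoder machinery:

    #include <cstdint>
    #include <vector>

    // {arities} stands in for c->br_merge()->arity of each distinct target.
    bool ConsistentBrTableArity(const std::vector<uint32_t>& arities) {
      for (uint32_t a : arities) {
        if (a != arities.front()) return false;  // mirrors the errorf() above
      }
      return true;
    }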
+ case kExprReturn: {
+ if (!TypeCheckReturn()) break;
+ DoReturn();
+ EndControl();
+ break;
+ }
+ case kExprUnreachable: {
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ EndControl();
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmI64);
+ CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmF32);
+ CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Immediate<validate> imm(this, this->pc_);
+ auto* value = Push(kWasmF64);
+ CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefNull: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ auto* value = Push(kWasmAnyRef);
+ CALL_INTERFACE_IF_REACHABLE(RefNull, value);
+ len = 1;
+ break;
+ }
+ case kExprGetLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GetLocal, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprSetLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto value = Pop(0, local_type_vec_[imm.index]);
+ CALL_INTERFACE_IF_REACHABLE(SetLocal, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprTeeLocal: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto value = Pop(0, local_type_vec_[imm.index]);
+ auto* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(TeeLocal, value, result, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprDrop: {
+ auto value = Pop();
+ CALL_INTERFACE_IF_REACHABLE(Drop, value);
+ break;
+ }
+ case kExprGetGlobal: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GetGlobal, result, imm);
+ break;
+ }
+ case kExprSetGlobal: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!VALIDATE(imm.global->mutability)) {
+ this->errorf(this->pc_, "immutable global #%u cannot be assigned",
+ imm.index);
break;
}
- case kAtomicPrefix: {
- CHECK_PROTOTYPE_OPCODE(threads);
- if (!CheckHasSharedMemory()) break;
- len++;
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeAtomicOpcode(opcode);
+ auto value = Pop(0, imm.type);
+ CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
+ break;
+ }
+ case kExprI32LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
+ break;
+ case kExprI32LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
+ break;
+ case kExprI32LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
+ break;
+ case kExprI32LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
+ break;
+ case kExprI32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
+ break;
+ case kExprI64LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
+ break;
+ case kExprI64LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
+ break;
+ case kExprI64LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
+ break;
+ case kExprI64LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
+ break;
+ case kExprI64LoadMem32S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
+ break;
+ case kExprI64LoadMem32U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
+ break;
+ case kExprI64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
+ break;
+ case kExprF32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
+ break;
+ case kExprF64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
+ break;
+ case kExprI32StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
+ break;
+ case kExprI32StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
+ break;
+ case kExprI32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
+ break;
+ case kExprI64StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
+ break;
+ case kExprI64StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
+ break;
+ case kExprI64StoreMem32:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
+ break;
+ case kExprI64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
+ break;
+ case kExprF32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
+ break;
+ case kExprF64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
+ break;
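Each of these one-line cases delegates to DecodeLoadMem or DecodeStoreMem, whose bodies are outside this hunk. As a rough sketch of the expected shape, inferred from the call sites rather than taken from the actual implementation: read the memory-access immediate, pop the address (and the value, for stores), push the result, and return the immediate's length so that "len = 1 + ..." accounts for the opcode byte:

    // Hypothetical outline; the real method also checks that a memory exists
    // and validates alignment.
    int DecodeLoadMemSketch(LoadType type) {
      MemoryAccessImmediate<validate> imm(this, this->pc_, type.size_log_2());
      auto index = Pop(0, kWasmI32);           // address operand
      auto* result = Push(type.value_type());  // loaded value
      CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
      return imm.length;
    }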
+ case kExprMemoryGrow: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ DCHECK_NOT_NULL(this->module_);
+ if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
+ this->error("grow_memory is not supported for asmjs modules");
break;
}
+ auto value = Pop(0, kWasmI32);
+ auto* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
+ break;
+ }
+ case kExprMemorySize: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ auto* result = Push(kWasmI32);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ // TODO(clemensh): Better memory management.
+ PopArgs(imm.sig);
+ auto* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args_.data(), returns);
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ auto index = Pop(0, kWasmI32);
+ PopArgs(imm.sig);
+ auto* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args_.data(),
+ returns);
+ break;
+ }
+ case kNumericPrefix: {
+ ++len;
+ byte numeric_index =
+ this->template read_u8<validate>(this->pc_ + 1, "numeric index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
+ if (opcode < kExprMemoryInit) {
+ CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
+ } else {
+ CHECK_PROTOTYPE_OPCODE(bulk_memory);
+ }
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeNumericOpcode(opcode);
+ break;
+ }
+ case kSimdPrefix: {
+ CHECK_PROTOTYPE_OPCODE(simd);
+ len++;
+ byte simd_index =
+ this->template read_u8<validate>(this->pc_ + 1, "simd index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeSimdOpcode(opcode);
+ break;
+ }
+ case kAtomicPrefix: {
+ CHECK_PROTOTYPE_OPCODE(threads);
+ if (!CheckHasSharedMemory()) break;
+ len++;
+ byte atomic_index =
+ this->template read_u8<validate>(this->pc_ + 1, "atomic index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ len += DecodeAtomicOpcode(opcode);
+ break;
+ }
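The three prefix cases share one encoding trick: the one-byte prefix and the sub-opcode byte that follows it are fused into a single 16-bit WasmOpcode, so a single switch can later dispatch on the combined value. A tiny standalone illustration (the concrete byte values are invented for the example):

    #include <cstdint>

    uint8_t prefix = 0xFD;      // a prefix byte (illustrative value)
    uint8_t sub_opcode = 0x0B;  // the byte read at pc_ + 1
    uint16_t full_opcode = static_cast<uint16_t>(prefix << 8 | sub_opcode);
    // full_opcode == 0xFD0B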
// Note that prototype opcodes are not handled in the fastpath
// above this switch, to avoid checking a feature flag.
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
case kExpr##name: /* fallthrough */
- FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
- BuildSimplePrototypeOperator(opcode);
- break;
- default: {
- // Deal with special asmjs opcodes.
- if (this->module_ != nullptr &&
- this->module_->origin == kAsmJsOrigin) {
- sig = WasmOpcodes::AsmjsSignature(opcode);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- }
- } else {
- this->error("Invalid opcode");
- return;
+ BuildSimplePrototypeOperator(opcode);
+ break;
+ default: {
+ // Deal with special asmjs opcodes.
+ if (this->module_ != nullptr &&
+ this->module_->origin == kAsmJsOrigin) {
+ FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) {
+ BuildSimpleOperator(opcode, sig);
}
+ } else {
+ this->error("Invalid opcode");
+ return;
}
}
}
@@ -2239,7 +2194,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void EndControl() {
DCHECK(!control_.empty());
auto* current = &control_.back();
- stack_.resize(current->stack_depth);
+ stack_.erase(stack_.begin() + current->stack_depth, stack_.end());
CALL_INTERFACE_IF_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
}
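The switch from resize() to erase() here (and the two-argument resize() with an explicit fill value in PopArgs below) is forced by this patch's type changes, not taste: Value loses its default constructor, and std::vector<T>::resize(n) requires T to be default-constructible even when n only shrinks the vector. A minimal reproduction of the constraint:

    #include <cstddef>
    #include <vector>

    struct NoDefault {
      explicit NoDefault(int v) : v(v) {}
      int v;
    };

    void Shrink(std::vector<NoDefault>& vec, size_t n) {
      // vec.resize(n);                       // would not compile
      vec.erase(vec.begin() + n, vec.end());  // fine: only destroys elements
    }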
@@ -2262,7 +2217,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
const byte* pc = this->pc_;
Value* args = this->args_.data();
InitMerge(&c->end_merge, imm.out_arity(), [pc, &imm](uint32_t i) {
- return Value::New(pc, imm.out_type(i));
+ return Value{pc, imm.out_type(i)};
});
InitMerge(&c->start_merge, imm.in_arity(),
[args](uint32_t i) { return args[i]; });
@@ -2271,7 +2226,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Pops arguments as required by signature into {args_}.
V8_INLINE void PopArgs(FunctionSig* sig) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
- args_.resize(count);
+ args_.resize(count, UnreachableValue(nullptr));
for (int i = count - 1; i >= 0; --i) {
args_[i] = Pop(i, sig->GetParam(i));
}
@@ -2282,38 +2237,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
- Control* PushControl(Control&& new_control) {
+ Control* PushControl(ControlKind kind) {
Reachability reachability =
control_.empty() ? kReachable : control_.back().innerReachability();
- control_.emplace_back(std::move(new_control));
- Control* c = &control_.back();
- c->reachability = reachability;
- c->start_merge.reached = c->reachable();
- return c;
- }
-
- Control* PushBlock() {
- return PushControl(Control::Block(this->pc_, stack_size()));
- }
- Control* PushLoop() {
- return PushControl(Control::Loop(this->pc_, stack_size()));
- }
- Control* PushIf() {
- return PushControl(Control::If(this->pc_, stack_size()));
- }
- Control* PushTry() {
- // current_catch_ = static_cast<int32_t>(control_.size() - 1);
- return PushControl(Control::Try(this->pc_, stack_size()));
+ control_.emplace_back(kind, stack_size(), this->pc_, reachability);
+ return &control_.back();
}
void PopControl(Control* c) {
DCHECK_EQ(c, &control_.back());
CALL_INTERFACE_IF_PARENT_REACHABLE(PopControl, c);
- bool reached = c->end_merge.reached;
+
+ // A loop just leaves the values on the stack.
+ if (!c->is_loop()) PushMergeValues(c, &c->end_merge);
+
+ bool parent_reached =
+ c->reachable() || c->end_merge.reached || c->is_onearmed_if();
control_.pop_back();
// If the parent block was reachable before, but the popped control does not
- // return to here, this block becomes indirectly unreachable.
- if (!control_.empty() && !reached && control_.back().reachable()) {
+ // return to here, this block becomes "spec only reachable".
+ if (!parent_reached && control_.back().reachable()) {
control_.back().reachability = kSpecOnlyReachable;
}
}
@@ -2352,7 +2295,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t SimdReplaceLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
- Value inputs[2];
+ Value inputs[2] = {UnreachableValue(this->pc_),
+ UnreachableValue(this->pc_)};
inputs[1] = Pop(1, type);
inputs[0] = Pop(0, kWasmS128);
auto* result = Push(kWasmS128);
@@ -2503,8 +2447,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, VectorOf(args_));
+ auto size = Pop(2, sig->GetParam(2));
+ auto src = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
break;
}
case kExprMemoryDrop: {
@@ -2518,16 +2464,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(imm)) break;
len += imm.length;
- PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, VectorOf(args_));
+ auto size = Pop(2, sig->GetParam(2));
+ auto src = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
break;
}
case kExprMemoryFill: {
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(imm)) break;
len += imm.length;
- PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, VectorOf(args_));
+ auto size = Pop(2, sig->GetParam(2));
+ auto value = Pop(1, sig->GetParam(1));
+ auto dst = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
break;
}
case kExprTableInit: {
@@ -2563,32 +2513,27 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
- void DoReturn(Control* c, bool implicit) {
- int return_count = static_cast<int>(this->sig_->return_count());
- args_.resize(return_count);
-
- // Pop return values off the stack in reverse order.
- for (int i = return_count - 1; i >= 0; --i) {
- args_[i] = Pop(i, this->sig_->GetReturn(i));
- }
-
- // Simulate that an implicit return morally comes after the current block.
- if (implicit && c->end_merge.reached) c->reachability = kReachable;
- CALL_INTERFACE_IF_REACHABLE(DoReturn, VectorOf(args_), implicit);
+ void DoReturn() {
+ size_t return_count = this->sig_->return_count();
+ DCHECK_GE(stack_.size(), return_count);
+ Vector<Value> return_values =
+ return_count == 0
+ ? Vector<Value>{}
+ : Vector<Value>{&*(stack_.end() - return_count), return_count};
- EndControl();
+ CALL_INTERFACE_IF_REACHABLE(DoReturn, return_values);
}
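The ternary guarding return_count == 0 is not just style: &*(stack_.end() - return_count) dereferences an iterator to obtain a raw pointer, and with return_count == 0 that would dereference end(), which is undefined behavior. The safe pattern in isolation (Vector is V8's non-owning view type):

    // Form a view over the top {n} elements without copying; handle n == 0
    // separately so end() is never dereferenced.
    Vector<Value> TopOfStack(std::vector<Value>& stack, size_t n) {
      if (n == 0) return {};
      return Vector<Value>{&*(stack.end() - n), n};
    }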
inline Value* Push(ValueType type) {
DCHECK_NE(kWasmStmt, type);
- stack_.push_back(Value::New(this->pc_, type));
+ stack_.emplace_back(this->pc_, type);
return &stack_.back();
}
void PushMergeValues(Control* c, Merge<Value>* merge) {
DCHECK_EQ(c, &control_.back());
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- stack_.resize(c->stack_depth);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
if (merge->arity == 1) {
stack_.push_back(merge->vals.first);
} else {
@@ -2609,7 +2554,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return stack_.data() + old_size;
}
- Value Pop(int index, ValueType expected) {
+ V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
if (!VALIDATE(val.type == expected || val.type == kWasmVar ||
expected == kWasmVar)) {
@@ -2621,7 +2566,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
- Value Pop() {
+ V8_INLINE Value Pop() {
DCHECK(!control_.empty());
uint32_t limit = control_.back().stack_depth;
if (stack_.size() <= limit) {
@@ -2630,7 +2575,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->errorf(this->pc_, "%s found empty stack",
SafeOpcodeNameAt(this->pc_));
}
- return Value::Unreachable(this->pc_);
+ return UnreachableValue(this->pc_);
}
auto val = stack_.back();
stack_.pop_back();
@@ -2651,22 +2596,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
+ // The computation of {stack_values} is only valid if {merge->arity} is >0.
+ DCHECK_LT(0, merge->arity);
+ Value* stack_values = &*(stack_.end() - merge->arity);
// Typecheck the topmost {merge->arity} values on the stack.
for (uint32_t i = 0; i < merge->arity; ++i) {
- auto& val = GetMergeValueFromStack(c, merge, i);
- auto& old = (*merge)[i];
- if (val.type != old.type) {
- // If {val.type} is polymorphic, which results from unreachable, make
- // it more specific by using the merge value's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
- this->errorf(
- this->pc_, "type error in merge[%u] (expected %s, got %s)", i,
- ValueTypes::TypeName(old.type), ValueTypes::TypeName(val.type));
- return false;
- }
- val.type = old.type;
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
+ if (val.type == old.type) continue;
+ // If {val.type} is polymorphic, which results from unreachable, make
+ // it more specific by using the merge value's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
+ i, ValueTypes::TypeName(old.type),
+ ValueTypes::TypeName(val.type));
+ return false;
}
+ val.type = old.type;
}
return true;
@@ -2686,13 +2633,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
expected, startrel(c->pc), actual);
return false;
}
+ if (expected == 0) return true; // Fast path.
return TypeCheckMergeValues(c, &c->end_merge);
}
- bool TypeCheckBreak(Control* c) {
- // Breaks must have at least the number of values expected; can have more.
+ bool TypeCheckBranch(Control* c) {
+ // Branches must have at least the number of values expected; can have more.
uint32_t expected = c->br_merge()->arity;
+ if (expected == 0) return true; // Fast path.
DCHECK_GE(stack_.size(), control_.back().stack_depth);
uint32_t actual =
static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
@@ -2705,6 +2654,42 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return TypeCheckMergeValues(c, c->br_merge());
}
+ bool TypeCheckReturn() {
+ // Returns must have at least the number of values expected; can have more.
+ uint32_t num_returns = static_cast<uint32_t>(this->sig_->return_count());
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (!InsertUnreachablesIfNecessary(num_returns, actual)) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for return, found %u",
+ num_returns, actual);
+ return false;
+ }
+
+ // Typecheck the topmost {num_returns} values on the stack.
+ if (num_returns == 0) return true;
+ // This line requires num_returns > 0.
+ Value* stack_values = &*(stack_.end() - num_returns);
+ for (uint32_t i = 0; i < num_returns; ++i) {
+ auto& val = stack_values[i];
+ ValueType expected_type = this->sig_->GetReturn(i);
+ if (val.type == expected_type) continue;
+ // If {val.type} is polymorphic, which results from unreachable,
+ // make it more specific by using the return's expected type.
+ // If it is not polymorphic, this is a type error.
+ if (!VALIDATE(val.type == kWasmVar)) {
+ this->errorf(this->pc_,
+ "type error in return[%u] (expected %s, got %s)", i,
+ ValueTypes::TypeName(expected_type),
+ ValueTypes::TypeName(val.type));
+ return false;
+ }
+ val.type = expected_type;
+ }
+ return true;
+ }
+
inline bool InsertUnreachablesIfNecessary(uint32_t expected,
uint32_t actual) {
if (V8_LIKELY(actual >= expected)) {
@@ -2719,13 +2704,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// unreachable, insert unreachable values below the actual values.
// This simplifies {TypeCheckMergeValues}.
auto pos = stack_.begin() + (stack_.size() - actual);
- stack_.insert(pos, (expected - actual), Value::Unreachable(this->pc_));
+ stack_.insert(pos, expected - actual, UnreachableValue(this->pc_));
return true;
}
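To see concretely what InsertUnreachablesIfNecessary does, here is a standalone analogue using int as a stand-in for Value and -1 as the unreachable placeholder; the padding goes below the real values, so the merge checks can index the top {expected} slots uniformly:

    #include <cstddef>
    #include <vector>

    std::vector<int> stack = {7, 42};  // 42 is the only "actual" top value
    size_t actual = 1, expected = 3;
    stack.insert(stack.end() - actual, expected - actual, /*unreachable=*/-1);
    // stack is now {7, -1, -1, 42}: placeholders sit below the real value.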
void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
- TRACE(" !%s\n", this->error_msg_.c_str());
+ TRACE(" !%s\n", this->error_.message().c_str());
CALL_INTERFACE(OnFirstError);
}
@@ -2740,13 +2725,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BuildSimpleOperator(opcode, sig);
}
- inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, sig, val, ret);
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
break;
}
case 2: {
@@ -2754,13 +2739,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto lval = Pop(0, sig->GetParam(0));
auto* ret =
sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, sig, lval, rval, ret);
+ CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
break;
}
default:
UNREACHABLE();
}
}
+
+ void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
+ ValueType arg_type) {
+ auto val = Pop(0, arg_type);
+ auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
+ }
+
+ void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
+ ValueType lhs_type, ValueType rhs_type) {
+ auto rval = Pop(1, rhs_type);
+ auto lval = Pop(0, lhs_type);
+ auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
+ }
+
+#define DEFINE_SIMPLE_SIG_OPERATOR(sig, ...) \
+ void BuildSimpleOperator_##sig(WasmOpcode opcode) { \
+ BuildSimpleOperator(opcode, __VA_ARGS__); \
+ }
+ FOREACH_SIGNATURE(DEFINE_SIMPLE_SIG_OPERATOR)
+#undef DEFINE_SIMPLE_SIG_OPERATOR
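DEFINE_SIMPLE_SIG_OPERATOR is an X-macro over FOREACH_SIGNATURE: each signature entry stamps out one thin wrapper that forwards to the overloads above with the types baked in. Assuming FOREACH_SIGNATURE contains an entry along the lines of V(i_ii, kWasmI32, kWasmI32, kWasmI32) (the exact list is defined elsewhere), that entry would expand to:

    void BuildSimpleOperator_i_ii(WasmOpcode opcode) {
      BuildSimpleOperator(opcode, kWasmI32, kWasmI32, kWasmI32);
    }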
};
#undef CALL_INTERFACE
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index f51bae27917..27cbe10b7e1 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -116,7 +116,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BodyLocalDecls decls(&zone);
BytecodeIterator i(body.start, body.end, &decls);
if (body.start != i.pc() && print_locals == kPrintLocals) {
- os << "// locals: ";
+ os << "// locals:";
if (!decls.type_list.empty()) {
ValueType type = decls.type_list[0];
uint32_t count = 0;
@@ -129,6 +129,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
count = 1;
}
}
+ os << " " << count << " " << ValueTypes::TypeName(type);
}
os << std::endl;
if (line_numbers) line_numbers->push_back(kNoByteCode);
@@ -152,7 +153,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
WasmOpcode opcode = i.current();
if (line_numbers) line_numbers->push_back(i.position());
- if (opcode == kExprElse) control_depth--;
+ if (opcode == kExprElse || opcode == kExprCatch) {
+ control_depth--;
+ }
int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
@@ -192,6 +195,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
switch (opcode) {
case kExprElse:
+ case kExprCatch:
os << " // @" << i.pc_offset();
control_depth++;
break;
@@ -215,12 +219,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << " // depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << " // depth=" << imm.depth;
break;
}
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index 0fbc6c5b33f..c166683beeb 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -28,27 +28,103 @@ const char* GetExecutionTierAsString(ExecutionTier tier) {
UNREACHABLE();
}
+class WasmInstructionBufferImpl {
+ public:
+ class View : public AssemblerBuffer {
+ public:
+ View(Vector<uint8_t> buffer, WasmInstructionBufferImpl* holder)
+ : buffer_(buffer), holder_(holder) {}
+
+ ~View() override {
+ if (buffer_.start() == holder_->old_buffer_.start()) {
+ DCHECK_EQ(buffer_.size(), holder_->old_buffer_.size());
+ holder_->old_buffer_ = {};
+ }
+ }
+
+ byte* start() const override { return buffer_.start(); }
+
+ int size() const override { return static_cast<int>(buffer_.size()); }
+
+ std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+ // If we grow, we must be the current buffer of {holder_}.
+ DCHECK_EQ(buffer_.start(), holder_->buffer_.start());
+ DCHECK_EQ(buffer_.size(), holder_->buffer_.size());
+ DCHECK_NULL(holder_->old_buffer_);
+
+ DCHECK_LT(size(), new_size);
+
+ holder_->old_buffer_ = std::move(holder_->buffer_);
+ holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
+ return base::make_unique<View>(holder_->buffer_.as_vector(), holder_);
+ }
+
+ private:
+ const Vector<uint8_t> buffer_;
+ WasmInstructionBufferImpl* const holder_;
+ };
+
+ std::unique_ptr<AssemblerBuffer> CreateView() {
+ DCHECK_NOT_NULL(buffer_);
+ return base::make_unique<View>(buffer_.as_vector(), this);
+ }
+
+ std::unique_ptr<uint8_t[]> ReleaseBuffer() {
+ DCHECK_NULL(old_buffer_);
+ DCHECK_NOT_NULL(buffer_);
+ return buffer_.ReleaseData();
+ }
+
+ bool released() const { return buffer_ == nullptr; }
+
+ private:
+ // The current buffer used to emit code.
+ OwnedVector<uint8_t> buffer_ =
+ OwnedVector<uint8_t>::New(AssemblerBase::kMinimalBufferSize);
+
+ // While the buffer is grown, we need to temporarily also keep the old
+ // buffer alive.
+ OwnedVector<uint8_t> old_buffer_;
+};
+
+WasmInstructionBufferImpl* Impl(WasmInstructionBuffer* buf) {
+ return reinterpret_cast<WasmInstructionBufferImpl*>(buf);
+}
+
} // namespace
+// PIMPL interface WasmInstructionBuffer for WasmInstructionBufferImpl
+WasmInstructionBuffer::~WasmInstructionBuffer() {
+ Impl(this)->~WasmInstructionBufferImpl();
+}
+
+std::unique_ptr<AssemblerBuffer> WasmInstructionBuffer::CreateView() {
+ return Impl(this)->CreateView();
+}
+
+std::unique_ptr<uint8_t[]> WasmInstructionBuffer::ReleaseBuffer() {
+ return Impl(this)->ReleaseBuffer();
+}
+
+// static
+std::unique_ptr<WasmInstructionBuffer> WasmInstructionBuffer::New() {
+ return std::unique_ptr<WasmInstructionBuffer>{
+ reinterpret_cast<WasmInstructionBuffer*>(
+ new WasmInstructionBufferImpl())};
+}
+// End of PIMPL interface WasmInstructionBuffer for WasmInstructionBufferImpl
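This is PIMPL without a pointer member: WasmInstructionBuffer is never constructed directly (its constructor is deleted in the header), every object is really a WasmInstructionBufferImpl, and the public pointer is reinterpret_cast in both directions. A stripped-down sketch of the same trick with hypothetical names:

    class Public {  // header: opaque, constructor deleted
     public:
      static Public* New();
      void DoThing();
     private:
      Public() = delete;
    };

    class PublicImpl {  // .cc file: the real object
     public:
      void DoThing() { /* real work */ }
    };

    Public* Public::New() {
      return reinterpret_cast<Public*>(new PublicImpl());
    }
    void Public::DoThing() { reinterpret_cast<PublicImpl*>(this)->DoThing(); }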
+
// static
-ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier() {
- return FLAG_liftoff ? ExecutionTier::kBaseline : ExecutionTier::kOptimized;
+ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
+ const WasmModule* module) {
+ return FLAG_liftoff && module->origin == kWasmOrigin
+ ? ExecutionTier::kBaseline
+ : ExecutionTier::kOptimized;
}
-WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
- NativeModule* native_module, int index,
+WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, int index,
ExecutionTier tier)
- : wasm_engine_(wasm_engine),
- func_index_(index),
- native_module_(native_module),
- tier_(tier) {
- const WasmModule* module = native_module->module();
- DCHECK_GE(index, module->num_imported_functions);
- DCHECK_LT(index, module->functions.size());
- // Always disable Liftoff for asm.js, for two reasons:
- // 1) asm-specific opcodes are not implemented, and
- // 2) tier-up does not work with lazy compilation.
- if (module->origin == kAsmJsOrigin) tier = ExecutionTier::kOptimized;
+ : wasm_engine_(wasm_engine), func_index_(index), tier_(tier) {
if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
(FLAG_wasm_tier_mask_for_testing & (1 << index))) {
tier = ExecutionTier::kOptimized;
@@ -60,21 +136,19 @@ WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
// {TurbofanWasmCompilationUnit} can be opaque in the header file.
WasmCompilationUnit::~WasmCompilationUnit() = default;
-void WasmCompilationUnit::ExecuteCompilation(
- CompilationEnv* env, std::shared_ptr<WireBytesStorage> wire_bytes_storage,
+WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
+ CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
- const WasmModule* module = native_module_->module();
- DCHECK_EQ(module, env->module);
-
auto* func = &env->module->functions[func_index_];
Vector<const uint8_t> code = wire_bytes_storage->GetCode(func->code);
wasm::FunctionBody func_body{func->sig, func->code.offset(), code.start(),
code.end()};
- auto size_histogram =
- SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes);
+ auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin, wasm,
+ function_size_bytes);
size_histogram->AddSample(static_cast<int>(func_body.end - func_body.start));
- auto timed_histogram = SELECT_WASM_COUNTER(counters, module->origin,
+ auto timed_histogram = SELECT_WASM_COUNTER(counters, env->module->origin,
wasm_compile, function_time);
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
@@ -83,23 +157,57 @@ void WasmCompilationUnit::ExecuteCompilation(
GetExecutionTierAsString(tier_));
}
+ WasmCompilationResult result;
switch (tier_) {
case ExecutionTier::kBaseline:
- if (liftoff_unit_->ExecuteCompilation(env, func_body, counters,
- detected)) {
- break;
- }
+ result =
+ liftoff_unit_->ExecuteCompilation(env, func_body, counters, detected);
+ if (result.succeeded()) break;
// Otherwise, fall back to turbofan.
SwitchTier(ExecutionTier::kOptimized);
// TODO(wasm): We could actually stop or remove the tiering unit for this
// function to avoid compiling it twice with TurboFan.
V8_FALLTHROUGH;
case ExecutionTier::kOptimized:
- turbofan_unit_->ExecuteCompilation(env, func_body, counters, detected);
+ result = turbofan_unit_->ExecuteCompilation(env, func_body, counters,
+ detected);
break;
case ExecutionTier::kInterpreter:
UNREACHABLE(); // TODO(titzer): compile interpreter entry stub.
}
+
+ if (result.succeeded()) {
+ counters->wasm_generated_code_size()->Increment(
+ result.code_desc.instr_size);
+ counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
+ }
+
+ return result;
+}
+
+WasmCode* WasmCompilationUnit::Publish(WasmCompilationResult result,
+ NativeModule* native_module) {
+ if (!result.succeeded()) {
+ native_module->compilation_state()->SetError(func_index_,
+ std::move(result.error));
+ return nullptr;
+ }
+
+  // {tier_} may differ from the tier originally requested (e.g. after a
+  // Liftoff bailout switched this unit to TurboFan); it determines how the
+  // published code is tagged below.
+ DCHECK(result.succeeded());
+ WasmCode::Tier code_tier = tier_ == ExecutionTier::kBaseline
+ ? WasmCode::kLiftoff
+ : WasmCode::kTurbofan;
+ DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
+ WasmCode* code = native_module->AddCode(
+ func_index_, result.code_desc, result.frame_slot_count,
+ result.safepoint_table_offset, result.handler_table_offset,
+ std::move(result.protected_instructions),
+ std::move(result.source_positions), WasmCode::kFunction, code_tier);
+ // TODO(clemensh): Merge this into {AddCode}?
+ native_module->PublishCode(code);
+ return code;
}
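The compile/publish split introduced here lets ExecuteCompilation run on a background thread while Publish is the only step that touches the NativeModule. A usage sketch, assuming engine, func_index, env, wire_bytes_storage, counters, detected and native_module are in scope (it mirrors CompileWasmFunction below):

    WasmCompilationUnit unit(engine, func_index, ExecutionTier::kBaseline);
    WasmCompilationResult result = unit.ExecuteCompilation(
        &env, wire_bytes_storage, counters, &detected);
    // On failure, Publish records the error on the compilation state and
    // returns nullptr; on success it adds and publishes the new code.
    WasmCode* code = unit.Publish(std::move(result), native_module);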
void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
@@ -125,7 +233,7 @@ void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
}
// static
-bool WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
+void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
WasmFeatures* detected,
const WasmFunction* function,
@@ -135,24 +243,12 @@ bool WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
- WasmCompilationUnit unit(isolate->wasm_engine(), native_module,
- function->func_index, tier);
+ WasmCompilationUnit unit(isolate->wasm_engine(), function->func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
- unit.ExecuteCompilation(
+ WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate->counters(), detected);
- return !unit.failed();
-}
-
-void WasmCompilationUnit::SetResult(WasmCode* code, Counters* counters) {
- DCHECK_NULL(result_);
- result_ = code;
- native_module()->PublishCode(code);
-
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(code->reloc_info().size()));
+ unit.Publish(std::move(result), native_module);
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h
index 3d3e1073f18..8f235a5d1ce 100644
--- a/chromium/v8/src/wasm/function-compiler.h
+++ b/chromium/v8/src/wasm/function-compiler.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-limits.h"
@@ -14,9 +15,11 @@
namespace v8 {
namespace internal {
+class AssemblerBuffer;
class Counters;
namespace compiler {
+class Pipeline;
class TurbofanWasmCompilationUnit;
} // namespace compiler
@@ -25,35 +28,70 @@ namespace wasm {
class LiftoffCompilationUnit;
class NativeModule;
class WasmCode;
+class WasmCompilationUnit;
class WasmEngine;
struct WasmFunction;
+class WasmInstructionBuffer final {
+ public:
+ ~WasmInstructionBuffer();
+ std::unique_ptr<AssemblerBuffer> CreateView();
+ std::unique_ptr<uint8_t[]> ReleaseBuffer();
+
+ static std::unique_ptr<WasmInstructionBuffer> New();
+
+ private:
+ WasmInstructionBuffer() = delete;
+ DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
+};
+
+struct WasmCompilationResult {
+ public:
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
+
+ explicit WasmCompilationResult(WasmError error) : error(std::move(error)) {}
+
+ bool succeeded() const {
+ DCHECK_EQ(code_desc.buffer != nullptr, error.empty());
+ return error.empty();
+ }
+ operator bool() const { return succeeded(); }
+
+ CodeDesc code_desc;
+ std::unique_ptr<uint8_t[]> instr_buffer;
+ uint32_t frame_slot_count = 0;
+ size_t safepoint_table_offset = 0;
+ size_t handler_table_offset = 0;
+ OwnedVector<byte> source_positions;
+ OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
+
+ WasmError error;
+};
+
class WasmCompilationUnit final {
public:
- static ExecutionTier GetDefaultExecutionTier();
+ static ExecutionTier GetDefaultExecutionTier(const WasmModule*);
// If constructing from a background thread, pass in a Counters*, and ensure
// that the Counters live at least as long as this compilation unit (which
// typically means to hold a std::shared_ptr<Counters>).
// If used exclusively from a foreground thread, Isolate::counters() may be
// used by callers to pass Counters.
- WasmCompilationUnit(WasmEngine*, NativeModule*, int index,
- ExecutionTier = GetDefaultExecutionTier());
+ WasmCompilationUnit(WasmEngine*, int index, ExecutionTier);
~WasmCompilationUnit();
- void ExecuteCompilation(CompilationEnv*, std::shared_ptr<WireBytesStorage>,
- Counters*, WasmFeatures* detected);
+ WasmCompilationResult ExecuteCompilation(
+ CompilationEnv*, const std::shared_ptr<WireBytesStorage>&, Counters*,
+ WasmFeatures* detected);
+
+ WasmCode* Publish(WasmCompilationResult, NativeModule*);
- NativeModule* native_module() const { return native_module_; }
ExecutionTier tier() const { return tier_; }
- bool failed() const { return result_ == nullptr; } // TODO(clemensh): Remove.
- WasmCode* result() const { return result_; }
- static bool CompileWasmFunction(Isolate* isolate, NativeModule* native_module,
- WasmFeatures* detected,
- const WasmFunction* function,
- ExecutionTier = GetDefaultExecutionTier());
+ static void CompileWasmFunction(Isolate*, NativeModule*,
+ WasmFeatures* detected, const WasmFunction*,
+ ExecutionTier);
private:
friend class LiftoffCompilationUnit;
@@ -61,7 +99,6 @@ class WasmCompilationUnit final {
WasmEngine* const wasm_engine_;
const int func_index_;
- NativeModule* const native_module_;
ExecutionTier tier_;
WasmCode* result_ = nullptr;
@@ -72,9 +109,6 @@ class WasmCompilationUnit final {
void SwitchTier(ExecutionTier new_tier);
- // Called from {ExecuteCompilation} to set the result of compilation.
- void SetResult(WasmCode*, Counters*);
-
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index 8c6d34902b9..ac297662c80 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -36,7 +36,6 @@ struct SsaEnv {
compiler::WasmInstanceCacheNodes instance_cache;
TFNode** locals;
- bool reached() const { return state >= kReached; }
void Kill(State new_state = kControlEnd) {
state = new_state;
locals = nullptr;
@@ -51,7 +50,6 @@ struct SsaEnv {
#define BUILD(func, ...) \
([&] { \
- DCHECK(ssa_env_->reached()); \
DCHECK(decoder->ok()); \
return CheckForException(decoder, builder_->func(__VA_ARGS__)); \
})()
@@ -63,22 +61,36 @@ class WasmGraphBuildingInterface {
static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
- struct Value : public ValueWithNamedConstructors<Value> {
- TFNode* node;
+ struct Value : public ValueBase {
+ TFNode* node = nullptr;
+
+ template <typename... Args>
+ explicit Value(Args&&... args) V8_NOEXCEPT
+ : ValueBase(std::forward<Args>(args)...) {}
};
struct TryInfo : public ZoneObject {
SsaEnv* catch_env;
TFNode* exception = nullptr;
+ bool might_throw() const { return exception != nullptr; }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TryInfo);
+
explicit TryInfo(SsaEnv* c) : catch_env(c) {}
};
- struct Control : public ControlWithNamedConstructors<Control, Value> {
- SsaEnv* end_env; // end environment for the construct.
- SsaEnv* false_env; // false environment (only for if).
- TryInfo* try_info; // information used for compiling try statements.
- int32_t previous_catch; // previous Control (on the stack) with a catch.
+ struct Control : public ControlBase<Value> {
+ SsaEnv* end_env = nullptr; // end environment for the construct.
+ SsaEnv* false_env = nullptr; // false environment (only for if).
+ TryInfo* try_info = nullptr; // information about try statements.
+ int32_t previous_catch = -1; // previous Control with a catch.
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
+
+ template <typename... Args>
+ explicit Control(Args&&... args) V8_NOEXCEPT
+ : ControlBase(std::forward<Args>(args)...) {}
};
explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder)
@@ -87,7 +99,7 @@ class WasmGraphBuildingInterface {
void StartFunction(FullDecoder* decoder) {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t num_locals = decoder->NumLocals();
+ uint32_t num_locals = decoder->num_locals();
uint32_t env_count = num_locals;
size_t size = sizeof(TFNode*) * env_count;
ssa_env->state = SsaEnv::kReached;
@@ -127,15 +139,10 @@ class WasmGraphBuildingInterface {
// Reload the instance cache entries into the Ssa Environment.
void LoadContextIntoSsa(SsaEnv* ssa_env) {
- if (!ssa_env || !ssa_env->reached()) return;
- builder_->InitInstanceCache(&ssa_env->instance_cache);
+ if (ssa_env) builder_->InitInstanceCache(&ssa_env->instance_cache);
}
- void StartFunctionBody(FullDecoder* decoder, Control* block) {
- SsaEnv* break_env = ssa_env_;
- SetEnv(Steal(decoder->zone(), break_env));
- block->end_env = break_env;
- }
+ void StartFunctionBody(FullDecoder* decoder, Control* block) {}
void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
@@ -144,7 +151,7 @@ class WasmGraphBuildingInterface {
void NextInstruction(FullDecoder*, WasmOpcode) {}
void Block(FullDecoder* decoder, Control* block) {
- // The break environment is the outer environment.
+ // The branch environment is the outer environment.
block->end_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
@@ -181,9 +188,7 @@ class WasmGraphBuildingInterface {
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- if (ssa_env_->reached()) {
- BUILD(BranchNoHint, cond.node, &if_true, &if_false);
- }
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(decoder, ssa_env_);
false_env->control = if_false;
@@ -200,18 +205,28 @@ class WasmGraphBuildingInterface {
}
void PopControl(FullDecoder* decoder, Control* block) {
- if (!block->is_loop()) SetEnv(block->end_env);
+ // A loop just continues with the end environment. There is no merge.
+ if (block->is_loop()) return;
+ // Any other block falls through to the parent block.
+ if (block->reachable()) FallThruTo(decoder, block);
+ if (block->is_onearmed_if()) {
+ // Merge the else branch into the end merge.
+ SetEnv(block->false_env);
+ MergeValuesInto(decoder, block, &block->end_merge);
+ }
+ // Now continue with the merged environment.
+ SetEnv(block->end_env);
}
void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
- void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
- const Value& value, Value* result) {
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
+ Value* result) {
result->node = BUILD(Unop, opcode, value.node, decoder->position());
}
- void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
- const Value& lhs, const Value& rhs, Value* result) {
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
+ const Value& rhs, Value* result) {
auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
@@ -238,17 +253,9 @@ class WasmGraphBuildingInterface {
void Drop(FullDecoder* decoder, const Value& value) {}
- void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
- if (implicit) {
- DCHECK_EQ(1, decoder->control_depth());
- SetEnv(decoder->control_at(0)->end_env);
- }
- size_t num_values = values.size();
- TFNode** buffer = GetNodes(values);
- for (size_t i = 0; i < num_values; ++i) {
- buffer[i] = values[i].node;
- }
- BUILD(Return, static_cast<uint32_t>(values.size()), buffer);
+ void DoReturn(FullDecoder* decoder, Vector<Value> values) {
+ TFNode** nodes = GetNodes(values);
+ BUILD(Return, static_cast<uint32_t>(values.size()), nodes);
}
void GetLocal(FullDecoder* decoder, Value* result,
@@ -295,18 +302,30 @@ class WasmGraphBuildingInterface {
ssa_env_->control = merge;
}
+ void BrOrRet(FullDecoder* decoder, uint32_t depth) {
+ if (depth == decoder->control_depth() - 1) {
+ uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
+ TFNode** values =
+ ret_count == 0 ? nullptr
+ : GetNodes(decoder->stack_value(ret_count), ret_count);
+ BUILD(Return, ret_count, values);
+ } else {
+ Br(decoder, decoder->control_at(depth));
+ }
+ }
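BrOrRet mirrors on the graph-building side what the decoder's kExprBr case does: a depth naming the function-level block has no merge environment to jump to, so it is lowered directly to a Return node built from the top return_count stack values.

    // Example: inside "block { if { br N } }" control_depth() is 3 (function
    // block, block, if). N == 2 names the function block (2 == 3 - 1) and is
    // lowered to Return; N == 0 or 1 takes the ordinary Br path into a merge.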
+
void Br(FullDecoder* decoder, Control* target) {
MergeValuesInto(decoder, target, target->br_merge());
}
- void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
+ void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
- ssa_env_ = tenv;
- Br(decoder, target);
- ssa_env_ = fenv;
+ SetEnv(tenv);
+ BrOrRet(decoder, depth);
+ SetEnv(fenv);
}
void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
@@ -314,30 +333,34 @@ class WasmGraphBuildingInterface {
if (imm.table_count == 0) {
// Only a default target. Do the equivalent of br.
uint32_t target = BranchTableIterator<validate>(decoder, imm).next();
- Br(decoder, decoder->control_at(target));
+ BrOrRet(decoder, target);
return;
}
- SsaEnv* break_env = ssa_env_;
+ SsaEnv* branch_env = ssa_env_;
// Build branches to the various blocks based on the table.
TFNode* sw = BUILD(Switch, imm.table_count + 1, key.node);
- SsaEnv* copy = Steal(decoder->zone(), break_env);
- ssa_env_ = copy;
+ SsaEnv* copy = Steal(decoder->zone(), branch_env);
+ SetEnv(copy);
BranchTableIterator<validate> iterator(decoder, imm);
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
- ssa_env_ = Split(decoder, copy);
+ SetEnv(Split(decoder, copy));
ssa_env_->control =
(i == imm.table_count) ? BUILD(IfDefault, sw) : BUILD(IfValue, i, sw);
- Br(decoder, decoder->control_at(target));
+ BrOrRet(decoder, target);
}
DCHECK(decoder->ok());
- ssa_env_ = break_env;
+ SetEnv(branch_env);
}
void Else(FullDecoder* decoder, Control* if_block) {
+ if (if_block->reachable()) {
+ // Merge the if branch into the end merge.
+ MergeValuesInto(decoder, if_block, &if_block->end_merge);
+ }
SetEnv(if_block->false_env);
}
@@ -418,75 +441,59 @@ class WasmGraphBuildingInterface {
builder_->TerminateThrow(ssa_env_->effect, ssa_env_->control);
}
- void Rethrow(FullDecoder* decoder, Control* block) {
- DCHECK(block->is_try_catchall() || block->is_try_catch());
- TFNode* exception = block->try_info->exception;
- BUILD(Rethrow, exception);
+ void Rethrow(FullDecoder* decoder, const Value& exception) {
+ BUILD(Rethrow, exception.node);
builder_->TerminateThrow(ssa_env_->effect, ssa_env_->control);
}
- void CatchException(FullDecoder* decoder,
- const ExceptionIndexImmediate<validate>& imm,
- Control* block, Vector<Value> values) {
- DCHECK(block->is_try_catch());
- TFNode* exception = block->try_info->exception;
- current_catch_ = block->previous_catch; // Pop try scope.
- SsaEnv* catch_env = block->try_info->catch_env;
- SetEnv(catch_env);
-
- // The catch block is unreachable if no possible throws in the try block
- // exist. We only build a landing pad if some node in the try block can
- // (possibly) throw. Otherwise the catch environments remain empty.
- DCHECK_EQ(exception != nullptr, ssa_env_->reached());
- if (exception == nullptr) {
- block->reachability = kSpecOnlyReachable;
- return;
- }
-
- TFNode* if_catch = nullptr;
- TFNode* if_no_catch = nullptr;
+ void BrOnException(FullDecoder* decoder, const Value& exception,
+ const ExceptionIndexImmediate<validate>& imm,
+ uint32_t depth, Vector<Value> values) {
+ TFNode* if_match = nullptr;
+ TFNode* if_no_match = nullptr;
// Get the exception tag and see if it matches the expected one.
- TFNode* caught_tag = BUILD(GetExceptionTag, exception);
+ TFNode* caught_tag = BUILD(GetExceptionTag, exception.node);
TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
- BUILD(BranchNoHint, compare, &if_catch, &if_no_catch);
-
- // If the tags don't match we continue with the next tag by setting the
- // false environment as the new {TryInfo::catch_env} here.
- SsaEnv* if_no_catch_env = Split(decoder, ssa_env_);
- if_no_catch_env->control = if_no_catch;
- SsaEnv* if_catch_env = Steal(decoder->zone(), ssa_env_);
- if_catch_env->control = if_catch;
- block->try_info->catch_env = if_no_catch_env;
+ BUILD(BranchNoHint, compare, &if_match, &if_no_match);
+ SsaEnv* if_no_match_env = Split(decoder, ssa_env_);
+ SsaEnv* if_match_env = Steal(decoder->zone(), ssa_env_);
+ if_no_match_env->control = if_no_match;
+ if_match_env->control = if_match;
// If the tags match we extract the values from the exception object and
// push them onto the operand stack using the passed {values} vector.
- SetEnv(if_catch_env);
+ SetEnv(if_match_env);
// TODO(mstarzinger): Can't use BUILD() here, GetExceptionValues() returns
// TFNode** rather than TFNode*. Fix to add landing pads.
TFNode** caught_values =
- builder_->GetExceptionValues(exception, imm.exception);
+ builder_->GetExceptionValues(exception.node, imm.exception);
for (size_t i = 0, e = values.size(); i < e; ++i) {
values[i].node = caught_values[i];
}
+ BrOrRet(decoder, depth);
+
+    // If the tags don't match we fall through here.
+ SetEnv(if_no_match_env);
}
- void CatchAll(FullDecoder* decoder, Control* block) {
- DCHECK(block->is_try_catchall() || block->is_try_catch());
- TFNode* exception = block->try_info->exception;
+ void Catch(FullDecoder* decoder, Control* block, Value* exception) {
+ DCHECK(block->is_try_catch());
+
current_catch_ = block->previous_catch; // Pop try scope.
- SsaEnv* catch_env = block->try_info->catch_env;
- SetEnv(catch_env);
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
- DCHECK_EQ(exception != nullptr, ssa_env_->reached());
- if (exception == nullptr) {
+ if (!block->try_info->might_throw()) {
block->reachability = kSpecOnlyReachable;
return;
}
+
+ SetEnv(block->try_info->catch_env);
+ DCHECK_NOT_NULL(block->try_info->exception);
+ exception->node = block->try_info->exception;
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -498,35 +505,38 @@ class WasmGraphBuildingInterface {
}
void MemoryInit(FullDecoder* decoder,
- const MemoryInitImmediate<validate>& imm,
- Vector<Value> args) {
- BUILD(Unreachable, decoder->position());
+ const MemoryInitImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ BUILD(MemoryInit, imm.data_segment_index, dst.node, src.node, size.node,
+ decoder->position());
}
void MemoryDrop(FullDecoder* decoder,
const MemoryDropImmediate<validate>& imm) {
- BUILD(Unreachable, decoder->position());
+ BUILD(MemoryDrop, imm.index, decoder->position());
}
void MemoryCopy(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- Vector<Value> args) {
- BUILD(Unreachable, decoder->position());
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& src, const Value& size) {
+ BUILD(MemoryCopy, dst.node, src.node, size.node, decoder->position());
}
void MemoryFill(FullDecoder* decoder,
- const MemoryIndexImmediate<validate>& imm,
- Vector<Value> args) {
- BUILD(Unreachable, decoder->position());
+ const MemoryIndexImmediate<validate>& imm, const Value& dst,
+ const Value& value, const Value& size) {
+ BUILD(MemoryFill, dst.node, value.node, size.node, decoder->position());
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(Unreachable, decoder->position());
+ BUILD(TableInit, imm.table.index, imm.elem_segment_index, args[0].node,
+ args[1].node, args[2].node, decoder->position());
}
void TableDrop(FullDecoder* decoder,
const TableDropImmediate<validate>& imm) {
- BUILD(Unreachable, decoder->position());
+ BUILD(TableDrop, imm.index, decoder->position());
}
void TableCopy(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(Unreachable, decoder->position());
+ BUILD(TableCopy, imm.index, args[0].node, args[1].node, args[2].node,
+ decoder->position());
}
private:
@@ -643,18 +653,21 @@ class WasmGraphBuildingInterface {
void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- if (!ssa_env_->reached()) return;
SsaEnv* target = c->end_env;
const bool first = target->state == SsaEnv::kUnreachable;
Goto(decoder, ssa_env_, target);
+ if (merge->arity == 0) return;
+
uint32_t avail =
decoder->stack_size() - decoder->control_at(0)->stack_depth;
+ DCHECK_GE(avail, merge->arity);
uint32_t start = avail >= merge->arity ? 0 : merge->arity - avail;
+ Value* stack_values = decoder->stack_value(merge->arity);
for (uint32_t i = start; i < merge->arity; ++i) {
- auto& val = decoder->GetMergeValueFromStack(c, merge, i);
- auto& old = (*merge)[i];
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
DCHECK(val.type == old.type || val.type == kWasmVar);
old.node = first ? val.node
@@ -666,7 +679,6 @@ class WasmGraphBuildingInterface {
void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
- if (!from->reached()) return;
switch (to->state) {
case SsaEnv::kUnreachable: { // Overwrite destination.
to->state = SsaEnv::kReached;
@@ -688,7 +700,7 @@ class WasmGraphBuildingInterface {
to->effect = builder_->EffectPhi(2, effects, merge);
}
// Merge SSA values.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
TFNode* a = to->locals[i];
TFNode* b = from->locals[i];
if (a != b) {
@@ -710,7 +722,7 @@ class WasmGraphBuildingInterface {
to->effect = builder_->CreateOrMergeIntoEffectPhi(merge, to->effect,
from->effect);
// Merge locals.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
to->locals[i] = builder_->CreateOrMergeIntoPhi(
ValueTypes::MachineRepresentationFor(decoder->GetLocalType(i)),
merge, to->locals[i], from->locals[i]);
@@ -727,7 +739,6 @@ class WasmGraphBuildingInterface {
}
SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
- if (!env->reached()) return Split(decoder, env);
env->state = SsaEnv::kMerged;
env->control = builder_->Loop(env->control);
@@ -740,7 +751,7 @@ class WasmGraphBuildingInterface {
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
int instance_cache_index = decoder->total_locals();
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
@@ -758,7 +769,7 @@ class WasmGraphBuildingInterface {
}
// Conservatively introduce phis for all local variables.
- for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
+ for (int i = decoder->num_locals() - 1; i >= 0; i--) {
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
@@ -777,22 +788,16 @@ class WasmGraphBuildingInterface {
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->NumLocals();
+ size_t size = sizeof(TFNode*) * decoder->num_locals();
result->control = from->control;
result->effect = from->effect;
- if (from->reached()) {
- result->state = SsaEnv::kReached;
- result->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
- memcpy(result->locals, from->locals, size);
- result->instance_cache = from->instance_cache;
- } else {
- result->state = SsaEnv::kUnreachable;
- result->locals = nullptr;
- result->instance_cache = {};
- }
+ result->state = SsaEnv::kReached;
+ result->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
+ : nullptr;
+ memcpy(result->locals, from->locals, size);
+ result->instance_cache = from->instance_cache;
return result;
}
@@ -801,7 +806,6 @@ class WasmGraphBuildingInterface {
// unreachable.
SsaEnv* Steal(Zone* zone, SsaEnv* from) {
DCHECK_NOT_NULL(from);
- if (!from->reached()) return UnreachableEnv(zone);
SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
result->state = SsaEnv::kReached;
result->locals = from->locals;
diff --git a/chromium/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h b/chromium/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h
new file mode 100644
index 00000000000..c50183d33ee
--- /dev/null
+++ b/chromium/v8/src/wasm/js-to-wasm-wrapper-cache-inl.h
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
+
+#include "src/compiler/wasm-compiler.h"
+#include "src/counters.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class JSToWasmWrapperCache {
+ public:
+ Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import) {
+ std::pair<bool, FunctionSig> key(is_import, *sig);
+ Handle<Code>& cached = cache_[key];
+ if (cached.is_null()) {
+ cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
+ .ToHandleChecked();
+ }
+ return cached;
+ }
+
+ private:
+ // We generate different code for calling imports than calling wasm functions
+ // in this module. Both are cached separately.
+ using CacheKey = std::pair<bool, FunctionSig>;
+ std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_INL_H_
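The cache key pairs {is_import} with the signature because, as the comment in
the class notes, a wrapper for calling an imported function is not
interchangeable with one for a wasm function defined in the same module. A
usage sketch, using only the call shape visible above (the isolate and
function objects are assumed to come from the instantiation code):

JSToWasmWrapperCache cache;
// The first call compiles the wrapper; a later call with an equal signature
// and the same is_import flag returns the cached Handle<Code> directly.
Handle<Code> wrapper = cache.GetOrCompileJSToWasmWrapper(
    isolate, function.sig, function.imported);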
diff --git a/chromium/v8/src/wasm/jump-table-assembler.h b/chromium/v8/src/wasm/jump-table-assembler.h
index a83a7d5b210..68fe5966609 100644
--- a/chromium/v8/src/wasm/jump-table-assembler.h
+++ b/chromium/v8/src/wasm/jump-table-assembler.h
@@ -84,8 +84,9 @@ class JumpTableAssembler : public TurboAssembler {
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
: TurboAssembler(nullptr, JumpTableAssemblerOptions(),
- reinterpret_cast<void*>(slot_addr), size,
- CodeObjectRequired::kNo) {}
+ CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(
+ reinterpret_cast<uint8_t*>(slot_addr), size)) {}
// To allow concurrent patching of the jump table entries, we need to ensure
// that the instruction containing the call target does not cross cache-line
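The constructor above hands the assembler an {ExternalAssemblerBuffer} over
the jump-table slot itself, so generated instructions land directly in the
slot's memory rather than in an internally allocated buffer. A hedged sketch
of the patching pattern this supports (slot_addr is supplied by the caller;
the emit step is elided since its API is outside this excerpt):

// Assemble directly over one existing jump-table slot.
JumpTableAssembler jtasm(slot_addr);
// ... emit the new branch into the slot, then flush the instruction cache
// for [slot_addr, slot_addr + size) before other threads may execute it.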
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index d27e199a778..bdceb0b73bd 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -6,6 +6,8 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
+#include "src/base/enum-set.h"
+#include "src/base/optional.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
@@ -15,6 +17,7 @@
#include "src/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -27,11 +30,6 @@
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
- } while (false)
-
#define TRACE_COMPILE(...) \
do { \
if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
@@ -53,15 +51,6 @@ namespace wasm {
namespace {
-// Callbacks will receive either {kFailedCompilation} or both
-// {kFinishedBaselineCompilation} and {kFinishedTopTierCompilation}, in that
-// order. If tier up is off, both events are delivered right after each other.
-enum class CompilationEvent : uint8_t {
- kFinishedBaselineCompilation,
- kFinishedTopTierCompilation,
- kFailedCompilation
-};
-
enum class CompileMode : uint8_t { kRegular, kTiering };
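The protocol from the comment removed above still holds for the callback API
used below: each callback receives either {kFailedCompilation}, or
{kFinishedBaselineCompilation} followed by {kFinishedTopTierCompilation} (the
enum itself moved out of this file). A hedged registration sketch against
{CompilationState::AddCallback}; the lambda body is illustrative:

compilation_state->AddCallback(
    [](CompilationEvent event, const WasmError* error) {
      switch (event) {
        case CompilationEvent::kFinishedBaselineCompilation:
          break;  // runnable at baseline tier; top tier may still follow
        case CompilationEvent::kFinishedTopTierCompilation:
          break;  // every function reached its final tier
        case CompilationEvent::kFailedCompilation:
          break;  // {error} carries the first compilation error
      }
    });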
// The {CompilationStateImpl} keeps track of the compilation state of the
@@ -71,8 +60,6 @@ enum class CompileMode : uint8_t { kRegular, kTiering };
// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
public:
- using callback_t = std::function<void(CompilationEvent, const VoidResult*)>;
-
CompilationStateImpl(internal::Isolate*, NativeModule*);
~CompilationStateImpl();
@@ -85,9 +72,9 @@ class CompilationStateImpl {
// compilation.
void SetNumberOfFunctionsToCompile(size_t num_functions);
- // Set the callback function to be called on compilation events. Needs to be
+ // Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run.
- void SetCallback(callback_t callback);
+ void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
@@ -98,13 +85,12 @@ class CompilationStateImpl {
bool HasCompilationUnitToFinish();
- void OnFinishedUnit();
- void ScheduleUnitForFinishing(std::unique_ptr<WasmCompilationUnit> unit,
- ExecutionTier tier);
- void ScheduleCodeLogging(WasmCode*);
+ void OnFinishedUnit(ExecutionTier, WasmCode*);
+ void ReportDetectedFeatures(const WasmFeatures& detected);
void OnBackgroundTaskStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
+ void RestartBackgroundCompileTask();
void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max());
// Only one foreground thread (finisher) is allowed to run at a time.
// {SetFinisherIsRunning} returns whether the flag changed its state.
@@ -113,53 +99,43 @@ class CompilationStateImpl {
void Abort();
- void SetError(uint32_t func_index, const ResultBase& error_result);
+ void SetError(uint32_t func_index, const WasmError& error);
Isolate* isolate() const { return isolate_; }
bool failed() const {
- base::MutexGuard guard(&mutex_);
- return compile_error_ != nullptr;
+ return compile_error_.load(std::memory_order_relaxed) != nullptr;
}
bool baseline_compilation_finished() const {
+ base::MutexGuard guard(&mutex_);
return outstanding_baseline_units_ == 0 ||
(compile_mode_ == CompileMode::kTiering &&
outstanding_tiering_units_ == 0);
}
- bool has_outstanding_units() const {
- return outstanding_tiering_units_ > 0 || outstanding_baseline_units_ > 0;
- }
-
CompileMode compile_mode() const { return compile_mode_; }
WasmFeatures* detected_features() { return &detected_features_; }
// Call {GetCompileError} from foreground threads only, since we access
// NativeModule::wire_bytes, which is set from the foreground thread once the
// stream has finished.
- VoidResult GetCompileError() {
- DCHECK_NOT_NULL(compile_error_);
- std::ostringstream error;
- error << "Compiling wasm function \"";
+ WasmError GetCompileError() {
+ CompilationError* error = compile_error_.load(std::memory_order_acquire);
+ DCHECK_NOT_NULL(error);
+ std::ostringstream error_msg;
+ error_msg << "Compiling wasm function \"";
wasm::ModuleWireBytes wire_bytes(native_module_->wire_bytes());
wasm::WireBytesRef name_ref = native_module_->module()->LookupFunctionName(
- wire_bytes, compile_error_->func_index);
+ wire_bytes, error->func_index);
if (name_ref.is_set()) {
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
- error.write(name.start(), name.length());
+ error_msg.write(name.start(), name.length());
} else {
- error << "wasm-function[" << compile_error_->func_index << "]";
+ error_msg << "wasm-function[" << error->func_index << "]";
}
- error << "\" failed: " << compile_error_->result.error_msg();
- return VoidResult::Error(compile_error_->result.error_offset(),
- error.str());
- }
-
- std::shared_ptr<WireBytesStorage> GetSharedWireBytesStorage() const {
- base::MutexGuard guard(&mutex_);
- DCHECK_NOT_NULL(wire_bytes_storage_);
- return wire_bytes_storage_;
+ error_msg << "\" failed: " << error->error.message();
+ return WasmError{error->error.offset(), error_msg.str()};
}
void SetWireBytesStorage(
@@ -168,18 +144,18 @@ class CompilationStateImpl {
wire_bytes_storage_ = wire_bytes_storage;
}
- std::shared_ptr<WireBytesStorage> GetWireBytesStorage() {
+ std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
base::MutexGuard guard(&mutex_);
+ DCHECK_NOT_NULL(wire_bytes_storage_);
return wire_bytes_storage_;
}
private:
struct CompilationError {
uint32_t const func_index;
- VoidResult const result;
- CompilationError(uint32_t func_index, const ResultBase& compile_result)
- : func_index(func_index),
- result(VoidResult::ErrorFrom(compile_result)) {}
+ WasmError const error;
+ CompilationError(uint32_t func_index, WasmError error)
+ : func_index(func_index), error(std::move(error)) {}
};
class LogCodesTask : public CancelableTask {
@@ -217,7 +193,19 @@ class CompilationStateImpl {
std::vector<WasmCode*> code_to_log_;
};
- void NotifyOnEvent(CompilationEvent event, const VoidResult* error_result);
+ class FreeCallbacksTask : public CancelableTask {
+ public:
+ explicit FreeCallbacksTask(CompilationStateImpl* comp_state)
+ : CancelableTask(&comp_state->foreground_task_manager_),
+ compilation_state_(comp_state) {}
+
+ void RunInternal() override { compilation_state_->callbacks_.clear(); }
+
+ private:
+ CompilationStateImpl* const compilation_state_;
+ };
+
+ void NotifyOnEvent(CompilationEvent event, const WasmError* error);
std::vector<std::unique_ptr<WasmCompilationUnit>>& finish_units() {
return baseline_compilation_finished() ? tiering_finish_units_
@@ -235,6 +223,11 @@ class CompilationStateImpl {
// compilation is running.
bool const should_log_code_;
+ // Compilation error, atomically updated, but at most once (nullptr -> error).
+ // Uses acquire-release semantics (acquire on load, release on update).
+ // For checking whether an error is set, relaxed semantics can be used.
+ std::atomic<CompilationError*> compile_error_{nullptr};
+
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@@ -247,7 +240,6 @@ class CompilationStateImpl {
bool finisher_is_running_ = false;
size_t num_background_tasks_ = 0;
- std::unique_ptr<CompilationError> compile_error_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_finish_units_;
@@ -265,20 +257,27 @@ class CompilationStateImpl {
// compiling.
std::shared_ptr<WireBytesStorage> wire_bytes_storage_;
+ size_t outstanding_baseline_units_ = 0;
+ size_t outstanding_tiering_units_ = 0;
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
- // Callback function to be called on compilation events.
- callback_t callback_;
+ // Callback functions to be called on compilation events. Only accessible from
+ // the foreground thread.
+ std::vector<CompilationState::callback_t> callbacks_;
+
+ // Remember whether {Abort()} was called. When set from the foreground this
+ // ensures no more callbacks will be called afterwards. No guarantees when set
+ // from the background. Only needs to be atomic so that it can be set from
+ // foreground and background.
+ std::atomic<bool> aborted_{false};
CancelableTaskManager background_task_manager_;
CancelableTaskManager foreground_task_manager_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
const size_t max_background_tasks_ = 0;
-
- size_t outstanding_baseline_units_ = 0;
- size_t outstanding_tiering_units_ = 0;
};
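The comments on {compile_error_} above describe a publish-once protocol: the
pointer moves from nullptr to a heap-allocated error exactly once, with
release semantics on the winning store and acquire on loads that read the
message. A standalone sketch of that protocol (illustrative; the real logic
is in {CompilationStateImpl::SetError}):

void PublishOnce(std::atomic<CompilationError*>& slot,
                 std::unique_ptr<CompilationError> candidate) {
  CompilationError* expected = nullptr;
  // Keep only the first error; release ordering makes its fields visible
  // to any thread that later loads the pointer with acquire ordering.
  if (slot.compare_exchange_strong(expected, candidate.get(),
                                   std::memory_order_acq_rel)) {
    candidate.release();  // the slot now owns the allocation
  }
}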
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
@@ -287,165 +286,24 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
}
}
-class JSToWasmWrapperCache {
- public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
- bool is_import) {
- std::pair<bool, FunctionSig> key(is_import, *sig);
- Handle<Code>& cached = cache_[key];
- if (cached.is_null()) {
- cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
- .ToHandleChecked();
- }
- return cached;
- }
-
- private:
- // We generate different code for calling imports than calling wasm functions
- // in this module. Both are cached separately.
- using CacheKey = std::pair<bool, FunctionSig>;
- std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
-};
-
-// A helper class to simplify instantiating a module from a module object.
-// It closes over the {Isolate}, the {ErrorThrower}, etc.
-class InstanceBuilder {
- public:
- InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory);
-
- // Build an instance, in all of its glory.
- MaybeHandle<WasmInstanceObject> Build();
- // Run the start function, if any.
- bool ExecuteStartFunction();
-
- private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- size_t table_size;
- };
-
- // A pre-evaluated value to use in import binding.
- struct SanitizedImport {
- Handle<String> module_name;
- Handle<String> import_name;
- Handle<Object> value;
- };
-
- Isolate* isolate_;
- const WasmFeatures enabled_;
- const WasmModule* const module_;
- ErrorThrower* thrower_;
- Handle<WasmModuleObject> module_object_;
- MaybeHandle<JSReceiver> ffi_;
- MaybeHandle<JSArrayBuffer> memory_;
- Handle<JSArrayBuffer> globals_;
- std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
- std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
- Handle<WasmExportedFunction> start_function_;
- JSToWasmWrapperCache js_to_wasm_cache_;
- std::vector<SanitizedImport> sanitized_imports_;
-
- UseTrapHandler use_trap_handler() const {
- return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
- : kNoTrapHandler;
- }
-
-// Helper routines to print out errors with imports.
-#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
- void Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name, Handle<String> import_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
- index, module_name->ToCString().get(), \
- import_name->ToCString().get(), error); \
- } \
- \
- MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name) { \
- thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
- module_name->ToCString().get(), error); \
- return MaybeHandle<Object>(); \
- }
-
- ERROR_THROWER_WITH_MESSAGE(LinkError)
- ERROR_THROWER_WITH_MESSAGE(TypeError)
-
-#undef ERROR_THROWER_WITH_MESSAGE
-
- // Look up an import value in the {ffi_} object.
- MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- Handle<String> import_name);
-
- // Look up an import value in the {ffi_} object specifically for linking an
- // asm.js module. This only performs non-observable lookups, which allows
- // falling back to JavaScript proper (and hence re-executing all lookups) if
- // module instantiation fails.
- MaybeHandle<Object> LookupImportAsm(uint32_t index,
- Handle<String> import_name);
-
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
-
- // Load data segments into the memory.
- void LoadDataSegments(Handle<WasmInstanceObject> instance);
-
- void WriteGlobalValue(const WasmGlobal& global, double value);
- void WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value);
-
- void SanitizeImports();
-
- // Find the imported memory buffer if there is one. This is used to see if we
- // need to recompile with bounds checks before creating the instance.
- MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
-
- // Process the imports, including functions, tables, globals, and memory, in
- // order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
- int ProcessImports(Handle<WasmInstanceObject> instance);
-
- template <typename T>
- T* GetRawGlobalPtr(const WasmGlobal& global);
-
- // Process initialization of globals.
- void InitGlobals();
-
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
-
- bool NeedsWrappers() const;
-
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<WasmInstanceObject> instance);
-
- void InitializeTables(Handle<WasmInstanceObject> instance);
-
- void LoadTableSegments(Handle<WasmInstanceObject> instance);
-
- // Creates new exception tags for all exceptions. Note that some tags might
- // already exist if they were imported, those tags will be re-used.
- void InitializeExceptions(Handle<WasmInstanceObject> instance);
-};
-
CompilationStateImpl* Impl(CompilationState* compilation_state) {
return reinterpret_cast<CompilationStateImpl*>(compilation_state);
}
+const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
+ return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
+}
} // namespace
//////////////////////////////////////////////////////
// PIMPL implementation of {CompilationState}.
+CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
+
void CompilationState::CancelAndWait() { Impl(this)->CancelAndWait(); }
-void CompilationState::SetError(uint32_t func_index,
- const ResultBase& error_result) {
- Impl(this)->SetError(func_index, error_result);
+void CompilationState::SetError(uint32_t func_index, const WasmError& error) {
+ Impl(this)->SetError(func_index, error);
}
void CompilationState::SetWireBytesStorage(
@@ -453,11 +311,16 @@ void CompilationState::SetWireBytesStorage(
Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage));
}
-std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage() {
+std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage()
+ const {
return Impl(this)->GetWireBytesStorage();
}
-CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); }
+void CompilationState::AddCallback(CompilationState::callback_t callback) {
+ return Impl(this)->AddCallback(std::move(callback));
+}
+
+bool CompilationState::failed() const { return Impl(this)->failed(); }
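Each of these methods is a thin shim: {CompilationState} itself has no
members, and {Impl} reinterpret_casts the public pointer to the private
implementation type declared above. The idiom in isolation (hypothetical
names; it is only sound because the object is always allocated as the
implementation type, as {CompilationState::New} does below):

class Widget { public: void Frob(); };  // opaque public type, no members
class WidgetImpl { public: void Frob() { /* real work */ } };
WidgetImpl* Impl(Widget* w) { return reinterpret_cast<WidgetImpl*>(w); }
void Widget::Frob() { Impl(this)->Frob(); }  // forward to the impl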
// static
std::unique_ptr<CompilationState> CompilationState::New(
@@ -469,19 +332,6 @@ std::unique_ptr<CompilationState> CompilationState::New(
// End of PIMPL implementation of {CompilationState}.
//////////////////////////////////////////////////////
-MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
- auto instance = builder.Build();
- if (!instance.is_null() && builder.ExecuteStartFunction()) {
- return instance;
- }
- DCHECK(isolate->has_pending_exception() || thrower->error());
- return {};
-}
-
WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
int func_index) {
base::ElapsedTimer compilation_timer;
@@ -498,21 +348,22 @@ WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
module_start + func->code.offset(),
module_start + func->code.end_offset()};
- WasmCompilationUnit unit(isolate->wasm_engine(), native_module, func_index);
+ ExecutionTier tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
+ WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
- unit.ExecuteCompilation(
+ WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate->counters(),
Impl(native_module->compilation_state())->detected_features());
+ WasmCode* code = unit.Publish(std::move(result), native_module);
- // If there is a pending error, something really went wrong. The module was
- // verified before starting execution with lazy compilation.
+ // During lazy compilation, we should never get compilation errors. The module
+ // was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
- CHECK(!unit.failed());
-
- WasmCode* code = unit.result();
+ CHECK(!native_module->compilation_state()->failed());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
@@ -555,7 +406,10 @@ class CompilationUnitBuilder {
public:
explicit CompilationUnitBuilder(NativeModule* native_module,
WasmEngine* wasm_engine)
- : native_module_(native_module), wasm_engine_(wasm_engine) {}
+ : native_module_(native_module),
+ wasm_engine_(wasm_engine),
+ default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
+ native_module->module())) {}
void AddUnit(uint32_t func_index) {
switch (compilation_state()->compile_mode()) {
@@ -566,8 +420,7 @@ class CompilationUnitBuilder {
CreateUnit(func_index, ExecutionTier::kBaseline));
return;
case CompileMode::kRegular:
- baseline_units_.emplace_back(CreateUnit(
- func_index, WasmCompilationUnit::GetDefaultExecutionTier()));
+ baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
return;
}
UNREACHABLE();
@@ -588,8 +441,8 @@ class CompilationUnitBuilder {
private:
std::unique_ptr<WasmCompilationUnit> CreateUnit(uint32_t func_index,
ExecutionTier tier) {
- return base::make_unique<WasmCompilationUnit>(wasm_engine_, native_module_,
- func_index, tier);
+ return base::make_unique<WasmCompilationUnit>(wasm_engine_, func_index,
+ tier);
}
CompilationStateImpl* compilation_state() const {
@@ -598,6 +451,7 @@ class CompilationUnitBuilder {
NativeModule* const native_module_;
WasmEngine* const wasm_engine_;
+ const ExecutionTier default_tier_;
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
@@ -607,22 +461,11 @@ bool compile_lazy(const WasmModule* module) {
(FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
}
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
- return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-
void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-bool in_bounds(uint32_t offset, size_t size, size_t upper) {
- return offset + size <= upper && offset + size >= offset;
-}
-
-using WasmInstanceMap =
- IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-
double MonotonicallyIncreasingTimeInMs() {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
base::Time::kMillisecondsPerSecond;
@@ -633,6 +476,7 @@ double MonotonicallyIncreasingTimeInMs() {
// within the result_mutex_ lock when no finishing task is running, i.e. when
// the finisher_is_running_ flag is not set.
bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
+ NativeModule* native_module,
CompilationStateImpl* compilation_state,
WasmFeatures* detected,
Counters* counters) {
@@ -645,10 +489,11 @@ bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
// Get the tier before starting compilation, as compilation can switch tiers
// if baseline bails out.
ExecutionTier tier = unit->tier();
- unit->ExecuteCompilation(env, compilation_state->GetSharedWireBytesStorage(),
- counters, detected);
- if (!unit->failed()) compilation_state->ScheduleCodeLogging(unit->result());
- compilation_state->ScheduleUnitForFinishing(std::move(unit), tier);
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ env, compilation_state->GetWireBytesStorage(), counters, detected);
+
+ WasmCode* code = unit->Publish(std::move(result), native_module);
+ compilation_state->OnFinishedUnit(tier, code);
return true;
}
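The body above separates compilation from publication: {ExecuteCompilation}
produces a self-contained {WasmCompilationResult}, which {Publish} then turns
into {WasmCode} owned by the native module before the state is notified. The
two-phase shape, reduced to its essentials (arguments abbreviated from the
code above):

WasmCompilationResult result =
    unit->ExecuteCompilation(&env, wire_bytes, counters, detected);
WasmCode* code = unit->Publish(std::move(result), native_module);
compilation_state->OnFinishedUnit(tier, code);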
@@ -672,14 +517,6 @@ void FinishCompilationUnits(CompilationStateImpl* compilation_state) {
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextExecutedUnit();
if (unit == nullptr) break;
-
- if (unit->failed()) {
- compilation_state->Abort();
- break;
- }
-
- // Update the compilation state.
- compilation_state->OnFinishedUnit();
}
}
@@ -693,12 +530,8 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTasks} instances are spawned which run on
// the background threads.
- // 2.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {baseline_finish_units_}.
- // 2.b) If {baseline_finish_units_} contains a compilation unit, the main
- // thread dequeues it and finishes the compilation.
+ // 2) The background threads and the main thread pick one compilation unit at
+ // a time and execute the parallel phase of the compilation unit.
// 3) After the parallel phase of all compilation units has started, the
// main thread continues to finish all compilation units as long as
// baseline-compilation units are left to be processed.
@@ -727,22 +560,16 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
// background threads.
InitializeCompilationUnits(native_module, isolate->wasm_engine());
- // 2.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {baseline_finish_units_}.
- // The foreground task bypasses waiting on memory threshold, because
- // its results will immediately be converted to code (below).
+ // 2) The background threads and the main thread pick one compilation unit at
+ // a time and execute the parallel phase of the compilation unit.
WasmFeatures detected_features;
CompilationEnv env = native_module->CreateCompilationEnv();
- while (FetchAndExecuteCompilationUnit(&env, compilation_state,
+ while (FetchAndExecuteCompilationUnit(&env, native_module, compilation_state,
&detected_features,
isolate->counters()) &&
!compilation_state->baseline_compilation_finished()) {
- // 2.b) If {baseline_finish_units_} contains a compilation unit, the main
- // thread dequeues it and finishes the compilation unit. Compilation
- // units are finished concurrently to the background threads to save
- // memory.
+ // TODO(clemensh): Refactor ownership of the AsyncCompileJob and remove
+ // this.
FinishCompilationUnits(compilation_state);
if (compilation_state->failed()) break;
@@ -778,16 +605,17 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module();
WasmFeatures detected = kNoWasmFeatures;
- for (uint32_t i = 0; i < module->functions.size(); ++i) {
- const WasmFunction& func = module->functions[i];
+ auto* comp_state = Impl(native_module->compilation_state());
+ ExecutionTier tier =
+ WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
+ for (const WasmFunction& func : module->functions) {
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
- bool success = WasmCompilationUnit::CompileWasmFunction(
- isolate, native_module, &detected, &func);
- if (!success) {
- thrower->CompileFailed(
- Impl(native_module->compilation_state())->GetCompileError());
+ WasmCompilationUnit::CompileWasmFunction(isolate, native_module, &detected,
+ &func, tier);
+ if (comp_state->failed()) {
+ thrower->CompileFailed(comp_state->GetCompileError());
break;
}
}
@@ -823,7 +651,8 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
TruncatedUserString<> name(wire_bytes.GetNameOrNull(&func, module));
thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
name.length(), name.start(),
- result.error_msg().c_str(), result.error_offset());
+ result.error().message().c_str(),
+ result.error().offset());
break;
}
}
@@ -907,12 +736,7 @@ class FinishCompileTask : public CancelableTask {
break;
}
- DCHECK_IMPLIES(unit->failed(), compilation_state_->failed());
- if (unit->failed()) break;
-
- // Update the compilation state, and possibly notify
- // threads waiting for events.
- compilation_state_->OnFinishedUnit();
+ if (compilation_state_->failed()) break;
if (deadline < MonotonicallyIncreasingTimeInMs()) {
// We reached the deadline. We reschedule this task and return
@@ -947,11 +771,18 @@ class BackgroundCompileTask : public CancelableTask {
CompilationEnv env = native_module_->CreateCompilationEnv();
auto* compilation_state = Impl(native_module_->compilation_state());
WasmFeatures detected_features = kNoWasmFeatures;
+ double deadline = MonotonicallyIncreasingTimeInMs() + 50.0;
while (!compilation_state->failed()) {
- if (!FetchAndExecuteCompilationUnit(&env, compilation_state,
- &detected_features, counters_)) {
+ if (!FetchAndExecuteCompilationUnit(&env, native_module_,
+ compilation_state, &detected_features,
+ counters_)) {
break;
}
+ if (deadline < MonotonicallyIncreasingTimeInMs()) {
+ compilation_state->ReportDetectedFeatures(detected_features);
+ compilation_state->RestartBackgroundCompileTask();
+ return;
+ }
}
compilation_state->OnBackgroundTaskStopped(detected_features);
}
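The 50 ms deadline above implements cooperative time slicing: instead of
holding a worker thread until the queue drains, the task reports the features
it detected so far, reposts a fresh instance of itself, and returns. The
idiom in isolation (a sketch with assumed names, not the V8 task API):

void RunSlice(WorkQueue& queue, double slice_ms) {
  double deadline = NowMs() + slice_ms;
  while (WorkItem* item = queue.Next()) {
    item->Run();
    if (NowMs() >= deadline) {
      queue.RepostThisTask();  // yield the worker; continue in a new task
      return;
    }
  }
}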
@@ -999,7 +830,8 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
// Compile JS->wasm wrappers for exported functions.
*export_wrappers_out =
isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
- CompileJsToWasmWrappers(isolate, native_module.get(), *export_wrappers_out);
+ CompileJsToWasmWrappers(isolate, native_module->module(),
+ *export_wrappers_out);
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
@@ -1007,1253 +839,12 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
-InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory)
- : isolate_(isolate),
- enabled_(module_object->native_module()->enabled_features()),
- module_(module_object->module()),
- thrower_(thrower),
- module_object_(module_object),
- ffi_(ffi),
- memory_(memory) {
- sanitized_imports_.reserve(module_->import_table.size());
-}
-
-// Build an instance, in all of its glory.
-MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
- // Check that an imports argument was provided, if the module requires it.
- // No point in continuing otherwise.
- if (!module_->import_table.empty() && ffi_.is_null()) {
- thrower_->TypeError(
- "Imports argument must be present and must be an object");
- return {};
- }
-
- SanitizeImports();
- if (thrower_->error()) return {};
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- // From here on, we expect the build pipeline to run without exiting to JS.
- DisallowJavascriptExecution no_js(isolate_);
- // Record build time into correct bucket, then build instance.
- TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
- isolate_->counters(), module_->origin, wasm_instantiate, module_time));
-
- //--------------------------------------------------------------------------
- // Allocate the memory array buffer.
- //--------------------------------------------------------------------------
- // We allocate the memory buffer before cloning or reusing the compiled module
- // so we will know whether we need to recompile with bounds checks.
- uint32_t initial_pages = module_->initial_pages;
- auto initial_pages_counter = SELECT_WASM_COUNTER(
- isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
- initial_pages_counter->AddSample(initial_pages);
- // Asm.js has memory_ already set at this point, so we don't want to
- // overwrite it.
- if (memory_.is_null()) {
- memory_ = FindImportedMemoryBuffer();
- }
- if (!memory_.is_null()) {
- // Set externally passed ArrayBuffer non neuterable.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- memory->set_is_neuterable(false);
-
- DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
- memory->backing_store() == nullptr);
- } else if (initial_pages > 0 || use_trap_handler()) {
- // We need to unconditionally create a guard region if using trap handlers,
- // even when the size is zero to prevent null-dereference issues
- // (e.g. https://crbug.com/769637).
- // Allocate memory if the initial size is more than 0 pages.
- memory_ = AllocateMemory(initial_pages);
- if (memory_.is_null()) {
- // failed to allocate memory
- DCHECK(isolate_->has_pending_exception() || thrower_->error());
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Recompile module if using trap handlers but could not get guarded memory
- //--------------------------------------------------------------------------
- if (module_->origin == kWasmOrigin && use_trap_handler()) {
- // Make sure the memory has suitable guard regions.
- WasmMemoryTracker* const memory_tracker =
- isolate_->wasm_engine()->memory_tracker();
-
- if (!memory_tracker->HasFullGuardRegions(
- memory_.ToHandleChecked()->backing_store())) {
- if (!FLAG_wasm_trap_handler_fallback) {
- thrower_->LinkError(
- "Provided memory is lacking guard regions but fallback was "
- "disabled.");
- return {};
- }
-
- TRACE("Recompiling module without bounds checks\n");
- constexpr bool allow_trap_handler = false;
- // TODO(wasm): Fix this before enabling the trap handler fallback.
- USE(allow_trap_handler);
- // Disable trap handlers on this native module.
- NativeModule* native_module = module_object_->native_module();
- native_module->DisableTrapHandler();
-
- // Recompile all functions in this native module.
- ErrorThrower thrower(isolate_, "recompile");
- CompileNativeModule(isolate_, &thrower, module_, native_module);
- if (thrower.error()) {
- return {};
- }
- DCHECK(!native_module->use_trap_handler());
- }
- }
-
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Instance object.
- //--------------------------------------------------------------------------
- NativeModule* native_module = module_object_->native_module();
- TRACE("New module instantiation for %p\n", native_module);
- Handle<WasmInstanceObject> instance =
- WasmInstanceObject::New(isolate_, module_object_);
- NativeModuleModificationScope native_modification_scope(native_module);
-
- //--------------------------------------------------------------------------
- // Set up the globals for the new instance.
- //--------------------------------------------------------------------------
- uint32_t globals_buffer_size = module_->globals_buffer_size;
- if (globals_buffer_size > 0) {
- void* backing_store =
- isolate_->array_buffer_allocator()->Allocate(globals_buffer_size);
- if (backing_store == nullptr) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- globals_ =
- isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
- constexpr bool is_external = false;
- constexpr bool is_wasm_memory = false;
- JSArrayBuffer::Setup(globals_, isolate_, is_external, backing_store,
- globals_buffer_size, SharedFlag::kNotShared,
- is_wasm_memory);
- if (globals_.is_null()) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- instance->set_globals_start(
- reinterpret_cast<byte*>(globals_->backing_store()));
- instance->set_globals_buffer(*globals_);
- }
-
- //--------------------------------------------------------------------------
- // Set up the array of references to imported globals' array buffers.
- //--------------------------------------------------------------------------
- if (module_->num_imported_mutable_globals > 0) {
- // TODO(binji): This allocates one slot for each mutable global, which is
- // more than required if multiple globals are imported from the same
- // module.
- Handle<FixedArray> buffers_array = isolate_->factory()->NewFixedArray(
- module_->num_imported_mutable_globals, TENURED);
- instance->set_imported_mutable_globals_buffers(*buffers_array);
- }
-
- //--------------------------------------------------------------------------
- // Set up the exception table used for exception tag checks.
- //--------------------------------------------------------------------------
- int exceptions_count = static_cast<int>(module_->exceptions.size());
- if (exceptions_count > 0) {
- Handle<FixedArray> exception_table =
- isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
- instance->set_exceptions_table(*exception_table);
- exception_wrappers_.resize(exceptions_count);
- }
-
- //--------------------------------------------------------------------------
- // Reserve the metadata for indirect function tables.
- //--------------------------------------------------------------------------
- int table_count = static_cast<int>(module_->tables.size());
- table_instances_.resize(table_count);
-
- //--------------------------------------------------------------------------
- // Process the imports for the module.
- //--------------------------------------------------------------------------
- int num_imported_functions = ProcessImports(instance);
- if (num_imported_functions < 0) return {};
-
- //--------------------------------------------------------------------------
- // Process the initialization for the module's globals.
- //--------------------------------------------------------------------------
- InitGlobals();
-
- //--------------------------------------------------------------------------
- // Initialize the indirect tables.
- //--------------------------------------------------------------------------
- if (table_count > 0) {
- InitializeTables(instance);
- }
-
- //--------------------------------------------------------------------------
- // Initialize the exceptions table.
- //--------------------------------------------------------------------------
- if (exceptions_count > 0) {
- InitializeExceptions(instance);
- }
-
- //--------------------------------------------------------------------------
- // Create the WebAssembly.Memory object.
- //--------------------------------------------------------------------------
- if (module_->has_memory) {
- if (!instance->has_memory_object()) {
- // No memory object exists. Create one.
- Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
- isolate_, memory_,
- module_->maximum_pages != 0 ? module_->maximum_pages : -1);
- instance->set_memory_object(*memory_object);
- }
-
- // Add the instance object to the list of instances for this memory.
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
- WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
-
- if (!memory_.is_null()) {
- // Double-check the {memory} array buffer matches the instance.
- Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- CHECK_EQ(instance->memory_size(), memory->byte_length());
- CHECK_EQ(instance->memory_start(), memory->backing_store());
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that indirect function table segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmTableInit& table_init : module_->table_inits) {
- if (!table_init.active) continue;
- DCHECK(table_init.table_index < table_instances_.size());
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- size_t table_size = table_instances_[table_init.table_index].table_size;
- if (!in_bounds(base, table_init.entries.size(), table_size)) {
- thrower_->LinkError("table initializer is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that memory segments are within bounds.
- //--------------------------------------------------------------------------
- for (const WasmDataSegment& seg : module_->data_segments) {
- if (!seg.active) continue;
- uint32_t base = EvalUint32InitExpr(seg.dest_addr);
- if (!in_bounds(base, seg.source.length(), instance->memory_size())) {
- thrower_->LinkError("data segment is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Set up the exports object for the new instance.
- //--------------------------------------------------------------------------
- ProcessExports(instance);
- if (thrower_->error()) return {};
-
- //--------------------------------------------------------------------------
- // Initialize the indirect function tables.
- //--------------------------------------------------------------------------
- if (table_count > 0) {
- LoadTableSegments(instance);
- }
-
- //--------------------------------------------------------------------------
- // Initialize the memory by loading data segments.
- //--------------------------------------------------------------------------
- if (module_->data_segments.size() > 0) {
- LoadDataSegments(instance);
- }
-
- //--------------------------------------------------------------------------
- // Debugging support.
- //--------------------------------------------------------------------------
- // Set all breakpoints that were set on the shared module.
- WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
-
- if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- std::vector<int> func_indexes;
- for (int func_index = num_imported_functions,
- num_wasm_functions = static_cast<int>(module_->functions.size());
- func_index < num_wasm_functions; ++func_index) {
- func_indexes.push_back(func_index);
- }
- WasmDebugInfo::RedirectToInterpreter(debug_info, VectorOf(func_indexes));
- }
-
- //--------------------------------------------------------------------------
- // Create a wrapper for the start function.
- //--------------------------------------------------------------------------
- if (module_->start_function_index >= 0) {
- int start_index = module_->start_function_index;
- auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function.sig, function.imported);
- // TODO(clemensh): Don't generate an exported function for the start
- // function. Use CWasmEntry instead.
- start_function_ = WasmExportedFunction::New(
- isolate_, instance, MaybeHandle<String>(), start_index,
- static_cast<int>(function.sig->parameter_count()), wrapper_code);
- }
-
- DCHECK(!isolate_->has_pending_exception());
- TRACE("Successfully built instance for module %p\n",
- module_object_->native_module());
- return instance;
-}
-
-bool InstanceBuilder::ExecuteStartFunction() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "InstanceBuilder::ExecuteStartFunction");
- if (start_function_.is_null()) return true; // No start function.
-
- HandleScope scope(isolate_);
- // Call the JS function.
- Handle<Object> undefined = isolate_->factory()->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- return false;
- }
- return true;
-}
-
-// Look up an import value in the {ffi_} object.
-MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
- Handle<String> module_name,
- Handle<String> import_name) {
- // We pre-validated in the js-api layer that the ffi object is present, and
- // a JSObject, if the module has imports.
- DCHECK(!ffi_.is_null());
-
- // Look up the module first.
- MaybeHandle<Object> result = Object::GetPropertyOrElement(
- isolate_, ffi_.ToHandleChecked(), module_name);
- if (result.is_null()) {
- return ReportTypeError("module not found", index, module_name);
- }
-
- Handle<Object> module = result.ToHandleChecked();
-
- // Look up the value in the module.
- if (!module->IsJSReceiver()) {
- return ReportTypeError("module is not an object or function", index,
- module_name);
- }
-
- result = Object::GetPropertyOrElement(isolate_, module, import_name);
- if (result.is_null()) {
- ReportLinkError("import not found", index, module_name, import_name);
- return MaybeHandle<JSFunction>();
- }
-
- return result;
-}
-
-// Look up an import value in the {ffi_} object specifically for linking an
-// asm.js module. This only performs non-observable lookups, which allows
-// falling back to JavaScript proper (and hence re-executing all lookups) if
-// module instantiation fails.
-MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
- uint32_t index, Handle<String> import_name) {
- // Check that a foreign function interface object was provided.
- if (ffi_.is_null()) {
- return ReportLinkError("missing imports object", index, import_name);
- }
-
- // Perform lookup of the given {import_name} without causing any observable
- // side-effect. We only accept accesses that resolve to data properties,
- // which is indicated by the asm.js spec in section 7 ("Linking") as well.
- Handle<Object> result;
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate_, ffi_.ToHandleChecked(), import_name);
- switch (it.state()) {
- case LookupIterator::ACCESS_CHECK:
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::JSPROXY:
- case LookupIterator::ACCESSOR:
- case LookupIterator::TRANSITION:
- return ReportLinkError("not a data property", index, import_name);
- case LookupIterator::NOT_FOUND:
- // Accepting missing properties as undefined does not cause any
- // observable difference from JavaScript semantics, we are lenient.
- result = isolate_->factory()->undefined_value();
- break;
- case LookupIterator::DATA:
- result = it.GetDataValue();
- break;
- }
-
- return result;
-}
-
-uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
- switch (expr.kind) {
- case WasmInitExpr::kI32Const:
- return expr.val.i32_const;
- case WasmInitExpr::kGlobalIndex: {
- uint32_t offset = module_->globals[expr.val.global_index].offset;
- return ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(raw_buffer_ptr(globals_, offset)));
- }
- default:
- UNREACHABLE();
- }
-}
-
-// Load data segments into the memory.
-void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
- Vector<const uint8_t> wire_bytes =
- module_object_->native_module()->wire_bytes();
- for (const WasmDataSegment& segment : module_->data_segments) {
- uint32_t source_size = segment.source.length();
- // Segments of size == 0 are just nops.
- if (source_size == 0) continue;
- // Passive segments are not copied during instantiation.
- if (!segment.active) continue;
- uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- DCHECK(in_bounds(dest_offset, source_size, instance->memory_size()));
- byte* dest = instance->memory_start() + dest_offset;
- const byte* src = wire_bytes.start() + segment.source.offset();
- memcpy(dest, src, source_size);
- }
-}
-
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
- TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
- reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset,
- num, ValueTypes::TypeName(global.type));
- switch (global.type) {
- case kWasmI32:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- static_cast<int32_t>(num));
- break;
- case kWasmI64:
- // TODO(titzer): initialization of imported i64 globals.
- UNREACHABLE();
- break;
- case kWasmF32:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- static_cast<float>(num));
- break;
- case kWasmF64:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
- static_cast<double>(num));
- break;
- default:
- UNREACHABLE();
- }
-}
-
-void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
- Handle<WasmGlobalObject> value) {
- TRACE("init [globals_start=%p + %u] = ",
- reinterpret_cast<void*>(raw_buffer_ptr(globals_, 0)), global.offset);
- switch (global.type) {
- case kWasmI32: {
- int32_t num = value->GetI32();
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
- TRACE("%d", num);
- break;
- }
- case kWasmI64: {
- int64_t num = value->GetI64();
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
- TRACE("%" PRId64, num);
- break;
- }
- case kWasmF32: {
- float num = value->GetF32();
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
- TRACE("%f", num);
- break;
- }
- case kWasmF64: {
- double num = value->GetF64();
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
- TRACE("%lf", num);
- break;
- }
- default:
- UNREACHABLE();
- }
- TRACE(", type = %s (from WebAssembly.Global)\n",
- ValueTypes::TypeName(global.type));
-}
-
-void InstanceBuilder::SanitizeImports() {
- Vector<const uint8_t> wire_bytes =
- module_object_->native_module()->wire_bytes();
- for (size_t index = 0; index < module_->import_table.size(); ++index) {
- const WasmImport& import = module_->import_table[index];
-
- Handle<String> module_name;
- MaybeHandle<String> maybe_module_name =
- WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
- import.module_name);
- if (!maybe_module_name.ToHandle(&module_name)) {
- thrower_->LinkError("Could not resolve module name for import %zu",
- index);
- return;
- }
-
- Handle<String> import_name;
- MaybeHandle<String> maybe_import_name =
- WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
- import.field_name);
- if (!maybe_import_name.ToHandle(&import_name)) {
- thrower_->LinkError("Could not resolve import name for import %zu",
- index);
- return;
- }
-
- int int_index = static_cast<int>(index);
- MaybeHandle<Object> result =
- module_->origin == kAsmJsOrigin
- ? LookupImportAsm(int_index, import_name)
- : LookupImport(int_index, module_name, import_name);
- if (thrower_->error()) {
- thrower_->LinkError("Could not find value for import %zu", index);
- return;
- }
- Handle<Object> value = result.ToHandleChecked();
- sanitized_imports_.push_back({module_name, import_name, value});
- }
-}
-
-MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
- DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
- for (size_t index = 0; index < module_->import_table.size(); index++) {
- const WasmImport& import = module_->import_table[index];
-
- if (import.kind == kExternalMemory) {
- const auto& value = sanitized_imports_[index].value;
- if (!value->IsWasmMemoryObject()) {
- return {};
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- return buffer;
- }
- }
- return {};
-}
-
-// Process the imports, including functions, tables, globals, and memory, in
-// order, loading them from the {ffi_} object. Returns the number of imported
-// functions.
-int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
- int num_imported_functions = 0;
- int num_imported_tables = 0;
- int num_imported_mutable_globals = 0;
-
- DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
- int num_imports = static_cast<int>(module_->import_table.size());
- NativeModule* native_module = instance->module_object()->native_module();
- for (int index = 0; index < num_imports; ++index) {
- const WasmImport& import = module_->import_table[index];
-
- Handle<String> module_name = sanitized_imports_[index].module_name;
- Handle<String> import_name = sanitized_imports_[index].import_name;
- Handle<Object> value = sanitized_imports_[index].value;
-
- switch (import.kind) {
- case kExternalFunction: {
- // Function imports must be callable.
- if (!value->IsCallable()) {
- ReportLinkError("function import requires a callable", index,
- module_name, import_name);
- return -1;
- }
- uint32_t func_index = import.index;
- DCHECK_EQ(num_imported_functions, func_index);
- auto js_receiver = Handle<JSReceiver>::cast(value);
- FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig);
- switch (kind) {
- case compiler::WasmImportCallKind::kLinkError:
- ReportLinkError(
- "imported function does not match the expected type", index,
- module_name, import_name);
- return -1;
- case compiler::WasmImportCallKind::kWasmToWasm: {
- // The imported function is a WASM function from another instance.
- auto imported_function = Handle<WasmExportedFunction>::cast(value);
- Handle<WasmInstanceObject> imported_instance(
- imported_function->instance(), isolate_);
- // The import reference is the instance object itself.
- Address imported_target = imported_function->GetWasmCallTarget();
- ImportedFunctionEntry entry(instance, func_index);
- entry.SetWasmToWasm(*imported_instance, imported_target);
- break;
- }
- default: {
- // The imported function is a callable.
- WasmCode* wasm_code =
- native_module->import_wrapper_cache()->GetOrCompile(
- isolate_, kind, expected_sig);
- ImportedFunctionEntry entry(instance, func_index);
- if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
- // Wasm to JS wrappers are treated specially in the import table.
- entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
- } else {
- // Wasm math intrinsics are compiled as regular Wasm functions.
- DCHECK(kind >=
- compiler::WasmImportCallKind::kFirstMathIntrinsic &&
- kind <= compiler::WasmImportCallKind::kLastMathIntrinsic);
- entry.SetWasmToWasm(*instance, wasm_code->instruction_start());
- }
- break;
- }
- }
- num_imported_functions++;
- break;
- }
- case kExternalTable: {
- if (!value->IsWasmTableObject()) {
- ReportLinkError("table import requires a WebAssembly.Table", index,
- module_name, import_name);
- return -1;
- }
- uint32_t table_num = import.index;
- DCHECK_EQ(table_num, num_imported_tables);
- const WasmTable& table = module_->tables[table_num];
- TableInstance& table_instance = table_instances_[table_num];
- table_instance.table_object = Handle<WasmTableObject>::cast(value);
- instance->set_table_object(*table_instance.table_object);
- table_instance.js_wrappers = Handle<FixedArray>(
- table_instance.table_object->functions(), isolate_);
-
- int imported_table_size = table_instance.js_wrappers->length();
- if (imported_table_size < static_cast<int>(table.initial_size)) {
- thrower_->LinkError(
- "table import %d is smaller than initial %d, got %u", index,
- table.initial_size, imported_table_size);
- return -1;
- }
-
- if (table.has_maximum_size) {
- int64_t imported_maximum_size =
- table_instance.table_object->maximum_length()->Number();
- if (imported_maximum_size < 0) {
- thrower_->LinkError(
- "table import %d has no maximum length, expected %d", index,
- table.maximum_size);
- return -1;
- }
- if (imported_maximum_size > table.maximum_size) {
- thrower_->LinkError(
- " table import %d has a larger maximum size %" PRIx64
- " than the module's declared maximum %u",
- index, imported_maximum_size, table.maximum_size);
- return -1;
- }
- }
-
- // Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
- table_instances_[table_num].table_size = imported_table_size;
- }
- // Initialize the dispatch table with the (foreign) JS functions
- // that are already in the table.
- for (int i = 0; i < imported_table_size; ++i) {
- Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
- // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
- // insert a wrapper from the other module into the local table, and
- // add a reference to the owning instance of the other module.
- if (!val->IsJSFunction()) continue;
- if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
- thrower_->LinkError("table import %d[%d] is not a wasm function",
- index, i);
- return -1;
- }
- auto target_func = Handle<WasmExportedFunction>::cast(val);
- Handle<WasmInstanceObject> target_instance =
- handle(target_func->instance(), isolate_);
- // Look up the signature's canonical id. If there is no canonical
- // id, then the signature does not appear at all in this module,
- // so putting {-1} in the table will cause checks to always fail.
- FunctionSig* sig = target_func->sig();
- IndirectFunctionTableEntry(instance, i)
- .Set(module_->signature_map.Find(*sig), target_instance,
- target_func->function_index());
- }
- num_imported_tables++;
- break;
- }
- case kExternalMemory: {
- // Validation should have failed if more than one memory object was
- // provided.
- DCHECK(!instance->has_memory_object());
- if (!value->IsWasmMemoryObject()) {
- ReportLinkError("memory import must be a WebAssembly.Memory object",
- index, module_name, import_name);
- return -1;
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- instance->set_memory_object(*memory);
- Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
- // memory_ should have already been assigned in Build().
- DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
- uint32_t imported_cur_pages =
- static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
- if (imported_cur_pages < module_->initial_pages) {
- thrower_->LinkError(
- "memory import %d is smaller than initial %u, got %u", index,
- module_->initial_pages, imported_cur_pages);
- }
- int32_t imported_maximum_pages = memory->maximum_pages();
- if (module_->has_maximum_pages) {
- if (imported_maximum_pages < 0) {
- thrower_->LinkError(
- "memory import %d has no maximum limit, expected at most %u",
- index, module_->maximum_pages);
- return -1;
- }
- if (static_cast<uint32_t>(imported_maximum_pages) >
- module_->maximum_pages) {
- thrower_->LinkError(
- "memory import %d has a larger maximum size %u than the "
- "module's declared maximum %u",
- index, imported_maximum_pages, module_->maximum_pages);
- return -1;
- }
- }
- if (module_->has_shared_memory != buffer->is_shared()) {
- thrower_->LinkError(
- "mismatch in shared state of memory, declared = %d, imported = "
- "%d",
- module_->has_shared_memory, buffer->is_shared());
- return -1;
- }
-
- break;
- }
- case kExternalGlobal: {
- // Immutable global imports are converted to numbers and written into
- // the {globals_} array buffer.
- //
- // Mutable global imports instead have their backing array buffers
- // referenced by this instance, and store the address of the imported
- // global in the {imported_mutable_globals_} array.
- const WasmGlobal& global = module_->globals[import.index];
-
- // The mutable-global proposal allows importing i64 values, but only if
- // they are passed as a WebAssembly.Global object.
- if (global.type == kWasmI64 &&
- !(enabled_.mut_global && value->IsWasmGlobalObject())) {
- ReportLinkError("global import cannot have type i64", index,
- module_name, import_name);
- return -1;
- }
- if (module_->origin == kAsmJsOrigin) {
- // Accepting {JSFunction} on top of just primitive values here is a
- // workaround to support legacy asm.js code with broken binding. Note
- // that using {NaN} (or Smi::kZero) here is what the observable
- // conversion via {ToPrimitive} would produce as well.
- // TODO(mstarzinger): This is still observable if
- // Function.prototype.valueOf or friends are patched; we might need
- // to check for that as well.
- if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
- if (value->IsPrimitive() && !value->IsSymbol()) {
- if (global.type == kWasmI32) {
- value = Object::ToInt32(isolate_, value).ToHandleChecked();
- } else {
- value = Object::ToNumber(isolate_, value).ToHandleChecked();
- }
- }
- }
- if (enabled_.mut_global) {
- if (value->IsWasmGlobalObject()) {
- auto global_object = Handle<WasmGlobalObject>::cast(value);
- if (global_object->type() != global.type) {
- ReportLinkError(
- "imported global does not match the expected type", index,
- module_name, import_name);
- return -1;
- }
- if (global_object->is_mutable() != global.mutability) {
- ReportLinkError(
- "imported global does not match the expected mutability",
- index, module_name, import_name);
- return -1;
- }
- if (global.mutability) {
- Handle<JSArrayBuffer> buffer(global_object->array_buffer(),
- isolate_);
- int index = num_imported_mutable_globals++;
- instance->imported_mutable_globals_buffers()->set(index, *buffer);
- // It is safe in this case to store the raw pointer to the buffer
- // since the backing store of the JSArrayBuffer will not be
- // relocated.
- instance->imported_mutable_globals()[index] =
- reinterpret_cast<Address>(
- raw_buffer_ptr(buffer, global_object->offset()));
- } else {
- WriteGlobalValue(global, global_object);
- }
- } else if (value->IsNumber()) {
- if (global.mutability) {
- ReportLinkError(
- "imported mutable global must be a WebAssembly.Global object",
- index, module_name, import_name);
- return -1;
- }
- WriteGlobalValue(global, value->Number());
- } else {
- ReportLinkError(
- "global import must be a number or WebAssembly.Global object",
- index, module_name, import_name);
- return -1;
- }
- } else {
- if (value->IsNumber()) {
- WriteGlobalValue(global, value->Number());
- } else {
- ReportLinkError("global import must be a number", index,
- module_name, import_name);
- return -1;
- }
- }
- break;
- }
- case kExternalException: {
- if (!value->IsWasmExceptionObject()) {
- ReportLinkError("exception import requires a WebAssembly.Exception",
- index, module_name, import_name);
- return -1;
- }
- Handle<WasmExceptionObject> imported_exception =
- Handle<WasmExceptionObject>::cast(value);
- if (!imported_exception->IsSignatureEqual(
- module_->exceptions[import.index].sig)) {
- ReportLinkError("imported exception does not match the expected type",
- index, module_name, import_name);
- return -1;
- }
- Object* exception_tag = imported_exception->exception_tag();
- DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
- instance->exceptions_table()->set(import.index, exception_tag);
- exception_wrappers_[import.index] = imported_exception;
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
-
- DCHECK_EQ(module_->num_imported_mutable_globals,
- num_imported_mutable_globals);
-
- return num_imported_functions;
-}
-
-template <typename T>
-T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
- return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
-}
-
-// Process initialization of globals.
-void InstanceBuilder::InitGlobals() {
- for (auto global : module_->globals) {
- if (global.mutability && global.imported) {
- continue;
- }
-
- switch (global.init.kind) {
- case WasmInitExpr::kI32Const:
- WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
- global.init.val.i32_const);
- break;
- case WasmInitExpr::kI64Const:
- WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
- global.init.val.i64_const);
- break;
- case WasmInitExpr::kF32Const:
- WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
- global.init.val.f32_const);
- break;
- case WasmInitExpr::kF64Const:
- WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
- global.init.val.f64_const);
- break;
- case WasmInitExpr::kGlobalIndex: {
- // Initialize with another global.
- uint32_t new_offset = global.offset;
- uint32_t old_offset =
- module_->globals[global.init.val.global_index].offset;
- TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
- ? sizeof(double)
- : sizeof(int32_t);
- memcpy(raw_buffer_ptr(globals_, new_offset),
- raw_buffer_ptr(globals_, old_offset), size);
- break;
- }
- case WasmInitExpr::kNone:
- // Happens with imported globals.
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
- if (num_pages > max_mem_pages()) {
- thrower_->RangeError("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
- SharedFlag shared_flag =
- is_shared_memory ? SharedFlag::kShared : SharedFlag::kNotShared;
- Handle<JSArrayBuffer> mem_buffer;
- if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, shared_flag)
- .ToHandle(&mem_buffer)) {
- thrower_->RangeError("Out of memory: wasm memory");
- }
- return mem_buffer;
-}
-
-bool InstanceBuilder::NeedsWrappers() const {
- if (module_->num_exported_functions > 0) return true;
- for (auto& table_instance : table_instances_) {
- if (!table_instance.js_wrappers.is_null()) return true;
- }
- for (auto& table : module_->tables) {
- if (table.exported) return true;
- }
- return false;
-}
-
-// Process the exports, creating wrappers for functions, tables, memories,
-// globals, and exceptions.
-void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
- isolate_);
- if (NeedsWrappers()) {
- // Fill the table to cache the exported JSFunction wrappers.
- js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
- Handle<JSFunction>::null());
-
- // If an imported WebAssembly function gets exported, the exported function
- // has to be identical to the imported function. Therefore we put all
- // imported WebAssembly functions into the js_wrappers_ list.
- for (int index = 0, end = static_cast<int>(module_->import_table.size());
- index < end; ++index) {
- const WasmImport& import = module_->import_table[index];
- if (import.kind == kExternalFunction) {
- Handle<Object> value = sanitized_imports_[index].value;
- if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
- js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
- }
- }
- }
- }
-
- Handle<JSObject> exports_object;
- bool is_asm_js = false;
- switch (module_->origin) {
- case kWasmOrigin: {
- // Create the "exports" object.
- exports_object = isolate_->factory()->NewJSObjectWithNullProto();
- break;
- }
- case kAsmJsOrigin: {
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate_->native_context()->object_function(), isolate_);
- exports_object = isolate_->factory()->NewJSObject(object_function);
- is_asm_js = true;
- break;
- }
- default:
- UNREACHABLE();
- }
- instance->set_exports_object(*exports_object);
-
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
- PropertyDescriptor desc;
- desc.set_writable(is_asm_js);
- desc.set_enumerable(true);
- desc.set_configurable(is_asm_js);
-
- // Process each export in the export table.
- int export_index = 0; // Index into {export_wrappers}.
- for (const WasmExport& exp : module_->export_table) {
- Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, exp.name)
- .ToHandleChecked();
- Handle<JSObject> export_to;
- if (is_asm_js && exp.kind == kExternalFunction &&
- String::Equals(isolate_, name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
- switch (exp.kind) {
- case kExternalFunction: {
- // Wrap and export the code as a JSFunction.
- const WasmFunction& function = module_->functions[exp.index];
- Handle<JSFunction> js_function = js_wrappers_[exp.index];
- if (js_function.is_null()) {
- // Wrap the exported code as a JSFunction.
- Handle<Code> export_code =
- export_wrappers->GetValueChecked<Code>(isolate_, export_index);
- MaybeHandle<String> func_name;
- if (is_asm_js) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- module_object_->native_module()->wire_bytes(),
- function.func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, function.func_index,
- static_cast<int>(function.sig->parameter_count()), export_code);
- js_wrappers_[exp.index] = js_function;
- }
- desc.set_value(js_function);
- export_index++;
- break;
- }
- case kExternalTable: {
- // Export a table as a WebAssembly.Table object.
- TableInstance& table_instance = table_instances_[exp.index];
- const WasmTable& table = module_->tables[exp.index];
- if (table_instance.table_object.is_null()) {
- uint32_t maximum = table.has_maximum_size ? table.maximum_size
- : FLAG_wasm_max_table_size;
- table_instance.table_object =
- WasmTableObject::New(isolate_, table.initial_size, maximum,
- &table_instance.js_wrappers);
- }
- desc.set_value(table_instance.table_object);
- break;
- }
- case kExternalMemory: {
- // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
- // should already be available if the module has memory, since we always
- // create or import it when building a WasmInstanceObject.
- DCHECK(instance->has_memory_object());
- desc.set_value(
- Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
- break;
- }
- case kExternalGlobal: {
- const WasmGlobal& global = module_->globals[exp.index];
- if (enabled_.mut_global) {
- Handle<JSArrayBuffer> buffer;
- uint32_t offset;
-
- if (global.mutability && global.imported) {
- Handle<FixedArray> buffers_array(
- instance->imported_mutable_globals_buffers(), isolate_);
- buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
- isolate_, global.index);
- Address global_addr =
- instance->imported_mutable_globals()[global.index];
-
- size_t buffer_size = buffer->byte_length();
- Address backing_store =
- reinterpret_cast<Address>(buffer->backing_store());
- CHECK(global_addr >= backing_store &&
- global_addr < backing_store + buffer_size);
- offset = static_cast<uint32_t>(global_addr - backing_store);
- } else {
- buffer = handle(instance->globals_buffer(), isolate_);
- offset = global.offset;
- }
-
- // Since the global's array buffer is always provided, allocation
- // should never fail.
- Handle<WasmGlobalObject> global_obj =
- WasmGlobalObject::New(isolate_, buffer, global.type, offset,
- global.mutability)
- .ToHandleChecked();
- desc.set_value(global_obj);
- } else {
- // Export the value of the global variable as a number.
- double num = 0;
- switch (global.type) {
- case kWasmI32:
- num = ReadLittleEndianValue<int32_t>(
- GetRawGlobalPtr<int32_t>(global));
- break;
- case kWasmF32:
- num =
- ReadLittleEndianValue<float>(GetRawGlobalPtr<float>(global));
- break;
- case kWasmF64:
- num = ReadLittleEndianValue<double>(
- GetRawGlobalPtr<double>(global));
- break;
- case kWasmI64:
- thrower_->LinkError(
- "export of globals of type I64 is not allowed.");
- return;
- default:
- UNREACHABLE();
- }
- desc.set_value(isolate_->factory()->NewNumber(num));
- }
- break;
- }
- case kExternalException: {
- const WasmException& exception = module_->exceptions[exp.index];
- Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
- if (wrapper.is_null()) {
- Handle<HeapObject> exception_tag(
- HeapObject::cast(instance->exceptions_table()->get(exp.index)),
- isolate_);
- wrapper =
- WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
- exception_wrappers_[exp.index] = wrapper;
- }
- desc.set_value(wrapper);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, export_to, name, &desc, kThrowOnError);
- if (!status.IsJust()) {
- TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>());
- thrower_->LinkError("export of %.*s failed.", trunc_name.length(),
- trunc_name.start());
- return;
- }
- }
- DCHECK_EQ(export_index, export_wrappers->length());
-
- if (module_->origin == kWasmOrigin) {
- v8::Maybe<bool> success =
- JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
- DCHECK(success.FromMaybe(false));
- USE(success);
- }
-}
-
-void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->tables.size();
- for (size_t index = 0; index < table_count; ++index) {
- const WasmTable& table = module_->tables[index];
- TableInstance& table_instance = table_instances_[index];
-
- if (!instance->has_indirect_function_table() &&
- table.type == kWasmAnyFunc) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table.initial_size);
- table_instance.table_size = table.initial_size;
- }
- }
-}
-
-void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
- NativeModule* native_module = module_object_->native_module();
- for (auto& table_init : module_->table_inits) {
- // Passive segments are not copied during instantiation.
- if (!table_init.active) continue;
-
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
- uint32_t index = table_init.table_index;
- TableInstance& table_instance = table_instances_[index];
- DCHECK(in_bounds(base, num_entries, table_instance.table_size));
- for (uint32_t i = 0; i < num_entries; ++i) {
- uint32_t func_index = table_init.entries[i];
- const WasmFunction* function = &module_->functions[func_index];
- int table_index = static_cast<int>(i + base);
-
- // Update the local dispatch table first.
- uint32_t sig_id = module_->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, table_index)
- .Set(sig_id, instance, func_index);
-
- if (!table_instance.table_object.is_null()) {
- // Update the table object's other dispatch tables.
- if (js_wrappers_[func_index].is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
- // TODO(titzer): We compile JS->wasm wrappers for functions are
- // not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
- isolate_, function->sig, function->imported);
- MaybeHandle<String> func_name;
- if (module_->origin == kAsmJsOrigin) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- native_module->wire_bytes(), func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()), wrapper_code);
- js_wrappers_[func_index] = js_function;
- }
- table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(
- isolate_, table_instance.table_object, table_index, function->sig,
- instance, func_index);
- }
- }
- }
-
- int table_count = static_cast<int>(module_->tables.size());
- for (int index = 0; index < table_count; ++index) {
- TableInstance& table_instance = table_instances_[index];
-
- // Add the new dispatch table at the end to avoid redundant lookups.
- if (!table_instance.table_object.is_null()) {
- WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
- instance, index);
- }
- }
-}
-
-void InstanceBuilder::InitializeExceptions(
- Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
- for (int index = 0; index < exceptions_table->length(); ++index) {
- if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
- // TODO(mstarzinger): Tags provide an object identity for each exception,
- // using {JSObject} here is a gigantic hack, and we should use a dedicated
- // object with a much lighter footprint for this purpose.
- Handle<HeapObject> exception_tag =
- isolate_->factory()->NewJSObjectWithNullProto();
- exceptions_table->set(index, *exception_tag);
- }
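+// Compiles the module with the trap handler disabled, so that memory accesses
+// are protected by explicit bounds checks instead of out-of-bounds traps.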
+void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* wasm_module,
+ NativeModule* native_module) {
+ native_module->DisableTrapHandler();
+ CompileNativeModule(isolate, thrower, wasm_module, native_module);
}
AsyncCompileJob::AsyncCompileJob(
@@ -2305,7 +896,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
void OnFinishedStream(OwnedVector<uint8_t> bytes) override;
- void OnError(DecodeResult result) override;
+ void OnError(const WasmError&) override;
void OnAbort() override;
@@ -2314,7 +905,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
private:
// Finishes the AsyncCompileJob with an error.
- void FinishAsyncCompileJobWithError(ResultBase result);
+ void FinishAsyncCompileJobWithError(const WasmError&);
void CommitCompilationUnits();
@@ -2333,7 +924,11 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
AsyncCompileJob::~AsyncCompileJob() {
background_task_manager_.CancelAndWait();
- if (native_module_) Impl(native_module_->compilation_state())->Abort();
+ // If the runtime objects have not been created yet, initial compilation has
+ // not finished. In that case we can still abort the compilation.
+ if (native_module_ && module_object_.is_null()) {
+ Impl(native_module_->compilation_state())->Abort();
+ }
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
// TODO(ahaas): Is this notification really necessary? Check
@@ -2343,19 +938,13 @@ AsyncCompileJob::~AsyncCompileJob() {
for (auto d : deferred_handles_) delete d;
}
-void AsyncCompileJob::PrepareRuntimeObjects(
+void AsyncCompileJob::CreateNativeModule(
std::shared_ptr<const WasmModule> module) {
// Embedder usage count for declared shared memories.
if (module->has_shared_memory) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
- // Create heap objects for script and module bytes to be stored in the
- // module object. Asm.js is not compiled asynchronously.
- Handle<Script> script =
- CreateWasmScript(isolate_, wire_bytes_, module->source_map_url);
- Handle<ByteArray> asm_js_offset_table;
-
// TODO(wasm): Improve efficiency of storing module wire bytes. Only store
// relevant sections, not function bodies.
@@ -2365,11 +954,29 @@ void AsyncCompileJob::PrepareRuntimeObjects(
// only have one {WasmModuleObject}. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
// Create the module object.
- module_object_ =
- WasmModuleObject::New(isolate_, enabled_features_, std::move(module),
- {std::move(bytes_copy_), wire_bytes_.length()},
- script, asm_js_offset_table);
- native_module_ = module_object_->native_module();
+
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get());
+ native_module_ = isolate_->wasm_engine()->code_manager()->NewNativeModule(
+ isolate_, enabled_features_, code_size_estimate,
+ wasm::NativeModule::kCanAllocateMoreMemory, std::move(module));
+ native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()});
+ native_module_->SetRuntimeStubs(isolate_);
+
+ if (stream_) stream_->NotifyNativeModuleCreated(native_module_);
+}
+
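+// Creates the heap objects (script and module object) for an already existing
+// {NativeModule}; called from {FinishCompile} unless the module was
+// deserialized.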
+void AsyncCompileJob::PrepareRuntimeObjects() {
+ // Create heap objects for script and module bytes to be stored in the
+ // module object. Asm.js is not compiled asynchronously.
+ const WasmModule* module = native_module_->module();
+ Handle<Script> script =
+ CreateWasmScript(isolate_, wire_bytes_, module->source_map_url);
+
+ size_t code_size_estimate =
+ wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module);
+ module_object_ = WasmModuleObject::New(isolate_, native_module_, script,
+ code_size_estimate);
{
DeferredHandleScope deferred(isolate_);
@@ -2380,7 +987,11 @@ void AsyncCompileJob::PrepareRuntimeObjects(
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
-void AsyncCompileJob::FinishCompile(bool compile_wrappers) {
+void AsyncCompileJob::FinishCompile() {
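+ // After deserialization the module object already exists, so the runtime
+ // objects only need to be created on the regular compilation path.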
+ bool is_after_deserialization = !module_object_.is_null();
+ if (!is_after_deserialization) {
+ PrepareRuntimeObjects();
+ }
DCHECK(!isolate_->context().is_null());
// Finish the wasm script now and make it public to the debugger.
Handle<Script> script(module_object_->script(), isolate_);
@@ -2393,18 +1004,18 @@ void AsyncCompileJob::FinishCompile(bool compile_wrappers) {
isolate_->debug()->OnAfterCompile(script);
// We can only update the feature counts once the entire compile is done.
- auto compilation_state = Impl(native_module_->compilation_state());
+ auto compilation_state =
+ Impl(module_object_->native_module()->compilation_state());
compilation_state->PublishDetectedFeatures(
isolate_, *compilation_state->detected_features());
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
- if (compile_wrappers) {
- DoSync<CompileWrappers>();
- } else {
- // TODO(wasm): compiling wrappers should be made async as well.
- DoSync<AsyncCompileJob::FinishModule>();
+ if (!is_after_deserialization) {
+ // TODO(wasm): compiling wrappers should be made async.
+ CompileWrappers();
}
+ FinishModule();
}
void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) {
@@ -2422,7 +1033,7 @@ class AsyncCompileJob::CompilationStateCallback {
public:
explicit CompilationStateCallback(AsyncCompileJob* job) : job_(job) {}
- void operator()(CompilationEvent event, const VoidResult* error_result) {
+ void operator()(CompilationEvent event, const WasmError* error) {
// This callback is only being called from a foreground task.
switch (event) {
case CompilationEvent::kFinishedBaselineCompilation:
@@ -2430,25 +1041,17 @@ class AsyncCompileJob::CompilationStateCallback {
if (job_->DecrementAndCheckFinisherCount()) {
SaveContext saved_context(job_->isolate());
job_->isolate()->set_context(*job_->native_context_);
- job_->FinishCompile(true);
+ job_->FinishCompile();
}
break;
case CompilationEvent::kFinishedTopTierCompilation:
DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
- // Notify embedder that compilation is finished.
- if (job_->stream_ && job_->stream_->module_compiled_callback()) {
- job_->stream_->module_compiled_callback()(job_->module_object_);
- }
- // If a foreground task or a finisher is pending, we rely on
- // FinishModule to remove the job.
- if (!job_->pending_foreground_task_ &&
- job_->outstanding_finishers_.load() == 0) {
- job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
- }
+ // This callback should not react to the top-tier-finished event,
+ // since the job might already be gone by then.
break;
case CompilationEvent::kFailedCompilation:
DCHECK(!last_event_.has_value());
- DCHECK_NOT_NULL(error_result);
+ DCHECK_NOT_NULL(error);
// Tier-up compilation should not fail if baseline compilation
// did not fail.
DCHECK(!Impl(job_->native_module_->compilation_state())
@@ -2458,7 +1061,7 @@ class AsyncCompileJob::CompilationStateCallback {
SaveContext saved_context(job_->isolate());
job_->isolate()->set_context(*job_->native_context_);
ErrorThrower thrower(job_->isolate(), "AsyncCompilation");
- thrower.CompileFailed(*error_result);
+ thrower.CompileFailed(nullptr, *error);
Handle<Object> error = thrower.Reify();
DeferredHandleScope deferred(job_->isolate());
@@ -2629,7 +1232,7 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
- job->DoSync<DecodeFail>(std::move(result));
+ job->DoSync<DecodeFail>(std::move(result).error());
} else {
// Decode passed.
job->DoSync<PrepareAndStartCompile>(std::move(result).value(), true);
@@ -2645,14 +1248,15 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
//==========================================================================
class AsyncCompileJob::DecodeFail : public CompileStep {
public:
- explicit DecodeFail(ModuleResult result) : result_(std::move(result)) {}
+ explicit DecodeFail(WasmError error) : error_(std::move(error)) {}
private:
- ModuleResult result_;
+ WasmError error_;
+
void RunInForeground(AsyncCompileJob* job) override {
TRACE_COMPILE("(1b) Decoding failed.\n");
ErrorThrower thrower(job->isolate_, "AsyncCompile");
- thrower.CompileFailed("Wasm decoding failed", result_);
+ thrower.CompileFailed("Wasm decoding failed", error_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
return job->AsyncCompileFailed(thrower.Reify());
}
@@ -2678,20 +1282,20 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// is done.
job->background_task_manager_.CancelAndWait();
- job->PrepareRuntimeObjects(module_);
+ job->CreateNativeModule(module_);
size_t num_functions =
module_->functions.size() - module_->num_imported_functions;
if (num_functions == 0) {
// Degenerate case of an empty module.
- job->FinishCompile(true);
+ job->FinishCompile();
return;
}
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
- compilation_state->SetCallback(CompilationStateCallback{job});
+ compilation_state->AddCallback(CompilationStateCallback{job});
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
@@ -2701,7 +1305,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
compilation_state->SetNumberOfFunctionsToCompile(
module_->num_declared_functions);
// Add compilation units and kick off compilation.
- InitializeCompilationUnits(job->native_module_,
+ InitializeCompilationUnits(job->native_module_.get(),
job->isolate()->wasm_engine());
}
}
@@ -2724,77 +1328,61 @@ class AsyncCompileJob::CompileFailed : public CompileStep {
Handle<Object> error_reason_;
};
-//==========================================================================
-// Step 5 (sync): Compile JS->wasm wrappers.
-//==========================================================================
-class AsyncCompileJob::CompileWrappers : public CompileStep {
+void AsyncCompileJob::CompileWrappers() {
// TODO(wasm): Compile all wrappers here, including the start function wrapper
// and the wrappers for the function table elements.
- void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(5) Compile wrappers...\n");
- // Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(
- job->isolate_, job->module_object_->native_module(),
- handle(job->module_object_->export_wrappers(), job->isolate_));
- job->DoSync<FinishModule>();
- }
-};
+ TRACE_COMPILE("(5) Compile wrappers...\n");
+ // Compile JS->wasm wrappers for exported functions.
+ CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
+ handle(module_object_->export_wrappers(), isolate_));
+}
-//==========================================================================
-// Step 6 (sync): Finish the module and resolve the promise.
-//==========================================================================
-class AsyncCompileJob::FinishModule : public CompileStep {
- void RunInForeground(AsyncCompileJob* job) override {
- TRACE_COMPILE("(6) Finish module...\n");
- job->AsyncCompileSucceeded(job->module_object_);
-
- size_t num_functions = job->native_module_->num_functions() -
- job->native_module_->num_imported_functions();
- auto* compilation_state = Impl(job->native_module_->compilation_state());
- if (compilation_state->compile_mode() == CompileMode::kRegular ||
- num_functions == 0) {
- // If we do not tier up, the async compile job is done here and
- // can be deleted.
- job->isolate_->wasm_engine()->RemoveCompileJob(job);
- return;
- }
- DCHECK_EQ(CompileMode::kTiering, compilation_state->compile_mode());
- if (!compilation_state->has_outstanding_units()) {
- job->isolate_->wasm_engine()->RemoveCompileJob(job);
- }
+void AsyncCompileJob::FinishModule() {
+ TRACE_COMPILE("(6) Finish module...\n");
+ AsyncCompileSucceeded(module_object_);
+
+ size_t num_functions = native_module_->num_functions() -
+ native_module_->num_imported_functions();
+ auto* compilation_state = Impl(native_module_->compilation_state());
+ if (compilation_state->compile_mode() == CompileMode::kRegular ||
+ num_functions == 0) {
+ // If we do not tier up, the async compile job is done here and
+ // can be deleted.
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ return;
}
-};
+ DCHECK_EQ(CompileMode::kTiering, compilation_state->compile_mode());
+ if (compilation_state->baseline_compilation_finished()) {
+ isolate_->wasm_engine()->RemoveCompileJob(this);
+ }
+}
AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
: decoder_(job->enabled_features_),
job_(job),
compilation_unit_builder_(nullptr) {}
-void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
- DCHECK(error.failed());
+void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
+ const WasmError& error) {
+ DCHECK(error.has_error());
// Make sure all background tasks have stopped executing before we change
// the state of the AsyncCompileJob to DecodeFail.
job_->background_task_manager_.CancelAndWait();
- // Create a ModuleResult from the result we got as parameter. Since there was
- // an error, we don't have to provide a real wasm module to the ModuleResult.
- ModuleResult result = ModuleResult::ErrorFrom(std::move(error));
-
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationStateImpl as well.
if (job_->native_module_) {
Impl(job_->native_module_->compilation_state())->Abort();
job_->DoSync<AsyncCompileJob::DecodeFail,
- AsyncCompileJob::kUseExistingForegroundTask>(
- std::move(result));
+ AsyncCompileJob::kUseExistingForegroundTask>(error);
// Clear the {compilation_unit_builder_} if it exists. This is needed
// because there is a check in the destructor of the
// {CompilationUnitBuilder} that it is empty.
if (compilation_unit_builder_) compilation_unit_builder_->Clear();
} else {
- job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ job_->DoSync<AsyncCompileJob::DecodeFail>(error);
}
}
@@ -2806,7 +1394,7 @@ bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
job_->isolate()->wasm_engine()->allocator());
decoder_.DecodeModuleHeader(bytes, offset);
if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
return true;
@@ -2838,7 +1426,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
constexpr bool verify_functions = false;
decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
if (!decoder_.ok()) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
return true;
@@ -2852,7 +1440,7 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
functions_count);
if (!decoder_.CheckFunctionsCount(static_cast<uint32_t>(functions_count),
offset)) {
- FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
+ FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false).error());
return false;
}
// Execute the PrepareAndStartCompile step immediately and not in a separate
@@ -2869,7 +1457,7 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
compilation_unit_builder_.reset(new CompilationUnitBuilder(
- job_->native_module_, job_->isolate()->wasm_engine()));
+ job_->native_module_.get(), job_->isolate()->wasm_engine()));
return true;
}
@@ -2903,28 +1491,35 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
ModuleResult result = decoder_.FinishDecoding(false);
- DCHECK(result.ok());
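+ // Decoding may fail at the very end of the stream, e.g. on a count
+ // mismatch that only becomes detectable once all sections were seen.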
+ if (result.failed()) {
+ FinishAsyncCompileJobWithError(result.error());
+ return;
+ }
+ // We have to open a HandleScope and prepare the Context for
+ // CreateNativeModule, PrepareRuntimeObjects and FinishCompile as this is a
+ // callback from the embedder.
+ HandleScope scope(job_->isolate_);
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->native_context_);
+
bool needs_finish = job_->DecrementAndCheckFinisherCount();
if (job_->native_module_ == nullptr) {
// We are processing a WebAssembly module without code section. Create the
// runtime objects now (would otherwise happen in {PrepareAndStartCompile}).
- job_->PrepareRuntimeObjects(std::move(result).value());
+ job_->CreateNativeModule(std::move(result).value());
DCHECK(needs_finish);
}
job_->wire_bytes_ = ModuleWireBytes(bytes.as_vector());
job_->native_module_->SetWireBytes(std::move(bytes));
if (needs_finish) {
- HandleScope scope(job_->isolate_);
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->native_context_);
- job_->FinishCompile(true);
+ job_->FinishCompile();
}
}
// Report an error detected in the StreamingDecoder.
-void AsyncStreamingProcessor::OnError(DecodeResult result) {
+void AsyncStreamingProcessor::OnError(const WasmError& error) {
TRACE_STREAMING("Stream error...\n");
- FinishAsyncCompileJobWithError(std::move(result));
+ FinishAsyncCompileJobWithError(error);
}
void AsyncStreamingProcessor::OnAbort() {
@@ -2950,11 +1545,11 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
job_->module_object_ = handle(*job_->module_object_, job_->isolate_);
job_->deferred_handles_.push_back(deferred.Detach());
}
- job_->native_module_ = job_->module_object_->native_module();
+ job_->native_module_ = job_->module_object_->shared_native_module();
auto owned_wire_bytes = OwnedVector<uint8_t>::Of(wire_bytes);
job_->wire_bytes_ = ModuleWireBytes(owned_wire_bytes.as_vector());
job_->native_module_->SetWireBytes(std::move(owned_wire_bytes));
- job_->FinishCompile(false);
+ job_->FinishCompile();
return true;
}
@@ -2978,6 +1573,8 @@ CompilationStateImpl::CompilationStateImpl(internal::Isolate* isolate,
CompilationStateImpl::~CompilationStateImpl() {
DCHECK(background_task_manager_.canceled());
DCHECK(foreground_task_manager_.canceled());
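+ // {compile_error_} is published through an atomic pointer (see {SetError})
+ // and has to be freed manually here.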
+ CompilationError* error = compile_error_.load(std::memory_order_acquire);
+ if (error != nullptr) delete error;
}
void CompilationStateImpl::CancelAndWait() {
@@ -2987,6 +1584,7 @@ void CompilationStateImpl::CancelAndWait() {
void CompilationStateImpl::SetNumberOfFunctionsToCompile(size_t num_functions) {
DCHECK(!failed());
+ base::MutexGuard guard(&mutex_);
outstanding_baseline_units_ = num_functions;
if (compile_mode_ == CompileMode::kTiering) {
@@ -2994,9 +1592,8 @@ void CompilationStateImpl::SetNumberOfFunctionsToCompile(size_t num_functions) {
}
}
-void CompilationStateImpl::SetCallback(callback_t callback) {
- DCHECK_NULL(callback_);
- callback_ = std::move(callback);
+void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
+ callbacks_.emplace_back(std::move(callback));
}
void CompilationStateImpl::AddCompilationUnits(
@@ -3044,8 +1641,8 @@ CompilationStateImpl::GetNextCompilationUnit() {
std::unique_ptr<WasmCompilationUnit>
CompilationStateImpl::GetNextExecutedUnit() {
- base::MutexGuard guard(&mutex_);
std::vector<std::unique_ptr<WasmCompilationUnit>>& units = finish_units();
+ base::MutexGuard guard(&mutex_);
if (units.empty()) return {};
std::unique_ptr<WasmCompilationUnit> ret = std::move(units.back());
units.pop_back();
@@ -3053,69 +1650,97 @@ CompilationStateImpl::GetNextExecutedUnit() {
}
bool CompilationStateImpl::HasCompilationUnitToFinish() {
- base::MutexGuard guard(&mutex_);
return !finish_units().empty();
}
-void CompilationStateImpl::OnFinishedUnit() {
+void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
+ // This mutex guarantees that events happen in the right order.
+ base::MutexGuard guard(&mutex_);
+
+ if (failed()) return;
+
// If we are *not* compiling in tiering mode, then all units are counted as
// baseline units.
bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
- bool is_tiering_unit = is_tiering_mode && outstanding_baseline_units_ == 0;
+ bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
// Sanity check: If we are not in tiering mode, there cannot be outstanding
// tiering units.
DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
+ // Bitset of events to deliver.
+ base::EnumSet<CompilationEvent> events;
+
if (is_tiering_unit) {
DCHECK_LT(0, outstanding_tiering_units_);
--outstanding_tiering_units_;
if (outstanding_tiering_units_ == 0) {
- // We currently finish all baseline units before finishing tiering units.
- DCHECK_EQ(0, outstanding_baseline_units_);
- NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation, nullptr);
+ // If baseline compilation has not finished yet, then also trigger
+ // {kFinishedBaselineCompilation}.
+ if (outstanding_baseline_units_ > 0) {
+ events.Add(CompilationEvent::kFinishedBaselineCompilation);
+ }
+ events.Add(CompilationEvent::kFinishedTopTierCompilation);
}
} else {
DCHECK_LT(0, outstanding_baseline_units_);
--outstanding_baseline_units_;
if (outstanding_baseline_units_ == 0) {
- NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation, nullptr);
+ events.Add(CompilationEvent::kFinishedBaselineCompilation);
// If we are not tiering, then we also trigger the "top tier finished"
// event when baseline compilation is finished.
if (!is_tiering_mode) {
- NotifyOnEvent(CompilationEvent::kFinishedTopTierCompilation, nullptr);
+ events.Add(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ }
+ }
+
+ if (!events.empty()) {
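+ // The registered callbacks have to run on the foreground thread, so post
+ // a task instead of invoking them directly from a compile task.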
+ auto notify_events = [this, events] {
+ for (auto event : {CompilationEvent::kFinishedBaselineCompilation,
+ CompilationEvent::kFinishedTopTierCompilation}) {
+ if (!events.contains(event)) continue;
+ NotifyOnEvent(event, nullptr);
}
+ };
+ foreground_task_runner_->PostTask(
+ MakeCancelableTask(&foreground_task_manager_, notify_events));
+ }
+
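+ // Code logging also happens on the foreground thread; batch newly finished
+ // code objects into a single pending {LogCodesTask}.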
+ if (should_log_code_ && code != nullptr) {
+ if (log_codes_task_ == nullptr) {
+ auto new_task = base::make_unique<LogCodesTask>(&foreground_task_manager_,
+ this, isolate_);
+ log_codes_task_ = new_task.get();
+ foreground_task_runner_->PostTask(std::move(new_task));
}
+ log_codes_task_->AddCode(code);
}
}
-void CompilationStateImpl::ScheduleUnitForFinishing(
- std::unique_ptr<WasmCompilationUnit> unit, ExecutionTier tier) {
- base::MutexGuard guard(&mutex_);
- if (compile_mode_ == CompileMode::kTiering &&
- tier == ExecutionTier::kOptimized) {
- tiering_finish_units_.push_back(std::move(unit));
- } else {
- baseline_finish_units_.push_back(std::move(unit));
+void CompilationStateImpl::RestartBackgroundCompileTask() {
+ auto task = base::make_unique<BackgroundCompileTask>(
+ &background_task_manager_, native_module_, isolate_->counters());
+
+ // If --wasm-num-compilation-tasks=0 is passed, spawn only foreground
+ // tasks. This is used to make timing deterministic.
+ if (FLAG_wasm_num_compilation_tasks == 0) {
+ foreground_task_runner_->PostTask(std::move(task));
+ return;
}
- if (!finisher_is_running_ && !compile_error_) {
- ScheduleFinisherTask();
- // We set the flag here so that not more than one finisher is started.
- finisher_is_running_ = true;
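+ // After baseline compilation has finished, the remaining units are tier-up
+ // work, which is not latency-critical and may run at low priority.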
+ if (baseline_compilation_finished()) {
+ V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
+ std::move(task));
+ } else {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
-void CompilationStateImpl::ScheduleCodeLogging(WasmCode* code) {
- if (!should_log_code_) return;
+void CompilationStateImpl::ReportDetectedFeatures(
+ const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
- if (log_codes_task_ == nullptr) {
- auto new_task = base::make_unique<LogCodesTask>(&foreground_task_manager_,
- this, isolate_);
- log_codes_task_ = new_task.get();
- foreground_task_runner_->PostTask(std::move(new_task));
- }
- log_codes_task_->AddCode(code);
+ UnionFeaturesInto(&detected_features_, detected);
}
void CompilationStateImpl::OnBackgroundTaskStopped(
@@ -3141,7 +1766,7 @@ void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
{
base::MutexGuard guard(&mutex_);
// No need to restart tasks if compilation already failed.
- if (compile_error_) return;
+ if (failed()) return;
DCHECK_LE(num_background_tasks_, max_background_tasks_);
if (num_background_tasks_ == max_background_tasks_) return;
@@ -3153,16 +1778,7 @@ void CompilationStateImpl::RestartBackgroundTasks(size_t max) {
}
for (; num_restart > 0; --num_restart) {
- auto task = base::make_unique<BackgroundCompileTask>(
- &background_task_manager_, native_module_, isolate_->counters());
-
- // If --wasm-num-compilation-tasks=0 is passed, spawn only foreground
- // tasks. This is used to make timing deterministic.
- if (FLAG_wasm_num_compilation_tasks > 0) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- } else {
- foreground_task_runner_->PostTask(std::move(task));
- }
+ RestartBackgroundCompileTask();
}
}
@@ -3179,44 +1795,55 @@ void CompilationStateImpl::ScheduleFinisherTask() {
}
void CompilationStateImpl::Abort() {
- {
- base::MutexGuard guard(&mutex_);
- if (!compile_error_) {
- compile_error_ = base::make_unique<CompilationError>(
- 0, VoidResult::Error(0, "Compilation aborted"));
- }
- }
+ SetError(0, WasmError{0, "Compilation aborted"});
background_task_manager_.CancelAndWait();
+ // No more callbacks after abort. Don't free the std::function objects here,
+ // since this might clear references in the embedder, which is only allowed on
+ // the main thread.
+ aborted_.store(true);
+ if (!callbacks_.empty()) {
+ foreground_task_runner_->PostTask(
+ base::make_unique<FreeCallbacksTask>(this));
+ }
}
void CompilationStateImpl::SetError(uint32_t func_index,
- const ResultBase& error_result) {
- DCHECK(error_result.failed());
- base::MutexGuard guard(&mutex_);
- // Ignore all but the first error.
- if (compile_error_) return;
- compile_error_ =
- base::make_unique<CompilationError>(func_index, error_result);
+ const WasmError& error) {
+ DCHECK(error.has_error());
+ std::unique_ptr<CompilationError> compile_error =
+ base::make_unique<CompilationError>(func_index, error);
+ CompilationError* expected = nullptr;
+ bool set = compile_error_.compare_exchange_strong(
+ expected, compile_error.get(), std::memory_order_acq_rel);
+ // Ignore all but the first error. If the previous value is not nullptr, just
+ // return (and free the allocated error).
+ if (!set) return;
+ // If set successfully, give up ownership.
+ compile_error.release();
// Schedule a foreground task to call the callback and notify users about the
// compile error.
foreground_task_runner_->PostTask(
MakeCancelableTask(&foreground_task_manager_, [this] {
- VoidResult error_result = GetCompileError();
- NotifyOnEvent(CompilationEvent::kFailedCompilation, &error_result);
+ WasmError error = GetCompileError();
+ NotifyOnEvent(CompilationEvent::kFailedCompilation, &error);
}));
}
void CompilationStateImpl::NotifyOnEvent(CompilationEvent event,
- const VoidResult* error_result) {
+ const WasmError* error) {
+ if (aborted_.load()) return;
HandleScope scope(isolate_);
- if (callback_) callback_(event, error_result);
+ for (auto& callback : callbacks_) callback(event, error);
+ // If no more events are expected after this one, clear the callbacks to free
+ // memory. We can safely do this here, as this method is only called from
+ // foreground tasks.
+ if (event >= CompilationEvent::kFirstFinalEvent) callbacks_.clear();
}
-void CompileJsToWasmWrappers(Isolate* isolate, NativeModule* native_module,
+void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
JSToWasmWrapperCache js_to_wasm_cache;
int wrapper_index = 0;
- const WasmModule* module = native_module->module();
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
@@ -3251,8 +1878,7 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
DCHECK(name_chars >= 0 && name_chars < kBufferSize);
MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
+ VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars), TENURED);
script->set_name(*name_str.ToHandleChecked());
if (source_map_url.size() != 0) {
@@ -3267,7 +1893,6 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace internal
} // namespace v8
-#undef TRACE
#undef TRACE_COMPILE
#undef TRACE_STREAMING
#undef TRACE_LAZY
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index 45af3a3dd9f..7f860ac0363 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -42,13 +42,13 @@ std::unique_ptr<NativeModule> CompileToNativeModule(
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out);
-MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
- MaybeHandle<JSArrayBuffer> memory);
+void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
+ ErrorThrower* thrower,
+ const WasmModule* wasm_module,
+ NativeModule* native_module);
V8_EXPORT_PRIVATE
-void CompileJsToWasmWrappers(Isolate* isolate, NativeModule* native_module,
+void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
@@ -93,8 +93,6 @@ class AsyncCompileJob {
class DecodeFail; // Step 1b (sync)
class PrepareAndStartCompile; // Step 2 (sync)
class CompileFailed; // Step 4b (sync)
- class CompileWrappers; // Step 5 (sync)
- class FinishModule; // Step 6 (sync)
friend class AsyncStreamingProcessor;
@@ -105,14 +103,19 @@ class AsyncCompileJob {
return outstanding_finishers_.fetch_sub(1) == 1;
}
- void PrepareRuntimeObjects(std::shared_ptr<const WasmModule>);
+ void CreateNativeModule(std::shared_ptr<const WasmModule> module);
+ void PrepareRuntimeObjects();
- void FinishCompile(bool compile_wrappers);
+ void FinishCompile();
void AsyncCompileFailed(Handle<Object> error_reason);
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
+ void CompileWrappers();
+
+ void FinishModule();
+
void StartForegroundTask();
void ExecuteForegroundTaskImmediately();
@@ -158,7 +161,7 @@ class AsyncCompileJob {
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
- NativeModule* native_module_ = nullptr;
+ std::shared_ptr<NativeModule> native_module_;
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index dd2397d0f93..25a6633178c 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -9,7 +9,6 @@
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/flags.h"
-#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/v8.h"
@@ -31,8 +30,6 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
-constexpr char kExceptionString[] = "exception";
-constexpr char kUnknownString[] = "<unknown>";
template <size_t N>
constexpr size_t num_chars(const char (&)[N]) {
@@ -83,14 +80,16 @@ const char* SectionName(SectionCode code) {
return "Element";
case kDataSectionCode:
return "Data";
+ case kExceptionSectionCode:
+ return "Exception";
+ case kDataCountSectionCode:
+ return "DataCount";
case kNameSectionCode:
return kNameString;
case kSourceMappingURLSectionCode:
return kSourceMappingURLString;
- case kExceptionSectionCode:
- return kExceptionString;
default:
- return kUnknownString;
+ return "<unknown>";
}
}
@@ -333,6 +332,30 @@ class ModuleDecoderImpl : public Decoder {
#undef BYTES
}
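+ // Checks that the out-of-order section {section_code} appears after the
+ // {prev_section_code} section and before the {next_section_code} section.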
+ bool CheckSectionOrder(SectionCode section_code,
+ SectionCode prev_section_code,
+ SectionCode next_section_code) {
+ if (next_ordered_section_ > next_section_code) {
+ errorf(pc(), "The %s section must appear before the %s section",
+ SectionName(section_code), SectionName(next_section_code));
+ return false;
+ }
+ if (next_ordered_section_ <= prev_section_code) {
+ next_ordered_section_ = prev_section_code + 1;
+ }
+ return true;
+ }
+
+ bool CheckUnorderedSection(SectionCode section_code) {
+ if (has_seen_unordered_section(section_code)) {
+ errorf(pc(), "Multiple %s sections not allowed",
+ SectionName(section_code));
+ return false;
+ }
+ set_seen_unordered_section(section_code);
+ return true;
+ }
+
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
if (failed()) return;
@@ -351,20 +374,17 @@ class ModuleDecoderImpl : public Decoder {
switch (section_code) {
case kUnknownSectionCode:
break;
- case kExceptionSectionCode:
- // Note: kExceptionSectionCode > kExportSectionCode, but must appear
- // before the export (and code) section, as well as after the import
- // section. Hence, treat it as a special case.
- if (seen_unordered_sections_ & (1 << kExceptionSectionCode)) {
- errorf(pc(), "Multiple exception sections not allowed");
+ case kDataCountSectionCode:
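+ // The DataCount section must appear between the Element and the Code
+ // section.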
+ if (!CheckUnorderedSection(section_code)) return;
+ if (!CheckSectionOrder(section_code, kElementSectionCode,
+ kCodeSectionCode))
return;
- } else if (next_ordered_section_ > kExportSectionCode) {
- errorf(pc(), "Exception section must appear before export section");
+ break;
+ case kExceptionSectionCode:
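+ // The Exception section must appear between the Global and the Export
+ // section.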
+ if (!CheckUnorderedSection(section_code)) return;
+ if (!CheckSectionOrder(section_code, kGlobalSectionCode,
+ kExportSectionCode))
return;
- } else if (next_ordered_section_ <= kImportSectionCode) {
- next_ordered_section_ = kImportSectionCode + 1;
- }
- seen_unordered_sections_ |= 1 << kExceptionSectionCode;
break;
case kSourceMappingURLSectionCode:
// sourceMappingURL is a custom section and currently can occur anywhere
@@ -422,6 +442,13 @@ class ModuleDecoderImpl : public Decoder {
case kSourceMappingURLSectionCode:
DecodeSourceMappingURLSection();
break;
+ case kDataCountSectionCode:
+ if (enabled_features_.bulk_memory) {
+ DecodeDataCountSection();
+ } else {
+ errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ }
+ break;
case kExceptionSectionCode:
if (enabled_features_.eh) {
DecodeExceptionSection();
@@ -534,11 +561,7 @@ class ModuleDecoderImpl : public Decoder {
global->type = consume_value_type();
global->mutability = consume_mutability();
if (global->mutability) {
- if (enabled_features_.mut_global) {
- module_->num_imported_mutable_globals++;
- } else {
- error("mutable globals cannot be imported");
- }
+ module_->num_imported_mutable_globals++;
}
break;
}
@@ -682,9 +705,6 @@ class ModuleDecoderImpl : public Decoder {
WasmGlobal* global = nullptr;
exp->index = consume_global_index(module_.get(), &global);
if (global) {
- if (!enabled_features_.mut_global && global->mutability) {
- error("mutable globals cannot be exported");
- }
global->exported = true;
}
break;
@@ -776,12 +796,12 @@ class ModuleDecoderImpl : public Decoder {
uint32_t num_elem =
consume_count("number of elements", kV8MaxWasmTableEntries);
if (is_active) {
- module_->table_inits.emplace_back(table_index, offset);
+ module_->elem_segments.emplace_back(table_index, offset);
} else {
- module_->table_inits.emplace_back();
+ module_->elem_segments.emplace_back();
}
- WasmTableInit* init = &module_->table_inits.back();
+ WasmElemSegment* init = &module_->elem_segments.back();
for (uint32_t j = 0; j < num_elem; j++) {
WasmFunction* func = nullptr;
uint32_t index = consume_func_index(module_.get(), &func);
@@ -835,9 +855,21 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ bool CheckDataSegmentsCount(uint32_t data_segments_count) {
+ if (has_seen_unordered_section(kDataCountSectionCode) &&
+ data_segments_count != module_->num_declared_data_segments) {
+ errorf(pc(), "data segments count %u mismatch (%u expected)",
+ data_segments_count, module_->num_declared_data_segments);
+ return false;
+ }
+ return true;
+ }
+
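CheckDataSegmentsCount runs both when the Data section is decoded and again at FinishDecoding (via CheckMismatchedCounts), so a module that pre-declares segments in DataCount but omits the Data section is still rejected. A hedged sketch of the invariant, using an illustrative std::optional in place of num_declared_data_segments:

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Sketch: the DataCount section (bulk-memory proposal) pre-declares how
    // many data segments the Data section must define; {declared_count} is an
    // illustrative stand-in for module_->num_declared_data_segments.
    std::optional<uint32_t> declared_count;

    bool CheckDataSegmentsCount(uint32_t actual) {
      return !declared_count.has_value() || *declared_count == actual;
    }

    int main() {
      declared_count = 2;
      std::printf("%d\n", CheckDataSegmentsCount(2));  // 1: counts agree
      std::printf("%d\n", CheckDataSegmentsCount(0));  // 0: Data section absent
    }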
void DecodeDataSection() {
uint32_t data_segments_count =
consume_count("data segments count", kV8MaxWasmDataSegments);
+ if (!CheckDataSegmentsCount(data_segments_count)) return;
+
module_->data_segments.reserve(data_segments_count);
for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
const byte* pos = pc();
@@ -881,8 +913,8 @@ class ModuleDecoderImpl : public Decoder {
void DecodeNameSection() {
// TODO(titzer): find a way to report name errors as warnings.
    // Ignore all but the first occurrence of the name section.
- if (!(seen_unordered_sections_ & (1 << kNameSectionCode))) {
- seen_unordered_sections_ |= 1 << kNameSectionCode;
+ if (!has_seen_unordered_section(kNameSectionCode)) {
+ set_seen_unordered_section(kNameSectionCode);
// Use an inner decoder so that errors don't fail the outer decoder.
Decoder inner(start_, pc_, end_, buffer_offset_);
// Decode all name subsections.
@@ -912,16 +944,21 @@ class ModuleDecoderImpl : public Decoder {
Decoder inner(start_, pc_, end_, buffer_offset_);
WireBytesRef url = wasm::consume_string(inner, true, "module name");
if (inner.ok() &&
- !(seen_unordered_sections_ & (1 << kSourceMappingURLSectionCode))) {
+ !has_seen_unordered_section(kSourceMappingURLSectionCode)) {
const byte* url_start =
inner.start() + inner.GetBufferRelativeOffset(url.offset());
module_->source_map_url.assign(reinterpret_cast<const char*>(url_start),
url.length());
- seen_unordered_sections_ |= 1 << kSourceMappingURLSectionCode;
+ set_seen_unordered_section(kSourceMappingURLSectionCode);
}
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeDataCountSection() {
+ module_->num_declared_data_segments =
+ consume_count("data segments count", kV8MaxWasmDataSegments);
+ }
+
void DecodeExceptionSection() {
uint32_t exception_count =
consume_count("exception count", kV8MaxWasmExceptions);
@@ -935,14 +972,37 @@ class ModuleDecoderImpl : public Decoder {
}
}
+ bool CheckMismatchedCounts() {
+ // The declared vs. defined function count is normally checked when
+ // decoding the code section, but we have to check it here too in case the
+ // code section is absent.
+ if (module_->num_declared_functions != 0) {
+ DCHECK_LT(module_->num_imported_functions, module_->functions.size());
+ // We know that the code section has been decoded if the first
+ // non-imported function has its code set.
+ if (!module_->functions[module_->num_imported_functions].code.is_set()) {
+ errorf(pc(), "function count is %u, but code section is absent",
+ module_->num_declared_functions);
+ return false;
+ }
+ }
+ // Perform a similar check for the DataCount and Data sections, where data
+ // segments are declared but the Data section is absent.
+ if (!CheckDataSegmentsCount(
+ static_cast<uint32_t>(module_->data_segments.size()))) {
+ return false;
+ }
+ return true;
+ }
+
ModuleResult FinishDecoding(bool verify_functions = true) {
- if (ok()) {
+ if (ok() && CheckMismatchedCounts()) {
CalculateGlobalOffsets(module_.get());
}
ModuleResult result = toResult(std::move(module_));
- if (verify_functions && result.ok() && intermediate_result_.failed()) {
- // Copy error code and location.
- result = ModuleResult::ErrorFrom(std::move(intermediate_result_));
+ if (verify_functions && result.ok() && intermediate_error_.has_error()) {
+ // Copy error message and location.
+ return ModuleResult{std::move(intermediate_error_)};
}
return result;
}
@@ -953,7 +1013,7 @@ class ModuleDecoderImpl : public Decoder {
StartDecoding(counters, allocator);
uint32_t offset = 0;
Vector<const byte> orig_bytes(start(), end() - start());
- DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
+ DecodeModuleHeader(VectorOf(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
}
@@ -997,8 +1057,8 @@ class ModuleDecoderImpl : public Decoder {
VerifyFunctionBody(zone->allocator(), 0, wire_bytes, module,
function.get());
- if (intermediate_result_.failed()) {
- return FunctionResult::ErrorFrom(std::move(intermediate_result_));
+ if (intermediate_error_.has_error()) {
+ return FunctionResult{std::move(intermediate_error_)};
}
return FunctionResult(std::move(function));
@@ -1045,9 +1105,17 @@ class ModuleDecoderImpl : public Decoder {
sizeof(ModuleDecoderImpl::seen_unordered_sections_) >
kLastKnownModuleSection,
"not enough bits");
- VoidResult intermediate_result_;
+ WasmError intermediate_error_;
ModuleOrigin origin_;
+ bool has_seen_unordered_section(SectionCode section_code) {
+ return seen_unordered_sections_ & (1 << section_code);
+ }
+
+ void set_seen_unordered_section(SectionCode section_code) {
+ seen_unordered_sections_ |= 1 << section_code;
+ }
+
uint32_t off(const byte* ptr) {
return static_cast<uint32_t>(ptr - start_) + buffer_offset_;
}
@@ -1106,24 +1174,26 @@ class ModuleDecoderImpl : public Decoder {
// Calculate individual global offsets and total size of globals table.
void CalculateGlobalOffsets(WasmModule* module) {
- uint32_t offset = 0;
+ uint32_t untagged_offset = 0;
+ uint32_t tagged_offset = 0;
uint32_t num_imported_mutable_globals = 0;
- if (module->globals.size() == 0) {
- module->globals_buffer_size = 0;
- return;
- }
for (WasmGlobal& global : module->globals) {
- byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
if (global.mutability && global.imported) {
- DCHECK(enabled_features_.mut_global);
global.index = num_imported_mutable_globals++;
+ } else if (global.type == ValueType::kWasmAnyRef) {
+ global.offset = tagged_offset;
+ // All entries in the tagged_globals_buffer have size 1.
+ tagged_offset++;
} else {
- offset = (offset + size - 1) & ~(size - 1); // align
- global.offset = offset;
- offset += size;
+ byte size =
+ ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
+ untagged_offset = (untagged_offset + size - 1) & ~(size - 1); // align
+ global.offset = untagged_offset;
+ untagged_offset += size;
}
}
- module->globals_buffer_size = offset;
+ module->untagged_globals_buffer_size = untagged_offset;
+ module->tagged_globals_buffer_size = tagged_offset;
}
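The untagged-offset computation uses the standard round-up identity (offset + size - 1) & ~(size - 1), which is valid because wasm value sizes (4 and 8 bytes) are powers of two; tagged anyref globals instead index a FixedArray where every slot has size 1. A quick check of the identity:

    #include <cassert>
    #include <cstdint>

    // Round {offset} up to the next multiple of {size}; {size} must be a
    // power of two for the mask trick to be correct.
    uint32_t Align(uint32_t offset, uint32_t size) {
      return (offset + size - 1) & ~(size - 1);
    }

    int main() {
      assert(Align(0, 8) == 0);
      assert(Align(4, 8) == 8);    // an i64 after an i32 is padded to offset 8
      assert(Align(12, 4) == 12);  // already-aligned offsets are unchanged
    }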
// Verifies the body (code) of a given function.
@@ -1154,12 +1224,12 @@ class ModuleDecoderImpl : public Decoder {
// If the decode failed and this is the first error, set error code and
// location.
- if (result.failed() && intermediate_result_.ok()) {
+ if (result.failed() && intermediate_error_.empty()) {
// Wrap the error message from the function decoder.
std::ostringstream error_msg;
- error_msg << "in function " << func_name << ": " << result.error_msg();
- intermediate_result_ =
- VoidResult::Error(result.error_offset(), error_msg.str());
+ error_msg << "in function " << func_name << ": "
+ << result.error().message();
+ intermediate_error_ = WasmError{result.error().offset(), error_msg.str()};
}
}
@@ -1550,8 +1620,8 @@ ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
size_t size = module_end - module_start;
CHECK_LE(module_start, module_end);
if (size >= kV8MaxWasmModuleSize) {
- return ModuleResult::Error(0, "size > maximum module size (%zu): %zu",
- kV8MaxWasmModuleSize, size);
+ return ModuleResult{WasmError{0, "size > maximum module size (%zu): %zu",
+ kV8MaxWasmModuleSize, size}};
}
// TODO(bradnelson): Improve histogram handling of size_t.
auto size_counter =
@@ -1670,8 +1740,9 @@ FunctionResult DecodeWasmFunctionForTesting(
// TODO(bradnelson): Improve histogram handling of ptrdiff_t.
size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize) {
- return FunctionResult::Error(0, "size > maximum function size (%zu): %zu",
- kV8MaxWasmFunctionSize, size);
+ return FunctionResult{WasmError{0,
+ "size > maximum function size (%zu): %zu",
+ kV8MaxWasmFunctionSize, size}};
}
ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
new file mode 100644
index 00000000000..04c0f3cf44b
--- /dev/null
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -0,0 +1,1537 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/module-instantiate.h"
+#include "src/asmjs/asm-js.h"
+#include "src/property-descriptor.h"
+#include "src/utils.h"
+#include "src/wasm/js-to-wasm-wrapper-cache-inl.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-import-wrapper-cache-inl.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects-inl.h"
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+ } while (false)
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
+}
+} // namespace
+
+// A helper class to simplify instantiating a module from a module object.
+// It closes over the {Isolate}, the {ErrorThrower}, etc.
+class InstanceBuilder {
+ public:
+ InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory);
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<WasmInstanceObject> Build();
+ // Run the start function, if any.
+ bool ExecuteStartFunction();
+
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ size_t table_size;
+ };
+
+ // A pre-evaluated value to use in import binding.
+ struct SanitizedImport {
+ Handle<String> module_name;
+ Handle<String> import_name;
+ Handle<Object> value;
+ };
+
+ Isolate* isolate_;
+ const WasmFeatures enabled_;
+ const WasmModule* const module_;
+ ErrorThrower* thrower_;
+ Handle<WasmModuleObject> module_object_;
+ MaybeHandle<JSReceiver> ffi_;
+ MaybeHandle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> untagged_globals_;
+ Handle<FixedArray> tagged_globals_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+ std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
+ Handle<WasmExportedFunction> start_function_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
+ std::vector<SanitizedImport> sanitized_imports_;
+
+ UseTrapHandler use_trap_handler() const {
+ return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
+ : kNoTrapHandler;
+ }
+
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
+ index, module_name->ToCString().get(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
+ module_name->ToCString().get(), error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
+
+#undef ERROR_THROWER_WITH_MESSAGE
+
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ Handle<String> import_name);
+
+ // Look up an import value in the {ffi_} object specifically for linking an
+ // asm.js module. This only performs non-observable lookups, which allows
+ // falling back to JavaScript proper (and hence re-executing all lookups) if
+ // module instantiation fails.
+ MaybeHandle<Object> LookupImportAsm(uint32_t index,
+ Handle<String> import_name);
+
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
+
+ // Load data segments into the memory.
+ void LoadDataSegments(Handle<WasmInstanceObject> instance);
+
+ void WriteGlobalValue(const WasmGlobal& global, double value);
+ void WriteGlobalValue(const WasmGlobal& global,
+ Handle<WasmGlobalObject> value);
+
+ void WriteGlobalAnyRef(const WasmGlobal& global, Handle<Object> value);
+
+ void SanitizeImports();
+
+ // Find the imported memory buffer if there is one. This is used to see if we
+ // need to recompile with bounds checks before creating the instance.
+ MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
+
+ // Processes a single imported function.
+ bool ProcessImportedFunction(Handle<WasmInstanceObject> instance,
+ int import_index, int func_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value);
+
+ // Process a single imported table.
+ bool ProcessImportedTable(Handle<WasmInstanceObject> instance,
+ int import_index, int table_index,
+ Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported memory.
+ bool ProcessImportedMemory(Handle<WasmInstanceObject> instance,
+ int import_index, Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported global.
+ bool ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
+ int import_index, int global_index,
+ Handle<String> module_name,
+ Handle<String> import_name, Handle<Object> value);
+
+ // Process a single imported WasmGlobalObject.
+ bool ProcessImportedWasmGlobalObject(Handle<WasmInstanceObject> instance,
+ int import_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ const WasmGlobal& global,
+ Handle<WasmGlobalObject> global_object);
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<WasmInstanceObject> instance);
+
+ template <typename T>
+ T* GetRawGlobalPtr(const WasmGlobal& global);
+
+ // Process initialization of globals.
+ void InitGlobals();
+
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t num_pages);
+
+ bool NeedsWrappers() const;
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<WasmInstanceObject> instance);
+
+ void InitializeTables(Handle<WasmInstanceObject> instance);
+
+ void LoadTableSegments(Handle<WasmInstanceObject> instance);
+
+ // Creates new exception tags for all exceptions. Note that some tags might
+  // already exist if they were imported; those tags will be re-used.
+ void InitializeExceptions(Handle<WasmInstanceObject> instance);
+};
+
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ auto instance = builder.Build();
+ if (!instance.is_null() && builder.ExecuteStartFunction()) {
+ return instance;
+ }
+ DCHECK(isolate->has_pending_exception() || thrower->error());
+ return {};
+}
+
+InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
+ : isolate_(isolate),
+ enabled_(module_object->native_module()->enabled_features()),
+ module_(module_object->module()),
+ thrower_(thrower),
+ module_object_(module_object),
+ ffi_(ffi),
+ memory_(memory) {
+ sanitized_imports_.reserve(module_->import_table.size());
+}
+
+// Build an instance, in all of its glory.
+MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
+ // Check that an imports argument was provided, if the module requires it.
+ // No point in continuing otherwise.
+ if (!module_->import_table.empty() && ffi_.is_null()) {
+ thrower_->TypeError(
+ "Imports argument must be present and must be an object");
+ return {};
+ }
+
+ SanitizeImports();
+ if (thrower_->error()) return {};
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
+ // From here on, we expect the build pipeline to run without exiting to JS.
+ DisallowJavascriptExecution no_js(isolate_);
+ // Record build time into correct bucket, then build instance.
+ TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm_instantiate, module_time));
+
+ //--------------------------------------------------------------------------
+ // Allocate the memory array buffer.
+ //--------------------------------------------------------------------------
+ // We allocate the memory buffer before cloning or reusing the compiled module
+ // so we will know whether we need to recompile with bounds checks.
+ uint32_t initial_pages = module_->initial_pages;
+ auto initial_pages_counter = SELECT_WASM_COUNTER(
+ isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
+ initial_pages_counter->AddSample(initial_pages);
+ // Asm.js has memory_ already set at this point, so we don't want to
+ // overwrite it.
+ if (memory_.is_null()) {
+ memory_ = FindImportedMemoryBuffer();
+ }
+ if (!memory_.is_null()) {
+    // Mark the externally passed ArrayBuffer as non-detachable.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ memory->set_is_detachable(false);
+
+ DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr);
+ } else if (initial_pages > 0 || use_trap_handler()) {
+ // We need to unconditionally create a guard region if using trap handlers,
+ // even when the size is zero to prevent null-dereference issues
+ // (e.g. https://crbug.com/769637).
+ // Allocate memory if the initial size is more than 0 pages.
+ memory_ = AllocateMemory(initial_pages);
+ if (memory_.is_null()) {
+ // failed to allocate memory
+ DCHECK(isolate_->has_pending_exception() || thrower_->error());
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Recompile module if using trap handlers but could not get guarded memory
+ //--------------------------------------------------------------------------
+ if (module_->origin == kWasmOrigin && use_trap_handler()) {
+ // Make sure the memory has suitable guard regions.
+ WasmMemoryTracker* const memory_tracker =
+ isolate_->wasm_engine()->memory_tracker();
+
+ if (!memory_tracker->HasFullGuardRegions(
+ memory_.ToHandleChecked()->backing_store())) {
+ if (!FLAG_wasm_trap_handler_fallback) {
+ thrower_->LinkError(
+ "Provided memory is lacking guard regions but fallback was "
+ "disabled.");
+ return {};
+ }
+
+ TRACE("Recompiling module without bounds checks\n");
+ ErrorThrower thrower(isolate_, "recompile");
+ auto native_module = module_object_->native_module();
+ CompileNativeModuleWithExplicitBoundsChecks(isolate_, &thrower, module_,
+ native_module);
+ if (thrower.error()) {
+ return {};
+ }
+ DCHECK(!native_module->use_trap_handler());
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Create the WebAssembly.Instance object.
+ //--------------------------------------------------------------------------
+ NativeModule* native_module = module_object_->native_module();
+ TRACE("New module instantiation for %p\n", native_module);
+ Handle<WasmInstanceObject> instance =
+ WasmInstanceObject::New(isolate_, module_object_);
+ NativeModuleModificationScope native_modification_scope(native_module);
+
+ //--------------------------------------------------------------------------
+ // Set up the globals for the new instance.
+ //--------------------------------------------------------------------------
+ uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size;
+ if (untagged_globals_buffer_size > 0) {
+ void* backing_store = isolate_->array_buffer_allocator()->Allocate(
+ untagged_globals_buffer_size);
+ if (backing_store == nullptr) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return {};
+ }
+ untagged_globals_ =
+ isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ constexpr bool is_external = false;
+ constexpr bool is_wasm_memory = false;
+ JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
+ backing_store, untagged_globals_buffer_size,
+ SharedFlag::kNotShared, is_wasm_memory);
+ if (untagged_globals_.is_null()) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return {};
+ }
+ instance->set_globals_start(
+ reinterpret_cast<byte*>(untagged_globals_->backing_store()));
+ instance->set_untagged_globals_buffer(*untagged_globals_);
+ }
+
+ uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size;
+ if (tagged_globals_buffer_size > 0) {
+ tagged_globals_ = isolate_->factory()->NewFixedArray(
+ static_cast<int>(tagged_globals_buffer_size));
+ instance->set_tagged_globals_buffer(*tagged_globals_);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the array of references to imported globals' array buffers.
+ //--------------------------------------------------------------------------
+ if (module_->num_imported_mutable_globals > 0) {
+ // TODO(binji): This allocates one slot for each mutable global, which is
+ // more than required if multiple globals are imported from the same
+ // module.
+ Handle<FixedArray> buffers_array = isolate_->factory()->NewFixedArray(
+ module_->num_imported_mutable_globals, TENURED);
+ instance->set_imported_mutable_globals_buffers(*buffers_array);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the exception table used for exception tag checks.
+ //--------------------------------------------------------------------------
+ int exceptions_count = static_cast<int>(module_->exceptions.size());
+ if (exceptions_count > 0) {
+ Handle<FixedArray> exception_table =
+ isolate_->factory()->NewFixedArray(exceptions_count, TENURED);
+ instance->set_exceptions_table(*exception_table);
+ exception_wrappers_.resize(exceptions_count);
+ }
+
+ //--------------------------------------------------------------------------
+ // Reserve the metadata for indirect function tables.
+ //--------------------------------------------------------------------------
+ int table_count = static_cast<int>(module_->tables.size());
+ table_instances_.resize(table_count);
+
+ //--------------------------------------------------------------------------
+ // Process the imports for the module.
+ //--------------------------------------------------------------------------
+ int num_imported_functions = ProcessImports(instance);
+ if (num_imported_functions < 0) return {};
+
+ //--------------------------------------------------------------------------
+ // Process the initialization for the module's globals.
+ //--------------------------------------------------------------------------
+ InitGlobals();
+
+ //--------------------------------------------------------------------------
+ // Initialize the indirect tables.
+ //--------------------------------------------------------------------------
+ if (table_count > 0) {
+ InitializeTables(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize the exceptions table.
+ //--------------------------------------------------------------------------
+ if (exceptions_count > 0) {
+ InitializeExceptions(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Create the WebAssembly.Memory object.
+ //--------------------------------------------------------------------------
+ if (module_->has_memory) {
+ if (!instance->has_memory_object()) {
+ // No memory object exists. Create one.
+ Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
+ isolate_, memory_,
+ module_->maximum_pages != 0 ? module_->maximum_pages : -1);
+ instance->set_memory_object(*memory_object);
+ }
+
+ // Add the instance object to the list of instances for this memory.
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
+ WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
+
+ if (!memory_.is_null()) {
+ // Double-check the {memory} array buffer matches the instance.
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ CHECK_EQ(instance->memory_size(), memory->byte_length());
+ CHECK_EQ(instance->memory_start(), memory->backing_store());
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that indirect function table segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmElemSegment& elem_segment : module_->elem_segments) {
+ if (!elem_segment.active) continue;
+ DCHECK(elem_segment.table_index < table_instances_.size());
+ uint32_t base = EvalUint32InitExpr(elem_segment.offset);
+ size_t table_size = table_instances_[elem_segment.table_index].table_size;
+ if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
+ thrower_->LinkError("table initializer is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that memory segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (const WasmDataSegment& seg : module_->data_segments) {
+ if (!seg.active) continue;
+ uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+ if (!IsInBounds(base, seg.source.length(), instance->memory_size())) {
+ thrower_->LinkError("data segment is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the exports object for the new instance.
+ //--------------------------------------------------------------------------
+ ProcessExports(instance);
+ if (thrower_->error()) return {};
+
+ //--------------------------------------------------------------------------
+ // Initialize the indirect function tables.
+ //--------------------------------------------------------------------------
+ if (table_count > 0) {
+ LoadTableSegments(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize the memory by loading data segments.
+ //--------------------------------------------------------------------------
+ if (module_->data_segments.size() > 0) {
+ LoadDataSegments(instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Debugging support.
+ //--------------------------------------------------------------------------
+ // Set all breakpoints that were set on the shared module.
+ WasmModuleObject::SetBreakpointsOnNewInstance(module_object_, instance);
+
+ if (FLAG_wasm_interpret_all && module_->origin == kWasmOrigin) {
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ std::vector<int> func_indexes;
+ for (int func_index = num_imported_functions,
+ num_wasm_functions = static_cast<int>(module_->functions.size());
+ func_index < num_wasm_functions; ++func_index) {
+ func_indexes.push_back(func_index);
+ }
+ WasmDebugInfo::RedirectToInterpreter(debug_info, VectorOf(func_indexes));
+ }
+
+ //--------------------------------------------------------------------------
+ // Create a wrapper for the start function.
+ //--------------------------------------------------------------------------
+ if (module_->start_function_index >= 0) {
+ int start_index = module_->start_function_index;
+ auto& function = module_->functions[start_index];
+ Handle<Code> wrapper_code = js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported);
+ // TODO(clemensh): Don't generate an exported function for the start
+ // function. Use CWasmEntry instead.
+ start_function_ = WasmExportedFunction::New(
+ isolate_, instance, MaybeHandle<String>(), start_index,
+ static_cast<int>(function.sig->parameter_count()), wrapper_code);
+ }
+
+ DCHECK(!isolate_->has_pending_exception());
+ TRACE("Successfully built instance for module %p\n",
+ module_object_->native_module());
+ return instance;
+}
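Both segment checks in Build() reduce to the same overflow-safe predicate: base plus length must fit within the table or memory size without wrapping. An illustrative reimplementation of that predicate (the real IsInBounds lives elsewhere in V8's utilities):

    #include <cstdint>
    #include <cstdio>

    // Overflow-safe "does [base, base+length) fit inside [0, max)?" check;
    // subtracting instead of adding avoids unsigned wrap-around.
    bool IsInBounds(uint32_t base, size_t length, size_t max) {
      return length <= max && base <= max - length;
    }

    int main() {
      std::printf("%d\n", IsInBounds(0, 10, 10));          // 1: exactly fits
      std::printf("%d\n", IsInBounds(8, 4, 10));           // 0: runs past the end
      std::printf("%d\n", IsInBounds(0xFFFFFFFF, 2, 10));  // 0: would wrap
    }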
+
+bool InstanceBuilder::ExecuteStartFunction() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "InstanceBuilder::ExecuteStartFunction");
+ if (start_function_.is_null()) return true; // No start function.
+
+ HandleScope scope(isolate_);
+ // Call the JS function.
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ return false;
+ }
+ return true;
+}
+
+// Look up an import value in the {ffi_} object.
+MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
+                                                  Handle<String> module_name,
+                                                  Handle<String> import_name) {
+ // We pre-validated in the js-api layer that the ffi object is present, and
+ // a JSObject, if the module has imports.
+ DCHECK(!ffi_.is_null());
+
+ // Look up the module first.
+ MaybeHandle<Object> result = Object::GetPropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), module_name);
+ if (result.is_null()) {
+ return ReportTypeError("module not found", index, module_name);
+ }
+
+ Handle<Object> module = result.ToHandleChecked();
+
+ // Look up the value in the module.
+ if (!module->IsJSReceiver()) {
+ return ReportTypeError("module is not an object or function", index,
+ module_name);
+ }
+
+ result = Object::GetPropertyOrElement(isolate_, module, import_name);
+ if (result.is_null()) {
+ ReportLinkError("import not found", index, module_name, import_name);
+ return MaybeHandle<JSFunction>();
+ }
+
+ return result;
+}
+
+// Look up an import value in the {ffi_} object specifically for linking an
+// asm.js module. This only performs non-observable lookups, which allows
+// falling back to JavaScript proper (and hence re-executing all lookups) if
+// module instantiation fails.
+MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
+ uint32_t index, Handle<String> import_name) {
+ // Check that a foreign function interface object was provided.
+ if (ffi_.is_null()) {
+ return ReportLinkError("missing imports object", index, import_name);
+ }
+
+ // Perform lookup of the given {import_name} without causing any observable
+ // side-effect. We only accept accesses that resolve to data properties,
+ // which is indicated by the asm.js spec in section 7 ("Linking") as well.
+ Handle<Object> result;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), import_name);
+ switch (it.state()) {
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::TRANSITION:
+ return ReportLinkError("not a data property", index, import_name);
+ case LookupIterator::NOT_FOUND:
+ // Accepting missing properties as undefined does not cause any
+      // observable difference from JavaScript semantics, so we are lenient.
+ result = isolate_->factory()->undefined_value();
+ break;
+ case LookupIterator::DATA:
+ result = it.GetDataValue();
+ break;
+ }
+
+ return result;
+}
+
+uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kI32Const:
+ return expr.val.i32_const;
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t offset = module_->globals[expr.val.global_index].offset;
+ return ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(raw_buffer_ptr(untagged_globals_, offset)));
+ }
+ default:
+ UNREACHABLE();
+ }
+}
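EvalUint32InitExpr handles exactly two constant forms: an i32 literal, or the value of a previously initialized untagged global read back from the globals buffer. A standalone sketch with illustrative types, assuming a little-endian host for brevity:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Illustrative init-expression: an i32 literal or a global index.
    struct InitExpr {
      enum { kI32Const, kGlobalIndex } kind;
      uint32_t value;  // literal or global index, depending on {kind}
    };

    uint32_t EvalUint32(const InitExpr& expr, const std::vector<uint8_t>& globals,
                        const std::vector<uint32_t>& offsets) {
      if (expr.kind == InitExpr::kI32Const) return expr.value;
      uint32_t result;  // read the referenced global from the untagged buffer
      std::memcpy(&result, globals.data() + offsets[expr.value], sizeof(result));
      return result;
    }

    int main() {
      std::vector<uint8_t> globals = {42, 0, 0, 0};
      std::printf("%u\n", EvalUint32({InitExpr::kI32Const, 7}, globals, {}));    // 7
      std::printf("%u\n", EvalUint32({InitExpr::kGlobalIndex, 0}, globals, {0}));  // 42
    }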
+
+// Load data segments into the memory.
+void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
+ for (const WasmDataSegment& segment : module_->data_segments) {
+ uint32_t source_size = segment.source.length();
+ // Segments of size == 0 are just nops.
+ if (source_size == 0) continue;
+ // Passive segments are not copied during instantiation.
+ if (!segment.active) continue;
+ uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
+ DCHECK(IsInBounds(dest_offset, source_size, instance->memory_size()));
+ byte* dest = instance->memory_start() + dest_offset;
+ const byte* src = wire_bytes.start() + segment.source.offset();
+ memcpy(dest, src, source_size);
+ }
+}
+
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
+ TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
+ reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
+ global.offset, num, ValueTypes::TypeName(global.type));
+ switch (global.type) {
+ case kWasmI32:
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ static_cast<int32_t>(num));
+ break;
+ case kWasmI64:
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
+ static_cast<int64_t>(num));
+ break;
+ case kWasmF32:
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ static_cast<float>(num));
+ break;
+ case kWasmF64:
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ static_cast<double>(num));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
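WriteLittleEndianValue keeps globals in wasm's canonical little-endian byte order regardless of host endianness. A hedged sketch of such a helper; the template name and byte-swap strategy here are illustrative, not V8's actual implementation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <utility>

    // Illustrative little-endian store: byte-swap on big-endian hosts so the
    // destination always holds the wasm (little-endian) representation.
    template <typename T>
    void WriteLittleEndian(void* dst, T value) {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
      uint8_t bytes[sizeof(T)];
      std::memcpy(bytes, &value, sizeof(T));
      for (size_t i = 0; i < sizeof(T) / 2; ++i)
        std::swap(bytes[i], bytes[sizeof(T) - 1 - i]);
      std::memcpy(dst, bytes, sizeof(T));
    #else
      std::memcpy(dst, &value, sizeof(T));  // little-endian host: plain store
    #endif
    }

    int main() {
      uint8_t buf[4];
      WriteLittleEndian<int32_t>(buf, 0x01020304);
      for (uint8_t b : buf) std::printf("%02x ", static_cast<unsigned>(b));
      std::printf("\n");  // prints "04 03 02 01" on any host
    }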
+
+void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
+ Handle<WasmGlobalObject> value) {
+ TRACE("init [globals_start=%p + %u] = ",
+ reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
+ global.offset);
+ switch (global.type) {
+ case kWasmI32: {
+ int32_t num = value->GetI32();
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
+ TRACE("%d", num);
+ break;
+ }
+ case kWasmI64: {
+ int64_t num = value->GetI64();
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
+ TRACE("%" PRId64, num);
+ break;
+ }
+ case kWasmF32: {
+ float num = value->GetF32();
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
+ TRACE("%f", num);
+ break;
+ }
+ case kWasmF64: {
+ double num = value->GetF64();
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
+ TRACE("%lf", num);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ TRACE(", type = %s (from WebAssembly.Global)\n",
+ ValueTypes::TypeName(global.type));
+}
+
+void InstanceBuilder::WriteGlobalAnyRef(const WasmGlobal& global,
+ Handle<Object> value) {
+ tagged_globals_->set(global.offset, *value, UPDATE_WRITE_BARRIER);
+}
+
+void InstanceBuilder::SanitizeImports() {
+ Vector<const uint8_t> wire_bytes =
+ module_object_->native_module()->wire_bytes();
+ for (size_t index = 0; index < module_->import_table.size(); ++index) {
+ const WasmImport& import = module_->import_table[index];
+
+ Handle<String> module_name;
+ MaybeHandle<String> maybe_module_name =
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.module_name);
+ if (!maybe_module_name.ToHandle(&module_name)) {
+ thrower_->LinkError("Could not resolve module name for import %zu",
+ index);
+ return;
+ }
+
+ Handle<String> import_name;
+ MaybeHandle<String> maybe_import_name =
+ WasmModuleObject::ExtractUtf8StringFromModuleBytes(isolate_, wire_bytes,
+ import.field_name);
+ if (!maybe_import_name.ToHandle(&import_name)) {
+ thrower_->LinkError("Could not resolve import name for import %zu",
+ index);
+ return;
+ }
+
+ int int_index = static_cast<int>(index);
+ MaybeHandle<Object> result =
+ module_->origin == kAsmJsOrigin
+ ? LookupImportAsm(int_index, import_name)
+ : LookupImport(int_index, module_name, import_name);
+ if (thrower_->error()) {
+ thrower_->LinkError("Could not find value for import %zu", index);
+ return;
+ }
+ Handle<Object> value = result.ToHandleChecked();
+ sanitized_imports_.push_back({module_name, import_name, value});
+ }
+}
+
+MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
+ DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+ for (size_t index = 0; index < module_->import_table.size(); index++) {
+ const WasmImport& import = module_->import_table[index];
+
+ if (import.kind == kExternalMemory) {
+ const auto& value = sanitized_imports_[index].value;
+ if (!value->IsWasmMemoryObject()) {
+ return {};
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ return buffer;
+ }
+ }
+ return {};
+}
+
+bool InstanceBuilder::ProcessImportedFunction(
+ Handle<WasmInstanceObject> instance, int import_index, int func_index,
+ Handle<String> module_name, Handle<String> import_name,
+ Handle<Object> value) {
+ // Function imports must be callable.
+ if (!value->IsCallable()) {
+ ReportLinkError("function import requires a callable", import_index,
+ module_name, import_name);
+ return false;
+ }
+ auto js_receiver = Handle<JSReceiver>::cast(value);
+ FunctionSig* expected_sig = module_->functions[func_index].sig;
+ auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig,
+ enabled_.bigint);
+ switch (kind) {
+ case compiler::WasmImportCallKind::kLinkError:
+ ReportLinkError("imported function does not match the expected type",
+ import_index, module_name, import_name);
+ return false;
+ case compiler::WasmImportCallKind::kWasmToWasm: {
+ // The imported function is a WASM function from another instance.
+ auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ Handle<WasmInstanceObject> imported_instance(
+ imported_function->instance(), isolate_);
+ // The import reference is the instance object itself.
+ Address imported_target = imported_function->GetWasmCallTarget();
+ ImportedFunctionEntry entry(instance, func_index);
+ entry.SetWasmToWasm(*imported_instance, imported_target);
+ break;
+ }
+ default: {
+ // The imported function is a callable.
+ NativeModule* native_module = instance->module_object()->native_module();
+ WasmCode* wasm_code = native_module->import_wrapper_cache()->GetOrCompile(
+ isolate_->wasm_engine(), isolate_->counters(), kind, expected_sig);
+ ImportedFunctionEntry entry(instance, func_index);
+ if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
+ // Wasm to JS wrappers are treated specially in the import table.
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
+ } else {
+ // Wasm math intrinsics are compiled as regular Wasm functions.
+ DCHECK(kind >= compiler::WasmImportCallKind::kFirstMathIntrinsic &&
+ kind <= compiler::WasmImportCallKind::kLastMathIntrinsic);
+ entry.SetWasmToWasm(*instance, wasm_code->instruction_start());
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
+ int import_index, int table_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ if (!value->IsWasmTableObject()) {
+ ReportLinkError("table import requires a WebAssembly.Table", import_index,
+ module_name, import_name);
+ return false;
+ }
+ const WasmTable& table = module_->tables[table_index];
+ TableInstance& table_instance = table_instances_[table_index];
+ table_instance.table_object = Handle<WasmTableObject>::cast(value);
+ instance->set_table_object(*table_instance.table_object);
+ table_instance.js_wrappers =
+ Handle<FixedArray>(table_instance.table_object->functions(), isolate_);
+
+ int imported_table_size = table_instance.js_wrappers->length();
+ if (imported_table_size < static_cast<int>(table.initial_size)) {
+ thrower_->LinkError("table import %d is smaller than initial %d, got %u",
+ import_index, table.initial_size, imported_table_size);
+ return false;
+ }
+
+ if (table.has_maximum_size) {
+ int64_t imported_maximum_size =
+ table_instance.table_object->maximum_length()->Number();
+ if (imported_maximum_size < 0) {
+ thrower_->LinkError("table import %d has no maximum length, expected %d",
+ import_index, table.maximum_size);
+ return false;
+ }
+ if (imported_maximum_size > table.maximum_size) {
+ thrower_->LinkError("table import %d has a larger maximum size %" PRIx64
+ " than the module's declared maximum %u",
+ import_index, imported_maximum_size,
+ table.maximum_size);
+ return false;
+ }
+ }
+
+ // Allocate a new dispatch table.
+ if (!instance->has_indirect_function_table()) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, imported_table_size);
+ table_instances_[table_index].table_size = imported_table_size;
+ }
+ // Initialize the dispatch table with the (foreign) JS functions
+ // that are already in the table.
+ for (int i = 0; i < imported_table_size; ++i) {
+ Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ // TODO(mtrofin): this is the same logic as WasmTableObject::Set:
+ // insert in the local table a wrapper from the other module, and add
+ // a reference to the owning instance of the other module.
+ if (!val->IsJSFunction()) continue;
+ if (!WasmExportedFunction::IsWasmExportedFunction(*val)) {
+ thrower_->LinkError("table import %d[%d] is not a wasm function",
+ import_index, i);
+ return false;
+ }
+ auto target_func = Handle<WasmExportedFunction>::cast(val);
+ Handle<WasmInstanceObject> target_instance =
+ handle(target_func->instance(), isolate_);
+ // Look up the signature's canonical id. If there is no canonical
+ // id, then the signature does not appear at all in this module,
+ // so putting {-1} in the table will cause checks to always fail.
+ FunctionSig* sig = target_func->sig();
+ IndirectFunctionTableEntry(instance, i)
+ .Set(module_->signature_map.Find(*sig), target_instance,
+ target_func->function_index());
+ }
+ return true;
+}
+
+bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
+ int import_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ // Validation should have failed if more than one memory object was
+ // provided.
+ DCHECK(!instance->has_memory_object());
+ if (!value->IsWasmMemoryObject()) {
+ ReportLinkError("memory import must be a WebAssembly.Memory object",
+ import_index, module_name, import_name);
+ return false;
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ instance->set_memory_object(*memory);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ // memory_ should have already been assigned in Build().
+ DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
+ uint32_t imported_cur_pages =
+ static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
+ if (imported_cur_pages < module_->initial_pages) {
+ thrower_->LinkError("memory import %d is smaller than initial %u, got %u",
+ import_index, module_->initial_pages,
+ imported_cur_pages);
+ return false;
+ }
+ int32_t imported_maximum_pages = memory->maximum_pages();
+ if (module_->has_maximum_pages) {
+ if (imported_maximum_pages < 0) {
+ thrower_->LinkError(
+ "memory import %d has no maximum limit, expected at most %u",
+          import_index, module_->maximum_pages);
+ return false;
+ }
+ if (static_cast<uint32_t>(imported_maximum_pages) >
+ module_->maximum_pages) {
+ thrower_->LinkError(
+ "memory import %d has a larger maximum size %u than the "
+ "module's declared maximum %u",
+ import_index, imported_maximum_pages, module_->maximum_pages);
+ return false;
+ }
+ }
+ if (module_->has_shared_memory != buffer->is_shared()) {
+ thrower_->LinkError(
+ "mismatch in shared state of memory, declared = %d, imported = %d",
+ module_->has_shared_memory, buffer->is_shared());
+ return false;
+ }
+
+ return true;
+}
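The memory-import checks compare page counts derived from the buffer's byte length against the module's declared limits. A compact sketch of the same arithmetic, with illustrative parameter names:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kWasmPageSize = 64 * 1024;  // 64 KiB, per the wasm spec

    // Sketch: an imported memory must be at least as large as the module's
    // declared initial size, and its maximum (if the module declares one)
    // must not exceed the module's declared maximum.
    bool MemoryImportOk(size_t byte_length, int64_t imported_max_pages,
                        uint32_t initial_pages, uint32_t declared_max_pages) {
      uint32_t cur_pages = static_cast<uint32_t>(byte_length / kWasmPageSize);
      if (cur_pages < initial_pages) return false;
      if (imported_max_pages < 0) return false;  // import declares no maximum
      return static_cast<uint32_t>(imported_max_pages) <= declared_max_pages;
    }

    int main() {
      std::printf("%d\n", MemoryImportOk(2 * kWasmPageSize, 4, 1, 4));  // 1
      std::printf("%d\n", MemoryImportOk(1 * kWasmPageSize, 4, 2, 4));  // 0: too small
    }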
+
+bool InstanceBuilder::ProcessImportedWasmGlobalObject(
+ Handle<WasmInstanceObject> instance, int import_index,
+ Handle<String> module_name, Handle<String> import_name,
+ const WasmGlobal& global, Handle<WasmGlobalObject> global_object) {
+ if (global_object->type() != global.type) {
+ ReportLinkError("imported global does not match the expected type",
+ import_index, module_name, import_name);
+ return false;
+ }
+ if (global_object->is_mutable() != global.mutability) {
+ ReportLinkError("imported global does not match the expected mutability",
+ import_index, module_name, import_name);
+ return false;
+ }
+ if (global.mutability) {
+ DCHECK_LT(global.index, module_->num_imported_mutable_globals);
+ Handle<Object> buffer;
+ Address address_or_offset;
+ if (global.type == kWasmAnyRef) {
+ static_assert(sizeof(global_object->offset()) <= sizeof(Address),
+ "The offset into the globals buffer does not fit into "
+ "the imported_mutable_globals array");
+ buffer = handle(global_object->tagged_buffer(), isolate_);
+ // For anyref globals we use a relative offset, not an absolute address.
+ address_or_offset = static_cast<Address>(global_object->offset());
+ } else {
+ buffer = handle(global_object->untagged_buffer(), isolate_);
+ // It is safe in this case to store the raw pointer to the buffer
+ // since the backing store of the JSArrayBuffer will not be
+ // relocated.
+ address_or_offset = reinterpret_cast<Address>(raw_buffer_ptr(
+ Handle<JSArrayBuffer>::cast(buffer), global_object->offset()));
+ }
+ instance->imported_mutable_globals_buffers()->set(global.index, *buffer);
+ instance->imported_mutable_globals()[global.index] = address_or_offset;
+ return true;
+ }
+
+ WriteGlobalValue(global, global_object);
+ return true;
+}
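For imported mutable globals, one Address-sized slot per global stores either a raw pointer (untagged numeric globals, whose backing store never relocates) or a bare offset (anyref globals, whose tagged FixedArray the GC may move). A small sketch of the two encodings, with illustrative types:

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    // Sketch: one Address-sized slot per imported mutable global holds either
    // a raw pointer into an untagged buffer or a plain index into a tagged
    // (GC-managed) buffer, disambiguated by the global's type.
    int main() {
      uint8_t untagged_buffer[16] = {};
      Address slots[2];
      // Numeric global: store the absolute address; the buffer never moves.
      slots[0] = reinterpret_cast<Address>(&untagged_buffer[8]);
      // anyref global: store only the offset; the GC may move the buffer.
      slots[1] = static_cast<Address>(3);
      std::printf("%p %u\n", reinterpret_cast<void*>(slots[0]),
                  static_cast<unsigned>(slots[1]));
    }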
+
+bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
+ int import_index, int global_index,
+ Handle<String> module_name,
+ Handle<String> import_name,
+ Handle<Object> value) {
+ // Immutable global imports are converted to numbers and written into
+ // the {untagged_globals_} array buffer.
+ //
+ // Mutable global imports instead have their backing array buffers
+ // referenced by this instance, and store the address of the imported
+ // global in the {imported_mutable_globals_} array.
+ const WasmGlobal& global = module_->globals[global_index];
+
+ // The mutable-global proposal allows importing i64 values, but only if
+ // they are passed as a WebAssembly.Global object.
+ //
+  // However, the bigint proposal allows importing constant i64 values
+  // as non-WebAssembly.Global objects.
+ if (global.type == kWasmI64 && !enabled_.bigint &&
+ !value->IsWasmGlobalObject()) {
+ ReportLinkError("global import cannot have type i64", import_index,
+ module_name, import_name);
+ return false;
+ }
+ if (module_->origin == kAsmJsOrigin) {
+ // Accepting {JSFunction} on top of just primitive values here is a
+ // workaround to support legacy asm.js code with broken binding. Note
+ // that using {NaN} (or Smi::kZero) here is what using the observable
+ // conversion via {ToPrimitive} would produce as well.
+ // TODO(mstarzinger): Still observable if Function.prototype.valueOf
+ // or friends are patched, we might need to check for that as well.
+ if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
+ if (value->IsPrimitive() && !value->IsSymbol()) {
+ if (global.type == kWasmI32) {
+ value = Object::ToInt32(isolate_, value).ToHandleChecked();
+ } else {
+ value = Object::ToNumber(isolate_, value).ToHandleChecked();
+ }
+ }
+ }
+
+ if (value->IsWasmGlobalObject()) {
+ auto global_object = Handle<WasmGlobalObject>::cast(value);
+ return ProcessImportedWasmGlobalObject(instance, import_index, module_name,
+ import_name, global, global_object);
+ }
+
+ if (global.mutability) {
+ ReportLinkError(
+ "imported mutable global must be a WebAssembly.Global object",
+ import_index, module_name, import_name);
+ return false;
+ }
+
+ if (global.type == ValueType::kWasmAnyRef) {
+ WriteGlobalAnyRef(global, value);
+ return true;
+ }
+
+ if (value->IsNumber()) {
+ WriteGlobalValue(global, value->Number());
+ return true;
+ }
+
+ if (enabled_.bigint && global.type == kWasmI64) {
+ Handle<BigInt> bigint;
+
+ if (!BigInt::FromObject(isolate_, value).ToHandle(&bigint)) {
+ return false;
+ }
+ WriteGlobalValue(global, bigint->AsInt64());
+ return true;
+ }
+
+ ReportLinkError("global import must be a number or WebAssembly.Global object",
+ import_index, module_name, import_name);
+ return false;
+}
+
+// Process the imports, including functions, tables, globals, and memory, in
+// order, loading them from the {ffi_} object. Returns the number of imported
+// functions.
+int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
+ int num_imported_functions = 0;
+ int num_imported_tables = 0;
+
+ DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+ int num_imports = static_cast<int>(module_->import_table.size());
+ for (int index = 0; index < num_imports; ++index) {
+ const WasmImport& import = module_->import_table[index];
+
+ Handle<String> module_name = sanitized_imports_[index].module_name;
+ Handle<String> import_name = sanitized_imports_[index].import_name;
+ Handle<Object> value = sanitized_imports_[index].value;
+
+ switch (import.kind) {
+ case kExternalFunction: {
+ uint32_t func_index = import.index;
+ DCHECK_EQ(num_imported_functions, func_index);
+ if (!ProcessImportedFunction(instance, index, func_index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ num_imported_functions++;
+ break;
+ }
+ case kExternalTable: {
+ uint32_t table_index = import.index;
+ DCHECK_EQ(table_index, num_imported_tables);
+ if (!ProcessImportedTable(instance, index, table_index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ num_imported_tables++;
+ break;
+ }
+ case kExternalMemory: {
+ if (!ProcessImportedMemory(instance, index, module_name, import_name,
+ value)) {
+ return -1;
+ }
+ break;
+ }
+ case kExternalGlobal: {
+ if (!ProcessImportedGlobal(instance, index, import.index, module_name,
+ import_name, value)) {
+ return -1;
+ }
+ break;
+ }
+ case kExternalException: {
+ if (!value->IsWasmExceptionObject()) {
+ ReportLinkError("exception import requires a WebAssembly.Exception",
+ index, module_name, import_name);
+ return -1;
+ }
+ Handle<WasmExceptionObject> imported_exception =
+ Handle<WasmExceptionObject>::cast(value);
+ if (!imported_exception->IsSignatureEqual(
+ module_->exceptions[import.index].sig)) {
+ ReportLinkError("imported exception does not match the expected type",
+ index, module_name, import_name);
+ return -1;
+ }
+ Object exception_tag = imported_exception->exception_tag();
+ DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
+ instance->exceptions_table()->set(import.index, exception_tag);
+ exception_wrappers_[import.index] = imported_exception;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return num_imported_functions;
+}
+
+template <typename T>
+T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
+ return reinterpret_cast<T*>(raw_buffer_ptr(untagged_globals_, global.offset));
+}
+
+// Process initialization of globals.
+void InstanceBuilder::InitGlobals() {
+ for (auto global : module_->globals) {
+ if (global.mutability && global.imported) {
+ continue;
+ }
+
+ switch (global.init.kind) {
+ case WasmInitExpr::kI32Const:
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ global.init.val.i32_const);
+ break;
+ case WasmInitExpr::kI64Const:
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
+ global.init.val.i64_const);
+ break;
+ case WasmInitExpr::kF32Const:
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ global.init.val.f32_const);
+ break;
+ case WasmInitExpr::kF64Const:
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ global.init.val.f64_const);
+ break;
+ case WasmInitExpr::kAnyRefConst:
+ DCHECK(enabled_.anyref);
+ if (global.imported) break; // We already initialized imported globals.
+
+ tagged_globals_->set(global.offset,
+ ReadOnlyRoots(isolate_).null_value(),
+ SKIP_WRITE_BARRIER);
+ break;
+ case WasmInitExpr::kGlobalIndex: {
+ if (global.type == ValueType::kWasmAnyRef) {
+ DCHECK(enabled_.anyref);
+ int other_offset =
+ module_->globals[global.init.val.global_index].offset;
+
+ tagged_globals_->set(global.offset,
+ tagged_globals_->get(other_offset),
+ SKIP_WRITE_BARRIER);
+ }
+ // Initialize with another global.
+ uint32_t new_offset = global.offset;
+ uint32_t old_offset =
+ module_->globals[global.init.val.global_index].offset;
+ TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
+ size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
+ ? sizeof(double)
+ : sizeof(int32_t);
+ memcpy(raw_buffer_ptr(untagged_globals_, new_offset),
+ raw_buffer_ptr(untagged_globals_, old_offset), size);
+ break;
+ }
+ case WasmInitExpr::kNone:
+ // Happens with imported globals.
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+// Allocate memory for a module instance as a new JSArrayBuffer.
+Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
+ if (num_pages > max_mem_pages()) {
+ thrower_->RangeError("Out of memory: wasm memory too large");
+ return Handle<JSArrayBuffer>::null();
+ }
+ const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
+ SharedFlag shared_flag =
+ is_shared_memory ? SharedFlag::kShared : SharedFlag::kNotShared;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, shared_flag)
+ .ToHandle(&mem_buffer)) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ }
+ return mem_buffer;
+}
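AllocateMemory converts the page count to bytes with the 64 KiB wasm page size after bounding it by the engine maximum. A quick sketch of the arithmetic; the maximum used here is a placeholder, not V8's actual max_mem_pages():

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kWasmPageSize = 64 * 1024;      // 64 KiB per wasm page
    constexpr uint32_t kIllustrativeMaxPages = 32767;  // placeholder engine limit

    // Returns the byte size for {num_pages}, or 0 if the request is too large.
    uint64_t MemoryBytes(uint32_t num_pages) {
      if (num_pages > kIllustrativeMaxPages) return 0;  // "memory too large"
      return uint64_t{num_pages} * kWasmPageSize;  // widen before multiplying
    }

    int main() {
      std::printf("%llu\n", static_cast<unsigned long long>(MemoryBytes(1)));  // 65536
      std::printf("%llu\n", static_cast<unsigned long long>(MemoryBytes(0xFFFFFFFF)));  // 0
    }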
+
+bool InstanceBuilder::NeedsWrappers() const {
+ if (module_->num_exported_functions > 0) return true;
+ for (auto& table_instance : table_instances_) {
+ if (!table_instance.js_wrappers.is_null()) return true;
+ }
+ for (auto& table : module_->tables) {
+ if (table.exported) return true;
+ }
+ return false;
+}
+
+// Process the exports, creating wrappers for functions, tables, memories,
+// globals, and exceptions.
+void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
+ isolate_);
+ if (NeedsWrappers()) {
+ // Fill the table to cache the exported JSFunction wrappers.
+ js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
+ Handle<JSFunction>::null());
+
+ // If an imported WebAssembly function gets exported, the exported function
+    // has to be identical to the imported function. Therefore we put all
+ // imported WebAssembly functions into the js_wrappers_ list.
+ for (int index = 0, end = static_cast<int>(module_->import_table.size());
+ index < end; ++index) {
+ const WasmImport& import = module_->import_table[index];
+ if (import.kind == kExternalFunction) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ }
+ }
+ }
+ }
+
+ Handle<JSObject> exports_object;
+ bool is_asm_js = false;
+ switch (module_->origin) {
+ case kWasmOrigin: {
+ // Create the "exports" object.
+ exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+ break;
+ }
+ case kAsmJsOrigin: {
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate_->native_context()->object_function(), isolate_);
+ exports_object = isolate_->factory()->NewJSObject(object_function);
+ is_asm_js = true;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ instance->set_exports_object(*exports_object);
+
+ Handle<String> single_function_name =
+ isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
+
+ PropertyDescriptor desc;
+ desc.set_writable(is_asm_js);
+ desc.set_enumerable(true);
+ desc.set_configurable(is_asm_js);
+
+ // Process each export in the export table.
+ int export_index = 0; // Index into {export_wrappers}.
+ for (const WasmExport& exp : module_->export_table) {
+ Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, exp.name)
+ .ToHandleChecked();
+ Handle<JSObject> export_to;
+ if (is_asm_js && exp.kind == kExternalFunction &&
+ String::Equals(isolate_, name, single_function_name)) {
+ export_to = instance;
+ } else {
+ export_to = exports_object;
+ }
+
+ switch (exp.kind) {
+ case kExternalFunction: {
+ // Wrap and export the code as a JSFunction.
+ const WasmFunction& function = module_->functions[exp.index];
+ Handle<JSFunction> js_function = js_wrappers_[exp.index];
+ if (js_function.is_null()) {
+ // Wrap the exported code as a JSFunction.
+ Handle<Code> export_code =
+ export_wrappers->GetValueChecked<Code>(isolate_, export_index);
+ MaybeHandle<String> func_name;
+ if (is_asm_js) {
+ // For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ ModuleWireBytes(module_object_->native_module()->wire_bytes()),
+ function.func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
+ }
+ js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, function.func_index,
+ static_cast<int>(function.sig->parameter_count()), export_code);
+ js_wrappers_[exp.index] = js_function;
+ }
+ desc.set_value(js_function);
+ export_index++;
+ break;
+ }
+ case kExternalTable: {
+ // Export a table as a WebAssembly.Table object.
+ TableInstance& table_instance = table_instances_[exp.index];
+ const WasmTable& table = module_->tables[exp.index];
+ if (table_instance.table_object.is_null()) {
+ uint32_t maximum = table.has_maximum_size ? table.maximum_size
+ : FLAG_wasm_max_table_size;
+ table_instance.table_object =
+ WasmTableObject::New(isolate_, table.initial_size, maximum,
+ &table_instance.js_wrappers);
+ }
+ instance->set_table_object(*table_instance.table_object);
+ desc.set_value(table_instance.table_object);
+ break;
+ }
+ case kExternalMemory: {
+ // Export the memory as a WebAssembly.Memory object. A WasmMemoryObject
+ // should already be available if the module has memory, since we always
+      // create or import it when building a WasmInstanceObject.
+ DCHECK(instance->has_memory_object());
+ desc.set_value(
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_));
+ break;
+ }
+ case kExternalGlobal: {
+ const WasmGlobal& global = module_->globals[exp.index];
+ Handle<JSArrayBuffer> untagged_buffer;
+ Handle<FixedArray> tagged_buffer;
+ uint32_t offset;
+
+ if (global.mutability && global.imported) {
+ Handle<FixedArray> buffers_array(
+ instance->imported_mutable_globals_buffers(), isolate_);
+ if (global.type == kWasmAnyRef) {
+ tagged_buffer = buffers_array->GetValueChecked<FixedArray>(
+ isolate_, global.index);
+ // For anyref globals we store the relative offset in the
+ // imported_mutable_globals array instead of an absolute address.
+ Address addr = instance->imported_mutable_globals()[global.index];
+ DCHECK_LE(addr, static_cast<Address>(
+ std::numeric_limits<uint32_t>::max()));
+ offset = static_cast<uint32_t>(addr);
+ } else {
+ untagged_buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
+ isolate_, global.index);
+ Address global_addr =
+ instance->imported_mutable_globals()[global.index];
+
+ size_t buffer_size = untagged_buffer->byte_length();
+ Address backing_store =
+ reinterpret_cast<Address>(untagged_buffer->backing_store());
+ CHECK(global_addr >= backing_store &&
+ global_addr < backing_store + buffer_size);
+ offset = static_cast<uint32_t>(global_addr - backing_store);
+ }
+ } else {
+ if (global.type == kWasmAnyRef) {
+ tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
+ } else {
+ untagged_buffer =
+ handle(instance->untagged_globals_buffer(), isolate_);
+ }
+ offset = global.offset;
+ }
+
+        // Since the global's backing buffer (tagged or untagged) is always
+        // provided, allocation should never fail.
+ Handle<WasmGlobalObject> global_obj =
+ WasmGlobalObject::New(isolate_, untagged_buffer, tagged_buffer,
+ global.type, offset, global.mutability)
+ .ToHandleChecked();
+ desc.set_value(global_obj);
+ break;
+ }
+ case kExternalException: {
+ const WasmException& exception = module_->exceptions[exp.index];
+ Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
+ if (wrapper.is_null()) {
+ Handle<HeapObject> exception_tag(
+ HeapObject::cast(instance->exceptions_table()->get(exp.index)),
+ isolate_);
+ wrapper =
+ WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
+ exception_wrappers_[exp.index] = wrapper;
+ }
+ desc.set_value(wrapper);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate_, export_to, name, &desc, kThrowOnError);
+ if (!status.IsJust()) {
+ DisallowHeapAllocation no_gc;
+ TruncatedUserString<> trunc_name(name->GetCharVector<uint8_t>(no_gc));
+ thrower_->LinkError("export of %.*s failed.", trunc_name.length(),
+ trunc_name.start());
+ return;
+ }
+ }
+ DCHECK_EQ(export_index, export_wrappers->length());
+
+ if (module_->origin == kWasmOrigin) {
+ v8::Maybe<bool> success =
+ JSReceiver::SetIntegrityLevel(exports_object, FROZEN, kDontThrow);
+ DCHECK(success.FromMaybe(false));
+ USE(success);
+ }
+}
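
The PropertyDescriptor set up before the export loop encodes the difference between the two origins: wasm exports are enumerable but neither writable nor configurable (and the exports object is additionally frozen at the end), while asm.js exports remain patchable. A small sketch of that attribute choice (types here are illustrative, not V8's):

    struct ExportAttributes {
      bool writable;
      bool enumerable;
      bool configurable;
    };

    ExportAttributes AttributesFor(bool is_asm_js) {
      return {/*writable=*/is_asm_js, /*enumerable=*/true,
              /*configurable=*/is_asm_js};
    }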
+
+void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
+ size_t table_count = module_->tables.size();
+ for (size_t index = 0; index < table_count; ++index) {
+ const WasmTable& table = module_->tables[index];
+ TableInstance& table_instance = table_instances_[index];
+
+ if (!instance->has_indirect_function_table() &&
+ table.type == kWasmAnyFunc) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table.initial_size);
+ table_instance.table_size = table.initial_size;
+ }
+ }
+}
+
+void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
+ NativeModule* native_module = module_object_->native_module();
+ for (auto& elem_segment : module_->elem_segments) {
+ // Passive segments are not copied during instantiation.
+ if (!elem_segment.active) continue;
+
+ uint32_t base = EvalUint32InitExpr(elem_segment.offset);
+ uint32_t num_entries = static_cast<uint32_t>(elem_segment.entries.size());
+ uint32_t index = elem_segment.table_index;
+ TableInstance& table_instance = table_instances_[index];
+ DCHECK(IsInBounds(base, num_entries, table_instance.table_size));
+ for (uint32_t i = 0; i < num_entries; ++i) {
+ uint32_t func_index = elem_segment.entries[i];
+ const WasmFunction* function = &module_->functions[func_index];
+ int table_index = static_cast<int>(i + base);
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
+ IndirectFunctionTableEntry(instance, table_index)
+ .Set(sig_id, instance, func_index);
+
+ if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
+ if (js_wrappers_[func_index].is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+          // TODO(titzer): We compile JS->wasm wrappers for functions that are
+          // not exported but are in an exported table. This should be done
+ // at module compile time and cached instead.
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.GetOrCompileJSToWasmWrapper(
+ isolate_, function->sig, function->imported);
+ MaybeHandle<String> func_name;
+ if (module_->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupFunctionName(
+ ModuleWireBytes(native_module->wire_bytes()), func_index);
+ func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate_, module_object_, func_name_ref)
+ .ToHandleChecked();
+ }
+ Handle<WasmExportedFunction> js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, func_index,
+ static_cast<int>(function->sig->parameter_count()), wrapper_code);
+ js_wrappers_[func_index] = js_function;
+ }
+ table_instance.js_wrappers->set(table_index, *js_wrappers_[func_index]);
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ instance, func_index);
+ }
+ }
+ }
+
+ int table_count = static_cast<int>(module_->tables.size());
+ for (int index = 0; index < table_count; ++index) {
+ TableInstance& table_instance = table_instances_[index];
+
+ // Add the new dispatch table at the end to avoid redundant lookups.
+ if (!table_instance.table_object.is_null()) {
+ WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
+ instance, index);
+ }
+ }
+}
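
The DCHECK in LoadTableSegments relies on IsInBounds(base, num_entries, table_size), i.e. base + num_entries <= table_size evaluated without 32-bit wraparound. A sketch of the assumed semantics (the real helper lives elsewhere in V8):

    #include <cstdint>

    bool IsInBoundsSketch(uint32_t offset, uint32_t size, uint32_t max) {
      // Widen to 64 bits so offset + size cannot wrap and fake a pass.
      return uint64_t{offset} + uint64_t{size} <= uint64_t{max};
    }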
+
+void InstanceBuilder::InitializeExceptions(
+ Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
+ for (int index = 0; index < exceptions_table->length(); ++index) {
+ if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
+ Handle<WasmExceptionTag> exception_tag =
+ WasmExceptionTag::New(isolate_, index);
+ exceptions_table->set(index, *exception_tag);
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#undef TRACE
diff --git a/chromium/v8/src/wasm/module-instantiate.h b/chromium/v8/src/wasm/module-instantiate.h
new file mode 100644
index 00000000000..15393969b92
--- /dev/null
+++ b/chromium/v8/src/wasm/module-instantiate.h
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_INSTANTIATE_H_
+#define V8_WASM_MODULE_INSTANTIATE_H_
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class JSArrayBuffer;
+class JSReceiver;
+class WasmModuleObject;
+class WasmInstanceObject;
+
+template <typename T>
+class Handle;
+template <typename T>
+class MaybeHandle;
+
+namespace wasm {
+
+class ErrorThrower;
+
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_INSTANTIATE_H_
diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc
index ca49655e96b..1896178c485 100644
--- a/chromium/v8/src/wasm/streaming-decoder.cc
+++ b/chromium/v8/src/wasm/streaming-decoder.cc
@@ -119,6 +119,44 @@ bool StreamingDecoder::SetCompiledModuleBytes(
return true;
}
+namespace {
+
+class TopTierCompiledCallback {
+ public:
+ TopTierCompiledCallback(std::shared_ptr<NativeModule> native_module,
+ StreamingDecoder::ModuleCompiledCallback callback)
+ : native_module_(std::move(native_module)),
+ callback_(std::move(callback)) {}
+
+ void operator()(CompilationEvent event, const WasmError* error) const {
+ if (event != CompilationEvent::kFinishedTopTierCompilation) return;
+ DCHECK_NULL(error);
+ callback_(native_module_);
+#ifdef DEBUG
+ DCHECK(!called_);
+ called_ = true;
+#endif
+ }
+
+ private:
+ const std::shared_ptr<NativeModule> native_module_;
+ const StreamingDecoder::ModuleCompiledCallback callback_;
+#ifdef DEBUG
+ mutable bool called_ = false;
+#endif
+};
+
+} // namespace
+
+void StreamingDecoder::NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) {
+ if (!module_compiled_callback_) return;
+ auto* comp_state = native_module->compilation_state();
+ comp_state->AddCallback(TopTierCompiledCallback{
+ std::move(native_module), std::move(module_compiled_callback_)});
+ module_compiled_callback_ = {};
+}
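
NotifyNativeModuleCreated hands the embedder's caching callback to the CompilationState, wrapped so it fires exactly once, on kFinishedTopTierCompilation. A hedged embedder-side sketch of registering such a callback (the cache logic is hypothetical; the lambda signature matches ModuleCompiledCallback as declared in streaming-decoder.h):

    streaming_decoder->SetModuleCompiledCallback(
        [](const std::shared_ptr<NativeModule>& native_module) {
          // Runs once, after top-tier compilation finishes; a real embedder
          // would serialize the module's code into its compilation cache here.
        });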
+
// An abstract class to share code among the states which decode VarInts. This
// class takes over the decoding of the VarInt and then calls the actual decode
// code with the decoded value.
@@ -288,7 +326,7 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
if (decoder.failed()) {
if (new_bytes == remaining_buf.size()) {
// We only report an error if we read all bytes.
- streaming->Error(decoder.toResult(nullptr));
+ streaming->Error(decoder.error());
}
set_offset(offset() + new_bytes);
return new_bytes;
diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h
index f3a57eeb0ba..d4e3ff7d149 100644
--- a/chromium/v8/src/wasm/streaming-decoder.h
+++ b/chromium/v8/src/wasm/streaming-decoder.h
@@ -16,12 +16,8 @@
namespace v8 {
namespace internal {
-
-template <typename T>
-class Handle;
-class WasmModuleObject;
-
namespace wasm {
+class NativeModule;
// This class is an interface for the StreamingDecoder to start the processing
// of the incoming module bytes.
@@ -55,7 +51,7 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// empty array is passed.
virtual void OnFinishedStream(OwnedVector<uint8_t> bytes) = 0;
// Report an error detected in the StreamingDecoder.
- virtual void OnError(VoidResult result) = 0;
+ virtual void OnError(const WasmError&) = 0;
// Report the abortion of the stream.
virtual void OnAbort() = 0;
@@ -84,15 +80,14 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
// Caching support.
// Sets the callback that is called after the module is fully compiled.
- using ModuleCompiledCallback = std::function<void(Handle<WasmModuleObject>)>;
+ using ModuleCompiledCallback =
+ std::function<void(const std::shared_ptr<NativeModule>&)>;
void SetModuleCompiledCallback(ModuleCompiledCallback callback);
// Passes previously compiled module bytes from the embedder's cache.
bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes);
- // The callback is stored on the StreamingDecoder so it can be called by the
- // AsyncCompileJob.
- ModuleCompiledCallback module_compiled_callback() const {
- return module_compiled_callback_;
- }
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module);
private:
// TODO(ahaas): Put the whole private state of the StreamingDecoder into the
@@ -207,14 +202,14 @@ class V8_EXPORT_PRIVATE StreamingDecoder {
size_t length,
Vector<const uint8_t> length_bytes);
- std::unique_ptr<DecodingState> Error(VoidResult result) {
- if (ok()) processor_->OnError(std::move(result));
+ std::unique_ptr<DecodingState> Error(const WasmError& error) {
+ if (ok()) processor_->OnError(error);
Fail();
return std::unique_ptr<DecodingState>(nullptr);
}
std::unique_ptr<DecodingState> Error(std::string message) {
- return Error(VoidResult::Error(module_offset_ - 1, std::move(message)));
+ return Error(WasmError{module_offset_ - 1, std::move(message)});
}
void ProcessModuleHeader() {
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index e5b3d2c2ab6..02e9c79bd26 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -269,6 +269,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
return MachineRepresentation::kSimd128;
@@ -312,6 +314,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 'd';
case kWasmAnyRef:
return 'r';
+ case kWasmAnyFunc:
+ return 'a';
case kWasmS128:
return 's';
case kWasmStmt:
@@ -334,7 +338,11 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return "f64";
case kWasmAnyRef:
- return "ref";
+ return "anyref";
+ case kWasmAnyFunc:
+ return "anyfunc";
+ case kWasmExceptRef:
+ return "exn";
case kWasmS128:
return "s128";
case kWasmStmt:
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index bcb9f07c08f..f55508c7a64 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -10,12 +10,13 @@
#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
+#include "src/log.h"
#include "src/macro-assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
@@ -105,13 +106,20 @@ base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
Address WasmCode::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
- if (constant_pool_offset_ < instructions().size()) {
+ if (constant_pool_offset_ < code_comments_offset_) {
return instruction_start() + constant_pool_offset_;
}
}
return kNullAddress;
}
+Address WasmCode::code_comments() const {
+ if (code_comments_offset_ < unpadded_binary_size_) {
+ return instruction_start() + code_comments_offset_;
+ }
+ return kNullAddress;
+}
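
Both accessors follow the same convention: a trailing section is present only if its offset lies strictly before the region that follows it, and an absent section yields kNullAddress. A sketch of the buffer layout these offsets describe (section order as suggested by the constructor DCHECKs further down; any section may be empty):

    // [ body | safepoint table | handler table | constant pool | comments ]
    //   ^ 0                                      unpadded_binary_size_ ^
    // [ padding up to instructions().size() follows the comments section ]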
+
size_t WasmCode::trap_handler_index() const {
CHECK(HasTrapHandlerIndex());
return static_cast<size_t>(trap_handler_index_);
@@ -227,7 +235,6 @@ void WasmCode::Validate() const {
break;
}
case RelocInfo::EXTERNAL_REFERENCE:
- case RelocInfo::COMMENT:
case RelocInfo::CONST_POOL:
case RelocInfo::VENEER_POOL:
// These are OK to appear.
@@ -239,6 +246,14 @@ void WasmCode::Validate() const {
#endif
}
+void WasmCode::MaybePrint(const char* name) const {
+ // Determines whether flags want this code to be printed.
+ if ((FLAG_print_wasm_code && kind() == kFunction) ||
+ (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
+ Print(name);
+ }
+}
+
void WasmCode::Print(const char* name) const {
StdoutStream os;
os << "--- WebAssembly code ---\n";
@@ -252,12 +267,13 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
- size_t body_size = instructions().size();
- os << "Body (size = " << body_size << ")\n";
+ size_t padding = instructions().size() - unpadded_binary_size_;
+ os << "Body (size = " << instructions().size() << " = "
+ << unpadded_binary_size_ << " + " << padding << " padding)\n";
#ifdef ENABLE_DISASSEMBLER
- size_t instruction_size = body_size;
- if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
+ size_t instruction_size = unpadded_binary_size_;
+ if (constant_pool_offset_ < instruction_size) {
instruction_size = constant_pool_offset_;
}
if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
@@ -301,12 +317,40 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
}
+ if (safepoint_table_offset_ > 0) {
+ SafepointTable table(instruction_start(), safepoint_table_offset_,
+ stack_slots_);
+ os << "Safepoints (size = " << table.size() << ")\n";
+ for (uint32_t i = 0; i < table.length(); i++) {
+ uintptr_t pc_offset = table.GetPcOffset(i);
+ os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
+ os << std::setw(6) << std::hex << pc_offset << " " << std::dec;
+ table.PrintEntry(i, os);
+ os << " (sp -> fp)";
+ SafepointEntry entry = table.GetEntry(i);
+ if (entry.trampoline_pc() != -1) {
+ os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
+ }
+ if (entry.has_deoptimization_index()) {
+ os << " deopt: " << std::setw(6) << entry.deoptimization_index();
+ }
+ os << "\n";
+ }
+ os << "\n";
+ }
+
os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
for (RelocIterator it(instructions(), reloc_info(), constant_pool());
!it.done(); it.next()) {
it.rinfo()->Print(nullptr, os);
}
os << "\n";
+
+ if (code_comments_offset() < unpadded_binary_size_) {
+ Address code_comments = reinterpret_cast<Address>(instructions().start() +
+ code_comments_offset());
+ PrintCodeCommentsSection(os, code_comments);
+ }
#endif // ENABLE_DISASSEMBLER
}
@@ -386,13 +430,15 @@ void NativeModule::LogWasmCodes(Isolate* isolate) {
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), use_trap_handler_, kRuntimeExceptionSupport};
+ return {module(), use_trap_handler_, kRuntimeExceptionSupport,
+ enabled_features_};
}
WasmCode* NativeModule::AddOwnedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
@@ -404,11 +450,11 @@ WasmCode* NativeModule::AddOwnedCode(
base::MutexGuard lock(&allocation_mutex_);
Vector<byte> executable_buffer = AllocateForCode(instructions.size());
// Ownership will be transferred to {owned_code_} below.
- code = new WasmCode(this, index, executable_buffer, stack_slots,
- safepoint_table_offset, handler_table_offset,
- constant_pool_offset, std::move(protected_instructions),
- std::move(reloc_info), std::move(source_position_table),
- kind, tier);
+ code = new WasmCode(
+ this, index, executable_buffer, stack_slots, safepoint_table_offset,
+ handler_table_offset, constant_pool_offset, code_comments_offset,
+ unpadded_binary_size, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_position_table), kind, tier);
if (owned_code_.empty() ||
code->instruction_start() > owned_code_.back()->instruction_start()) {
@@ -492,6 +538,8 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
safepoint_table_offset, // safepoint_table_offset
code->handler_table_offset(), // handler_table_offset
code->constant_pool_offset(), // constant_pool_offset
+ code->code_comments_offset(), // code_comments_offset
+ instructions.size(), // unpadded_binary_size
{}, // protected_instructions
std::move(reloc_info), // reloc_info
std::move(source_pos), // source positions
@@ -523,7 +571,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_code || FLAG_print_wasm_code) ret->Print(name);
+ ret->MaybePrint(name);
ret->Validate();
return ret;
}
@@ -537,12 +585,13 @@ WasmCode* NativeModule::AddCode(
OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
- WasmCode* ret =
- AddOwnedCode(index, {desc.buffer, static_cast<size_t>(desc.instr_size)},
- stack_slots, safepoint_table_offset, handler_table_offset,
- desc.instr_size - desc.constant_pool_size,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos_table), kind, tier);
+
+ WasmCode* ret = AddOwnedCode(
+ index, {desc.buffer, static_cast<size_t>(desc.instr_size)}, stack_slots,
+ safepoint_table_offset, handler_table_offset, desc.constant_pool_offset(),
+ desc.code_comments_offset(), desc.instr_size,
+ std::move(protected_instructions), std::move(reloc_info),
+ std::move(source_pos_table), kind, tier);
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = ret->instructions().start() - desc.buffer;
@@ -573,7 +622,7 @@ WasmCode* NativeModule::AddCode(
// made while iterating over the RelocInfo above.
Assembler::FlushICache(ret->instructions().start(),
ret->instructions().size());
- if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
+ ret->MaybePrint();
ret->Validate();
return ret;
}
@@ -581,13 +630,15 @@ WasmCode* NativeModule::AddCode(
WasmCode* NativeModule::AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
WasmCode* code =
AddOwnedCode(index, instructions, stack_slots, safepoint_table_offset,
handler_table_offset, constant_pool_offset,
+ code_comments_offset, unpadded_binary_size,
std::move(protected_instructions), std::move(reloc_info),
std::move(source_position_table), WasmCode::kFunction, tier);
@@ -638,9 +689,11 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
instructions.as_vector(), // instructions
0, // stack_slots
- 0, // safepoint_table_offset
- 0, // handler_table_offset
- 0, // constant_pool_offset
+ instructions.size(), // safepoint_table_offset
+ instructions.size(), // handler_table_offset
+ instructions.size(), // constant_pool_offset
+ instructions.size(), // code_comments_offset
+ instructions.size(), // unpadded_binary_size
{}, // protected_instructions
{}, // reloc_info
{}, // source_pos
@@ -749,24 +802,27 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {
public:
- explicit NativeModuleWireBytesStorage(NativeModule* native_module)
- : native_module_(native_module) {}
+ explicit NativeModuleWireBytesStorage(
+ std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
+ : wire_bytes_(std::move(wire_bytes)) {}
Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
- return native_module_->wire_bytes().SubVector(ref.offset(),
- ref.end_offset());
+ return wire_bytes_->as_vector().SubVector(ref.offset(), ref.end_offset());
}
private:
- NativeModule* const native_module_;
+ const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
};
} // namespace
-void NativeModule::SetWireBytes(OwnedVector<const byte> wire_bytes) {
- wire_bytes_ = std::move(wire_bytes);
- if (!wire_bytes.is_empty()) {
+void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
+ auto shared_wire_bytes =
+ std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
+ wire_bytes_ = shared_wire_bytes;
+ if (!shared_wire_bytes->is_empty()) {
compilation_state_->SetWireBytesStorage(
- std::make_shared<NativeModuleWireBytesStorage>(this));
+ std::make_shared<NativeModuleWireBytesStorage>(
+ std::move(shared_wire_bytes)));
}
}
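
Holding the bytes in a std::shared_ptr lets the NativeModuleWireBytesStorage handed to the compilation state co-own them, so background compile tasks keep the bytes alive even if the module later replaces wire_bytes_. A minimal sketch of the pattern (names here are illustrative):

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct SharedBytesStorage {
      // Co-ownership, not a borrowed pointer: tasks holding the storage
      // extend the lifetime of the underlying byte vector.
      std::shared_ptr<std::vector<uint8_t>> bytes;
    };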
@@ -830,7 +886,8 @@ NativeModule::~NativeModule() {
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed)
: memory_tracker_(memory_tracker),
- remaining_uncommitted_code_space_(max_committed) {
+ remaining_uncommitted_code_space_(max_committed),
+ critical_uncommitted_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
@@ -842,8 +899,8 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
// Reserve the size. Use CAS loop to avoid underflow on
// {remaining_uncommitted_}. Temporary underflow would allow concurrent
// threads to over-commit.
+ size_t old_value = remaining_uncommitted_code_space_.load();
while (true) {
- size_t old_value = remaining_uncommitted_code_space_.load();
if (old_value < size) return false;
if (remaining_uncommitted_code_space_.compare_exchange_weak(
old_value, old_value - size)) {
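
Hoisting the initial load out of the loop is safe because compare_exchange_weak writes the current counter value back into old_value on failure. A self-contained sketch of the same reservation loop:

    #include <atomic>
    #include <cstddef>

    // Reserve `size` from `remaining` without ever underflowing the counter
    // (a temporary underflow would let concurrent threads over-commit).
    bool TryReserve(std::atomic<size_t>& remaining, size_t size) {
      size_t old_value = remaining.load();
      while (true) {
        if (old_value < size) return false;
        // On failure, old_value is refreshed with the current counter value.
        if (remaining.compare_exchange_weak(old_value, old_value - size)) {
          return true;
        }
      }
    }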
@@ -920,6 +977,7 @@ void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
remaining_uncommitted_code_space_.store(limit);
+ critical_uncommitted_code_space_.store(limit / 2);
}
namespace {
@@ -943,7 +1001,7 @@ size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
constexpr size_t kCodeOverhead = 32; // for prologue, stack check, ...
constexpr size_t kStaticCodeSize = 512; // runtime stubs, ...
- constexpr size_t kImportSize = 64 * kPointerSize;
+ constexpr size_t kImportSize = 64 * kSystemPointerSize;
size_t estimate = kStaticCodeSize;
for (auto& function : module->functions) {
@@ -972,30 +1030,22 @@ size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
return wasm_module_estimate + native_module_estimate;
}
-bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
- base::MutexGuard lock(&native_modules_mutex_);
- // TODO(titzer): we force a critical memory pressure notification
- // when the code space is almost exhausted, but only upon the next module
- // creation. This is only for one isolate, and it should really do this for
- // all isolates, at the point of commit.
- constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
- return native_modules_.size() > 1 &&
- remaining_uncommitted_code_space_.load() < kCriticalThreshold;
-}
-
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
bool can_request_more, std::shared_ptr<const WasmModule> module) {
DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
- if (ShouldForceCriticalMemoryPressureNotification()) {
+ if (remaining_uncommitted_code_space_.load() <
+ critical_uncommitted_code_space_.load()) {
(reinterpret_cast<v8::Isolate*>(isolate))
->MemoryPressureNotification(MemoryPressureLevel::kCritical);
+ critical_uncommitted_code_space_.store(
+ remaining_uncommitted_code_space_.load() / 2);
}
// If the code must be contiguous, reserve enough address space up front.
size_t code_vmem_size =
kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
- // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+ // Try up to two times; getting rid of dead JSArrayBuffer allocations might
   // require two GCs because the first GC may be incremental and may have
// floating garbage.
static constexpr int kAllocationRetries = 2;
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 7220d65f5ff..4247350cebf 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -45,8 +45,9 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
- DisjointAllocationPool(DisjointAllocationPool&& other) = default;
- DisjointAllocationPool& operator=(DisjointAllocationPool&& other) = default;
+ DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
+ DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
+ V8_NOEXCEPT = default;
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
@@ -64,7 +65,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
private:
std::list<base::AddressRegion> regions_;
- DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
+ DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
class V8_EXPORT_PRIVATE WasmCode final {
@@ -113,9 +114,12 @@ class V8_EXPORT_PRIVATE WasmCode final {
NativeModule* native_module() const { return native_module_; }
Tier tier() const { return tier_; }
Address constant_pool() const;
+ Address code_comments() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
+ size_t code_comments_offset() const { return code_comments_offset_; }
+ size_t unpadded_binary_size() const { return unpadded_binary_size_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return tier_ == kLiftoff; }
bool contains(Address pc) const {
@@ -132,6 +136,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
+ void MaybePrint(const char* name = nullptr) const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -151,7 +156,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
WasmCode(NativeModule* native_module, uint32_t index,
Vector<byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -166,11 +172,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
+ code_comments_offset_(code_comments_offset),
+ unpadded_binary_size_(unpadded_binary_size),
protected_instructions_(std::move(protected_instructions)),
tier_(tier) {
- DCHECK_LE(safepoint_table_offset, instructions.size());
- DCHECK_LE(constant_pool_offset, instructions.size());
- DCHECK_LE(handler_table_offset, instructions.size());
+ DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
+ DCHECK_LE(handler_table_offset, unpadded_binary_size);
+ DCHECK_LE(code_comments_offset, unpadded_binary_size);
+ DCHECK_LE(constant_pool_offset, unpadded_binary_size);
}
// Code objects that have been registered with the global trap handler within
@@ -196,6 +205,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
// conversions.
size_t safepoint_table_offset_ = 0;
size_t handler_table_offset_ = 0;
+ size_t code_comments_offset_ = 0;
+ size_t unpadded_binary_size_ = 0;
intptr_t trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
Tier tier_;
@@ -226,7 +237,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
size_t safepoint_table_offset, size_t handler_table_offset,
- size_t constant_pool_offset,
+ size_t constant_pool_offset, size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>
protected_instructions,
OwnedVector<const byte> reloc_info,
@@ -327,11 +339,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const byte> wire_bytes() const { return wire_bytes_.as_vector(); }
+ Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
size_t committed_code_space() const { return committed_code_space_.load(); }
- void SetWireBytes(OwnedVector<const byte> wire_bytes);
+ void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
WasmCode* Lookup(Address) const;
@@ -366,6 +378,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
size_t constant_pool_offset,
+ size_t code_comments_offset,
+ size_t unpadded_binary_size,
OwnedVector<trap_handler::ProtectedInstructionData>,
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table,
@@ -413,7 +427,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
- OwnedVector<const byte> wire_bytes_;
+ // Wire bytes, held in a shared_ptr so they can be kept alive by the
+ // {WireBytesStorage}, held by background compile tasks.
+ std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
@@ -507,10 +523,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void FreeNativeModule(NativeModule*);
void AssignRanges(Address start, Address end, NativeModule*);
void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
- bool ShouldForceCriticalMemoryPressureNotification();
WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
+ // If the remaining uncommitted code space falls below
+ // {critical_uncommitted_code_space_}, then we trigger a GC before creating
+ // the next module. This value is initialized to 50% of the available code
+ // space on creation and after each GC.
+ std::atomic<size_t> critical_uncommitted_code_space_;
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index 49525d9143f..668b08eba93 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -71,14 +71,19 @@ enum SectionCode : int8_t {
kElementSectionCode = 9, // Elements section
kCodeSectionCode = 10, // Function code
kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
- kExceptionSectionCode = 13, // Exception section
- kSourceMappingURLSectionCode = 14, // Source Map URL section
+ kExceptionSectionCode = 12, // Exception section
+ kDataCountSectionCode = 13, // Number of data segments
+
+ // The following sections are custom sections, and are identified using a
+ // string rather than an integer. Their enumeration values are not guaranteed
+ // to be consistent.
+ kNameSectionCode, // Name section (encoded as a string)
+ kSourceMappingURLSectionCode, // Source Map URL section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
kLastKnownModuleSection = kSourceMappingURLSectionCode,
- kFirstUnorderedSection = kNameSectionCode,
+ kFirstUnorderedSection = kExceptionSectionCode,
};
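
With this change, ordering is enforced only for the numbered sections below kFirstUnorderedSection; per this enum, the exception section onward is treated as unordered, and the custom (string-identified) sections have no stable numeric identity at all. A hypothetical sketch of how a decoder can use the boundary (helper name invented):

    // Only sections below kFirstUnorderedSection must appear in strictly
    // increasing order in the module.
    bool MustBeOrdered(SectionCode code) {
      return code >= kFirstSectionInModule && code < kFirstUnorderedSection;
    }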
// Binary encoding of name section kinds.
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index 1718b615bf8..98619b5c14f 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -31,9 +31,9 @@ namespace {
template <bool internal, typename... Args>
Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
Args... args) {
- // Maximum length of a formatted value name ("param#%d", "local#%d",
- // "global#%d").
- constexpr int kMaxStrLen = 18;
+ // Maximum length of a formatted value name ("arg#%d", "local#%d",
+ // "global#%d", i32 constants, i64 constants), including null character.
+ static constexpr int kMaxStrLen = 21;
EmbeddedVector<char, kMaxStrLen> value;
int len = SNPrintF(value, format, args...);
CHECK(len > 0 && len < value.length());
@@ -49,11 +49,13 @@ Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
if (Smi::IsValid(value.to<int32_t>()))
return handle(Smi::FromInt(value.to<int32_t>()), isolate);
return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
- case kWasmI64:
- if (Smi::IsValid(value.to<int64_t>()))
- return handle(Smi::FromIntptr(value.to<int64_t>()), isolate);
- return PrintFToOneByteString<false>(isolate, "%" PRId64,
- value.to<int64_t>());
+ case kWasmI64: {
+ int64_t i64 = value.to<int64_t>();
+ int32_t i32 = static_cast<int32_t>(i64);
+ if (i32 == i64 && Smi::IsValid(i32))
+ return handle(Smi::FromIntptr(i32), isolate);
+ return PrintFToOneByteString<false>(isolate, "%" PRId64, i64);
+ }
case kWasmF32:
return isolate->factory()->NewNumber(value.to<float>());
case kWasmF64:
@@ -129,12 +131,12 @@ class InterpreterHandle {
return {frame_base, frame_limit};
}
- static Vector<const byte> GetBytes(WasmDebugInfo* debug_info) {
+ static ModuleWireBytes GetBytes(WasmDebugInfo debug_info) {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
NativeModule* native_module =
debug_info->wasm_instance()->module_object()->native_module();
- return native_module->wire_bytes();
+ return ModuleWireBytes{native_module->wire_bytes()};
}
public:
@@ -412,7 +414,7 @@ class InterpreterHandle {
isolate_->factory()->NewJSObjectWithNullProto();
if (instance->has_memory_object()) {
Handle<String> name = isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("memory"));
+ StaticCharVector("memory"));
Handle<JSArrayBuffer> memory_buffer(
instance->memory_object()->array_buffer(), isolate_);
Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
@@ -439,7 +441,7 @@ class InterpreterHandle {
isolate_->factory()->NewJSObjectWithNullProto();
Handle<String> locals_name =
isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("locals"));
+ StaticCharVector("locals"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, locals_name,
locals_obj, NONE)
.Assert();
@@ -468,7 +470,7 @@ class InterpreterHandle {
Handle<JSObject> stack_obj =
isolate_->factory()->NewJSObjectWithNullProto();
Handle<String> stack_name = isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("stack"));
+ StaticCharVector("stack"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, stack_name,
stack_obj, NONE)
.Assert();
@@ -539,14 +541,14 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->raw();
}
-wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->interpreter_handle();
+wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo debug_info) {
+ Object handle_obj = debug_info->interpreter_handle();
DCHECK(!handle_obj->IsUndefined());
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
-wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->interpreter_handle();
+wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
+ Object handle_obj = debug_info->interpreter_handle();
if (handle_obj->IsUndefined()) return nullptr;
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
}
@@ -627,7 +629,8 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
wasm::WasmCode* wasm_new_code = compiler::CompileWasmInterpreterEntry(
- isolate, native_module, func_index, module->functions[func_index].sig);
+ isolate->wasm_engine(), native_module, func_index,
+ module->functions[func_index].sig);
native_module->PublishInterpreterEntry(wasm_new_code, func_index);
Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
wasm_new_code->instruction_start(), TENURED);
@@ -636,7 +639,7 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
- GetInterpreterHandle(this)->PrepareStep(step_action);
+ GetInterpreterHandle(*this)->PrepareStep(step_action);
}
// static
@@ -653,20 +656,20 @@ bool WasmDebugInfo::RunInterpreter(Isolate* isolate,
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
Address frame_pointer) {
- return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
+ return GetInterpreterHandle(*this)->GetInterpretedStack(frame_pointer);
}
wasm::WasmInterpreter::FramePtr WasmDebugInfo::GetInterpretedFrame(
Address frame_pointer, int idx) {
- return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
+ return GetInterpreterHandle(*this)->GetInterpretedFrame(frame_pointer, idx);
}
void WasmDebugInfo::Unwind(Address frame_pointer) {
- return GetInterpreterHandle(this)->Unwind(frame_pointer);
+ return GetInterpreterHandle(*this)->Unwind(frame_pointer);
}
uint64_t WasmDebugInfo::NumInterpretedCalls() {
- auto* handle = GetInterpreterHandleOrNull(this);
+ auto* handle = GetInterpreterHandleOrNull(*this);
return handle ? handle->NumInterpretedCalls() : 0;
}
@@ -727,7 +730,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
function_data->set_jump_table_offset(-1);
function_data->set_function_index(-1);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("c-wasm-entry"));
+ StaticCharVector("c-wasm-entry"));
NewFunctionArgs args = NewFunctionArgs::ForWasm(
name, function_data, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 91d916d4db6..d948157a128 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -7,10 +7,13 @@
#include "src/code-tracer.h"
#include "src/compilation-statistics.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
+#include "src/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -19,7 +22,7 @@ namespace internal {
namespace wasm {
WasmEngine::WasmEngine()
- : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {}
+ : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
// All AsyncCompileJobs have been canceled.
@@ -91,7 +94,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), allocator());
if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
+ thrower->CompileFailed("Wasm decoding failed", result.error());
return {};
}
@@ -226,30 +229,30 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
return job->CreateStreamingDecoder();
}
-bool WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
+void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier) {
// Note we assume that "one-off" compilations can discard detected features.
WasmFeatures detected = kNoWasmFeatures;
- return WasmCompilationUnit::CompileWasmFunction(
+ WasmCompilationUnit::CompileWasmFunction(
isolate, native_module, &detected,
&native_module->module()->functions[function_index], tier);
}
std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
Handle<WasmModuleObject> module_object) {
- return module_object->managed_native_module()->get();
+ return module_object->shared_native_module();
}
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Isolate* isolate, std::shared_ptr<NativeModule> shared_module) {
- Vector<const byte> wire_bytes = shared_module->wire_bytes();
+ ModuleWireBytes wire_bytes(shared_module->wire_bytes());
const WasmModule* module = shared_module->module();
Handle<Script> script =
CreateWasmScript(isolate, wire_bytes, module->source_map_url);
size_t code_size = shared_module->committed_code_space();
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
isolate, std::move(shared_module), script, code_size);
- CompileJsToWasmWrappers(isolate, module_object->native_module(),
+ CompileJsToWasmWrappers(isolate, module_object->native_module()->module(),
handle(module_object->export_wrappers(), isolate));
return module_object;
}
@@ -335,38 +338,27 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
namespace {
-struct WasmEnginePointerConstructTrait final {
- static void Construct(void* raw_ptr) {
- auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr);
- *engine_ptr = std::shared_ptr<WasmEngine>();
- }
-};
-
-// Holds the global shared pointer to the single {WasmEngine} that is intended
-// to be shared among Isolates within the same process. The {LazyStaticInstance}
-// here is required because {std::shared_ptr} has a non-trivial initializer.
-base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
- WasmEnginePointerConstructTrait>::type
- global_wasm_engine;
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
+ GetSharedWasmEngine);
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
if (!FLAG_wasm_shared_engine) return;
- global_wasm_engine.Pointer()->reset(new WasmEngine());
+ *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
}
// static
void WasmEngine::GlobalTearDown() {
if (!FLAG_wasm_shared_engine) return;
- global_wasm_engine.Pointer()->reset();
+ GetSharedWasmEngine()->reset();
}
// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
- if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
- return std::shared_ptr<WasmEngine>(new WasmEngine());
+ if (FLAG_wasm_shared_engine) return *GetSharedWasmEngine();
+ return std::make_shared<WasmEngine>();
}
// {max_mem_pages} is declared in wasm-limits.h.
diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h
index 426ec8655ef..4aa9331268c 100644
--- a/chromium/v8/src/wasm/wasm-engine.h
+++ b/chromium/v8/src/wasm/wasm-engine.h
@@ -19,6 +19,7 @@ namespace internal {
class AsmWasmData;
class CodeTracer;
class CompilationStatistics;
+class HeapNumber;
class WasmInstanceObject;
class WasmModuleObject;
@@ -98,10 +99,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
std::shared_ptr<CompilationResultResolver> resolver);
- // Compiles the function with the given index at a specific compilation tier
- // and returns true on success, false otherwise. This is mostly used for
- // testing to force a function into a specific tier.
- bool CompileFunction(Isolate* isolate, NativeModule* native_module,
+ // Compiles the function with the given index at a specific compilation tier.
+ // Errors are stored internally in the CompilationState.
+ // This is mostly used for testing to force a function into a specific tier.
+ void CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
// Exports the sharable parts of the given module object so that they can be
diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc
index 0317bb7bf51..9fc3b707c46 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.cc
+++ b/chromium/v8/src/wasm/wasm-external-refs.cc
@@ -10,6 +10,7 @@
#include "include/v8config.h"
#include "src/base/bits.h"
+#include "src/memcopy.h"
#include "src/utils.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-external-refs.h"
@@ -232,13 +233,13 @@ uint32_t word64_popcnt_wrapper(Address data) {
uint32_t word32_rol_wrapper(Address data) {
uint32_t input = ReadUnalignedValue<uint32_t>(data);
uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
- return (input << shift) | (input >> (32 - shift));
+ return (input << shift) | (input >> ((32 - shift) & 31));
}
uint32_t word32_ror_wrapper(Address data) {
uint32_t input = ReadUnalignedValue<uint32_t>(data);
uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
- return (input >> shift) | (input << (32 - shift));
+ return (input >> shift) | (input << ((32 - shift) & 31));
}
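
For shift == 0 the previous code evaluated input >> 32 (or input << 32), which is undefined behavior for a 32-bit operand in C++; masking the complementary shift with 31 keeps both shifts in range while preserving rotate semantics. A compact, well-defined rotate for reference:

    #include <cstdint>

    uint32_t RotateLeft32(uint32_t input, uint32_t shift) {
      shift &= 31;
      // (32 - 0) & 31 == 0, so rotate-by-zero degenerates to input | input.
      return (input << shift) | (input >> ((32 - shift) & 31));
    }
    // RotateLeft32(0x80000001u, 1) == 0x00000003u; RotateLeft32(x, 0) == x.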
void float64_pow_wrapper(Address data) {
@@ -247,6 +248,14 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, Pow(x, y));
}
+void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
+ MemMove(reinterpret_cast<void*>(dst), reinterpret_cast<void*>(src), size);
+}
+
+void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
+ memset(reinterpret_cast<void*>(dst), value, size);
+}
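
Note that memset converts its value argument to unsigned char, which matches the wasm memory.fill semantics of using only the low byte of the fill value. For instance:

    #include <cstdint>
    #include <cstring>

    void FillExample() {
      uint8_t buf[4];
      std::memset(buf, 0x1234AB, sizeof(buf));  // every byte becomes 0xAB
    }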
+
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index fc116b7fd8f..64a66532776 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -67,6 +67,10 @@ uint32_t word32_ror_wrapper(Address data);
void float64_pow_wrapper(Address data);
+void memory_copy_wrapper(Address dst, Address src, uint32_t size);
+
+void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size);
+
typedef void (*WasmTrapCallbackForTesting)();
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index 71bcd55f0d2..711c747d8ec 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -21,7 +21,7 @@
SEPARATOR \
V(anyref, "anyref opcodes", false) \
SEPARATOR \
- V(mut_global, "import/export mutable global support", true) \
+ V(bigint, "JS BigInt support", false) \
SEPARATOR \
V(bulk_memory, "bulk memory opcodes", false)
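
Each V(name, description, default) row is an X-macro entry: clients of this header define V (and SEPARATOR) to stamp out one declaration per feature. A hypothetical, self-contained illustration of the pattern (the macro names below are invented, not the ones this header exports):

    #define DEMO_FEATURES(V, SEPARATOR)     \
      V(anyref, "anyref opcodes", false)    \
      SEPARATOR                             \
      V(bigint, "JS BigInt support", false)

    #define DECLARE_FIELD(name, desc, def) bool name = def;
    #define NO_SEPARATOR
    struct DemoFeatures {
      DEMO_FEATURES(DECLARE_FIELD, NO_SEPARATOR)  // bool anyref; bool bigint;
    };
    #undef NO_SEPARATOR
    #undef DECLARE_FIELD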
diff --git a/chromium/v8/src/wasm/wasm-import-wrapper-cache-inl.h b/chromium/v8/src/wasm/wasm-import-wrapper-cache-inl.h
index 5e06b3b664f..290df248980 100644
--- a/chromium/v8/src/wasm/wasm-import-wrapper-cache-inl.h
+++ b/chromium/v8/src/wasm/wasm-import-wrapper-cache-inl.h
@@ -7,8 +7,6 @@
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
-#include "src/handles-inl.h"
-#include "src/objects/code-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-code-manager.h"
@@ -19,19 +17,16 @@ namespace wasm {
// Implements a cache for import wrappers.
class WasmImportWrapperCache {
public:
- WasmCode* GetOrCompile(Isolate* isolate, compiler::WasmImportCallKind kind,
- FunctionSig* sig) {
- // TODO(titzer): remove the isolate parameter.
+ WasmCode* GetOrCompile(WasmEngine* wasm_engine, Counters* counters,
+ compiler::WasmImportCallKind kind, FunctionSig* sig) {
base::MutexGuard lock(&mutex_);
CacheKey key(static_cast<uint8_t>(kind), *sig);
WasmCode*& cached = entry_map_[key];
if (cached == nullptr) {
// TODO(wasm): no need to hold the lock while compiling an import wrapper.
- HandleScope scope(isolate);
bool source_positions = native_module_->module()->origin == kAsmJsOrigin;
cached = compiler::CompileWasmImportCallWrapper(
- isolate, native_module_, kind, sig, source_positions);
- auto counters = isolate->counters();
+ wasm_engine, native_module_, kind, sig, source_positions);
counters->wasm_generated_code_size()->Increment(
cached->instructions().length());
counters->wasm_reloc_size()->Increment(cached->reloc_info().length());
diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc
index 6c0ab964f3c..8e75ad233f4 100644
--- a/chromium/v8/src/wasm/wasm-interpreter.cc
+++ b/chromium/v8/src/wasm/wasm-interpreter.cc
@@ -8,6 +8,7 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/assembler-inl.h"
+#include "src/base/overflowing-math.h"
#include "src/boxed-float.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
@@ -289,23 +290,19 @@ inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
}
inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1F);
- return (a >> shift) | (a << (32 - shift));
+ return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));
}
inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x1F);
- return (a << shift) | (a >> (32 - shift));
+ return (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F));
}
inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3F);
- return (a >> shift) | (a << (64 - shift));
+ return (a >> (b & 0x3F)) | (a << ((64 - b) & 0x3F));
}
inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- uint32_t shift = (b & 0x3F);
- return (a << shift) | (a >> (64 - shift));
+ return (a << (b & 0x3F)) | (a >> ((64 - b) & 0x3F));
}
inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
@@ -858,14 +855,14 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
@@ -1405,14 +1402,18 @@ class ThreadImpl {
template <typename mtype>
inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
- size_t mem_size = instance_object_->memory_size();
- if (sizeof(mtype) > mem_size) return kNullAddress;
- if (offset > (mem_size - sizeof(mtype))) return kNullAddress;
- if (index > (mem_size - sizeof(mtype) - offset)) return kNullAddress;
+ uint32_t effective_index = offset + index;
+ if (effective_index < index) {
+ return kNullAddress; // wraparound => oob
+ }
+ if (!IsInBounds(effective_index, sizeof(mtype),
+ instance_object_->memory_size())) {
+ return kNullAddress; // oob
+ }
// Compute the effective address of the access, making sure to condition
// the index even in the in-bounds case.
return reinterpret_cast<Address>(instance_object_->memory_start()) +
- offset + (index & instance_object_->memory_mask());
+ (effective_index & instance_object_->memory_mask());
}
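
The new check is overflow-safe in two steps: the 32-bit sum offset + index is first tested for wraparound, then the range check is delegated to IsInBounds, whose defining property is that it never forms an overflowing intermediate sum. A minimal sketch of that property, assuming the (index, access size, memory size) shape used above:

    #include <cstdint>

    // True iff [index, index + access_size) lies within [0, mem_size),
    // with no intermediate addition that could overflow.
    inline bool IsInBoundsSketch(uint64_t index, uint64_t access_size,
                                 uint64_t mem_size) {
      return access_size <= mem_size && index <= mem_size - access_size;
    }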
template <typename ctype, typename mtype>
@@ -1737,9 +1738,9 @@ class ThreadImpl {
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
- BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
- BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
- BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
+ BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
+ BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
+ BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
BINOP_CASE(I32x4MinU, i32x4, int4, 4,
static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
@@ -1749,9 +1750,9 @@ class ThreadImpl {
BINOP_CASE(S128And, i32x4, int4, 4, a & b)
BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
- BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
- BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
- BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
+ BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
+ BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
+ BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
BINOP_CASE(I16x8MinU, i16x8, int8, 8,
static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
@@ -1762,9 +1763,9 @@ class ThreadImpl {
BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
- BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
- BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
+ BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
+ BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
+ BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
BINOP_CASE(I8x16MinU, i8x16, int16, 16,
static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
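
Switching the integer SIMD lanes to base::AddWithWraparound and friends removes signed-overflow UB: wasm mandates two's-complement wraparound, but plain a + b on signed C++ operands is undefined on overflow. The usual implementation routes through unsigned arithmetic; a sketch of the idiom — the real helpers live in src/base/overflowing-math.h, included at the top of this file:

    #include <cstdint>

    // Illustration of the wraparound idiom; not V8's actual helper.
    inline int32_t AddWithWraparoundSketch(int32_t a, int32_t b) {
      // Unsigned addition wraps mod 2^32 by definition; converting back
      // gives the two's-complement result wasm requires. (The unsigned-to-
      // signed conversion is implementation-defined before C++20, but is
      // two's complement on every compiler V8 supports.)
      return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                  static_cast<uint32_t>(b));
    }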
@@ -1792,12 +1793,12 @@ class ThreadImpl {
}
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
- UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
- UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
- UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
+ UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
+ UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
+ UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
- UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
- UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
+ UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
+ UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
#undef UNOP_CASE
#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
case kExpr##op: { \
@@ -2191,13 +2192,15 @@ class ThreadImpl {
break;
}
case kExprBr: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
len = DoBreak(code, pc, imm.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -2714,8 +2717,11 @@ class ThreadImpl {
Handle<Object> object_ref,
const WasmCode* code,
FunctionSig* sig) {
+ wasm::WasmFeatures enabled_features =
+ wasm::WasmFeaturesFromIsolate(isolate);
+
if (code->kind() == WasmCode::kWasmToJsWrapper &&
- !IsJSCompatibleSignature(sig)) {
+ !IsJSCompatibleSignature(sig, enabled_features.bigint)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
return TryHandleException(isolate);
@@ -2771,11 +2777,10 @@ class ThreadImpl {
// Wrap the arg_buffer and the code target data pointers in handles. As
// these are aligned pointers, to the GC it will look like Smis.
- Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
- isolate);
+ Handle<Object> arg_buffer_obj(
+ Object(reinterpret_cast<Address>(arg_buffer.data())), isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(
- reinterpret_cast<Object*>(code->instruction_start()), isolate);
+ Handle<Object> code_entry_obj(Object(code->instruction_start()), isolate);
DCHECK(!code_entry_obj->IsHeapObject());
static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
@@ -2793,9 +2798,9 @@ class ThreadImpl {
maybe_retval.is_null() ? " with exception" : "");
if (maybe_retval.is_null()) {
- // JSEntryStub may through a stack overflow before we actually get to wasm
- // code or back to the interpreter, meaning the thread-in-wasm flag won't
- // be cleared.
+ // JSEntry may throw a stack overflow before we actually get to wasm code
+ // or back to the interpreter, meaning the thread-in-wasm flag won't be
+ // cleared.
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
}
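
The Object(...) rewrap above follows V8's migration of Object from a pointer type to a value type holding a tagged Address; the surrounding DCHECKs still rest on the same tagging trick: Smis carry tag bit 0 and heap-object pointers tag bit 1, so a word-aligned C++ pointer smuggled into a Handle looks like a Smi and the GC leaves it alone. A sketch of the invariant, with a hypothetical constant name:

    #include <cstdint>

    constexpr uintptr_t kTagMaskSketch = 1;  // low bit: 0 = Smi, 1 = heap object

    // Any word-aligned address satisfies this, so the GC treats it as a Smi.
    inline bool LooksLikeSmi(uintptr_t address) {
      return (address & kTagMaskSketch) == 0;
    }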
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index 67859aadd2e..4ad7d49076c 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -4,10 +4,13 @@
#include "src/wasm/wasm-js.h"
+#include <string>
+
#include "src/api-inl.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
+#include "src/base/overflowing-math.h"
#include "src/execution.h"
#include "src/handles.h"
#include "src/heap/factory.h"
@@ -42,7 +45,7 @@ class WasmStreaming::WasmStreamingImpl {
}
void OnBytesReceived(const uint8_t* bytes, size_t size) {
- streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size));
+ streaming_decoder_->OnBytesReceived(i::VectorOf(bytes, size));
}
void Finish() { streaming_decoder_->Finish(); }
@@ -59,24 +62,20 @@ class WasmStreaming::WasmStreamingImpl {
Utils::OpenHandle(*exception.ToLocalChecked()));
}
- void SetModuleCompiledCallback(ModuleCompiledCallback callback,
- intptr_t data) {
- // Wrap the embedder callback here so we can also wrap the result as a
- // Local<WasmCompiledModule> here.
- streaming_decoder_->SetModuleCompiledCallback(
- [callback, data](i::Handle<i::WasmModuleObject> module_object) {
- callback(data, Local<WasmCompiledModule>::Cast(Utils::ToLocal(
- i::Handle<i::JSObject>::cast(module_object))));
- });
- }
-
bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
- if (!i::wasm::IsSupportedVersion(reinterpret_cast<i::Isolate*>(isolate_),
- {bytes, size}))
- return false;
+ if (!i::wasm::IsSupportedVersion({bytes, size})) return false;
return streaming_decoder_->SetCompiledModuleBytes({bytes, size});
}
+ void SetClient(std::shared_ptr<Client> client) {
+    // There are no other event notifications, so we simply pass the client to
+    // the decoder, wrapped in a callback so we can also convert the result.
+ streaming_decoder_->SetModuleCompiledCallback(
+ [client](const std::shared_ptr<i::wasm::NativeModule>& native_module) {
+ client->OnModuleCompiled(Utils::Convert(native_module));
+ });
+ }
+
private:
Isolate* isolate_ = nullptr;
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
@@ -100,15 +99,14 @@ void WasmStreaming::Abort(MaybeLocal<Value> exception) {
impl_->Abort(exception);
}
-void WasmStreaming::SetModuleCompiledCallback(ModuleCompiledCallback callback,
- intptr_t data) {
- impl_->SetModuleCompiledCallback(callback, data);
-}
-
bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
return impl_->SetCompiledModuleBytes(bytes, size);
}
+void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
+ impl_->SetClient(client);
+}
+
// static
std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
Local<Value> value) {
@@ -238,7 +236,10 @@ namespace {
class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
public:
AsyncCompilationResolver(i::Isolate* isolate, i::Handle<i::JSPromise> promise)
- : promise_(isolate->global_handles()->Create(*promise)) {}
+ : promise_(isolate->global_handles()->Create(*promise)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ }
~AsyncCompilationResolver() override {
i::GlobalHandles::Destroy(promise_.location());
@@ -263,10 +264,14 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "AsyncCompilationResolver::promise_";
bool finished_ = false;
i::Handle<i::JSPromise> promise_;
};
+constexpr char AsyncCompilationResolver::kGlobalPromiseHandle[];
+
// This class resolves the result of WebAssembly.instantiate(module, imports).
// It just places the instantiation result in the supplied {promise}.
class InstantiateModuleResultResolver
@@ -274,7 +279,10 @@ class InstantiateModuleResultResolver
public:
InstantiateModuleResultResolver(i::Isolate* isolate,
i::Handle<i::JSPromise> promise)
- : promise_(isolate->global_handles()->Create(*promise)) {}
+ : promise_(isolate->global_handles()->Create(*promise)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ }
~InstantiateModuleResultResolver() override {
i::GlobalHandles::Destroy(promise_.location());
@@ -296,9 +304,13 @@ class InstantiateModuleResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "InstantiateModuleResultResolver::promise_";
i::Handle<i::JSPromise> promise_;
};
+constexpr char InstantiateModuleResultResolver::kGlobalPromiseHandle[];
+
// This class resolves the result of WebAssembly.instantiate(bytes, imports).
// For that it creates a new {JSObject} which contains both the provided
// {WasmModuleObject} and the resulting {WebAssemblyInstanceObject} itself.
@@ -310,7 +322,12 @@ class InstantiateBytesResultResolver
i::Handle<i::WasmModuleObject> module)
: isolate_(isolate),
promise_(isolate_->global_handles()->Create(*promise)),
- module_(isolate_->global_handles()->Create(*module)) {}
+ module_(isolate_->global_handles()->Create(*module)) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ i::GlobalHandles::AnnotateStrongRetainer(module_.location(),
+ kGlobalModuleHandle);
+ }
~InstantiateBytesResultResolver() override {
i::GlobalHandles::Destroy(promise_.location());
@@ -324,20 +341,14 @@ class InstantiateBytesResultResolver
i::Handle<i::JSObject> result =
isolate_->factory()->NewJSObject(isolate_->object_function());
- const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance");
i::Handle<i::String> instance_name =
isolate_->factory()
- ->NewStringFromOneByte(i::Vector<const uint8_t>(
- instance_str,
- i::StrLength(reinterpret_cast<const char*>(instance_str))))
+ ->NewStringFromOneByte(i::StaticCharVector("instance"))
.ToHandleChecked();
- const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module");
i::Handle<i::String> module_name =
isolate_->factory()
- ->NewStringFromOneByte(i::Vector<const uint8_t>(
- module_str,
- i::StrLength(reinterpret_cast<const char*>(module_str))))
+ ->NewStringFromOneByte(i::StaticCharVector("module"))
.ToHandleChecked();
i::JSObject::AddProperty(isolate_, result, instance_name, instance,
@@ -356,11 +367,18 @@ class InstantiateBytesResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "InstantiateBytesResultResolver::promise_";
+ static constexpr char kGlobalModuleHandle[] =
+ "InstantiateBytesResultResolver::module_";
i::Isolate* isolate_;
i::Handle<i::JSPromise> promise_;
i::Handle<i::WasmModuleObject> module_;
};
+constexpr char InstantiateBytesResultResolver::kGlobalPromiseHandle[];
+constexpr char InstantiateBytesResultResolver::kGlobalModuleHandle[];
+
// This class is the {CompilationResultResolver} for
// WebAssembly.instantiate(bytes, imports). When compilation finishes,
// {AsyncInstantiate} is started on the compilation result.
@@ -375,7 +393,14 @@ class AsyncInstantiateCompileResultResolver
maybe_imports_(maybe_imports.is_null()
? maybe_imports
: isolate_->global_handles()->Create(
- *maybe_imports.ToHandleChecked())) {}
+ *maybe_imports.ToHandleChecked())) {
+ i::GlobalHandles::AnnotateStrongRetainer(promise_.location(),
+ kGlobalPromiseHandle);
+ if (!maybe_imports_.is_null()) {
+ i::GlobalHandles::AnnotateStrongRetainer(
+ maybe_imports_.ToHandleChecked().location(), kGlobalImportsHandle);
+ }
+ }
~AsyncInstantiateCompileResultResolver() override {
i::GlobalHandles::Destroy(promise_.location());
@@ -403,12 +428,57 @@ class AsyncInstantiateCompileResultResolver
}
private:
+ static constexpr char kGlobalPromiseHandle[] =
+ "AsyncInstantiateCompileResultResolver::promise_";
+ static constexpr char kGlobalImportsHandle[] =
+ "AsyncInstantiateCompileResultResolver::module_";
bool finished_ = false;
i::Isolate* isolate_;
i::Handle<i::JSPromise> promise_;
i::MaybeHandle<i::JSReceiver> maybe_imports_;
};
+constexpr char AsyncInstantiateCompileResultResolver::kGlobalPromiseHandle[];
+constexpr char AsyncInstantiateCompileResultResolver::kGlobalImportsHandle[];
+
+std::string ToString(const char* name) { return std::string(name); }
+
+std::string ToString(const i::Handle<i::String> name) {
+ return std::string("Property '") + name->ToCString().get() + "'";
+}
+
+// Web IDL: '[EnforceRange] unsigned long'
+// Previously called ToNonWrappingUint32 in the draft WebAssembly JS spec.
+// https://heycam.github.io/webidl/#EnforceRange
+template <typename T>
+bool EnforceUint32(T argument_name, Local<v8::Value> v, Local<Context> context,
+ ErrorThrower* thrower, uint32_t* res) {
+ double double_number;
+
+ if (!v->NumberValue(context).To(&double_number)) {
+ thrower->TypeError("%s must be convertible to a number",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (!std::isfinite(double_number)) {
+ thrower->TypeError("%s must be convertible to a valid number",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (double_number < 0) {
+ thrower->TypeError("%s must be non-negative",
+ ToString(argument_name).c_str());
+ return false;
+ }
+ if (double_number > std::numeric_limits<uint32_t>::max()) {
+ thrower->TypeError("%s must be in the unsigned long range",
+ ToString(argument_name).c_str());
+ return false;
+ }
+
+ *res = static_cast<uint32_t>(double_number);
+ return true;
+}
} // namespace
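
EnforceUint32 applies its checks in the order the thrower messages suggest: non-finite values, negatives, and values above 2^32 - 1 are TypeErrors, and anything that survives is truncated toward zero by the static_cast. A condensed, thrower-free sketch of the same ladder:

    #include <cmath>
    #include <cstdint>
    #include <limits>
    #include <optional>

    // Same decision ladder as EnforceUint32 above; nullopt marks the cases
    // where the real code reports a TypeError instead.
    std::optional<uint32_t> EnforceRangeUint32(double v) {
      if (!std::isfinite(v)) return std::nullopt;  // NaN, +/-Infinity
      if (v < 0) return std::nullopt;
      if (v > std::numeric_limits<uint32_t>::max()) return std::nullopt;
      return static_cast<uint32_t>(v);             // e.g. 4.9 -> 4
    }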
// WebAssembly.compile(bytes) -> Promise
@@ -849,30 +919,74 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
- Local<Context> context, Local<v8::Object> object,
- Local<String> property, int64_t* result,
+ Local<Context> context, v8::Local<v8::Value> value,
+ i::Handle<i::String> property_name, int64_t* result,
int64_t lower_bound, uint64_t upper_bound) {
- v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
+ uint32_t number;
+ if (!EnforceUint32(property_name, value, context, thrower, &number)) {
+ return false;
+ }
+ if (number < lower_bound) {
+ thrower->RangeError("Property '%s': value %" PRIu32
+ " is below the lower bound %" PRIx64,
+ property_name->ToCString().get(), number, lower_bound);
+ return false;
+ }
+ if (number > upper_bound) {
+ thrower->RangeError("Property '%s': value %" PRIu32
+ " is above the upper bound %" PRIu64,
+ property_name->ToCString().get(), number, upper_bound);
+ return false;
+ }
+
+ *result = static_cast<int64_t>(number);
+ return true;
+}
+
+bool GetRequiredIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context,
+ Local<v8::Object> object,
+ Local<String> property, int64_t* result,
+ int64_t lower_bound, uint64_t upper_bound) {
v8::Local<v8::Value> value;
- if (maybe.ToLocal(&value)) {
- int64_t number;
- if (!value->IntegerValue(context).To(&number)) return false;
- if (number < lower_bound) {
- thrower->RangeError("Property value %" PRId64
- " is below the lower bound %" PRIx64,
- number, lower_bound);
- return false;
- }
- if (number > static_cast<int64_t>(upper_bound)) {
- thrower->RangeError("Property value %" PRId64
- " is above the upper bound %" PRIu64,
- number, upper_bound);
- return false;
- }
- *result = static_cast<int>(number);
+ if (!object->Get(context, property).ToLocal(&value)) {
+ return false;
+ }
+
+ i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
+
+ // Web IDL: dictionary presence
+ // https://heycam.github.io/webidl/#dfn-present
+ if (value->IsUndefined()) {
+ thrower->TypeError("Property '%s' is required",
+ property_name->ToCString().get());
+ return false;
+ }
+
+ return GetIntegerProperty(isolate, thrower, context, value, property_name,
+ result, lower_bound, upper_bound);
+}
+
+bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
+ Local<Context> context,
+ Local<v8::Object> object,
+ Local<String> property, int64_t* result,
+ int64_t lower_bound, uint64_t upper_bound) {
+ v8::Local<v8::Value> value;
+ if (!object->Get(context, property).ToLocal(&value)) {
+ return false;
+ }
+
+ // Web IDL: dictionary presence
+ // https://heycam.github.io/webidl/#dfn-present
+ if (value->IsUndefined()) {
return true;
}
- return false;
+
+ i::Handle<i::String> property_name = v8::Utils::OpenHandle(*property);
+
+ return GetIntegerProperty(isolate, thrower, context, value, property_name,
+ result, lower_bound, upper_bound);
}
// new WebAssembly.Table(args) -> WebAssembly.Table
@@ -906,27 +1020,23 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// The descriptor's 'initial'.
int64_t initial = 0;
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::FLAG_wasm_max_table_size)) {
+ if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0,
+ i::FLAG_wasm_max_table_size)) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
- Local<String> maximum_key = v8_str(isolate, "maximum");
- Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
-
- if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial,
- i::wasm::kSpecMaxWasmTableSize)) {
- return;
- }
+ if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "maximum"), &maximum, initial,
+ i::wasm::kSpecMaxWasmTableSize)) {
+ return;
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj = i::WasmTableObject::New(
- i_isolate, static_cast<uint32_t>(initial), maximum, &fixed_array);
+ i::Handle<i::JSObject> table_obj =
+ i::WasmTableObject::New(i_isolate, static_cast<uint32_t>(initial),
+ static_cast<uint32_t>(maximum), &fixed_array);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(table_obj));
}
@@ -948,22 +1058,17 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
// The descriptor's 'initial'.
int64_t initial = 0;
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0,
- i::wasm::max_mem_pages())) {
+ if (!GetRequiredIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "initial"), &initial, 0,
+ i::wasm::max_mem_pages())) {
return;
}
// The descriptor's 'maximum'.
int64_t maximum = -1;
- Local<String> maximum_key = v8_str(isolate, "maximum");
- Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
-
- if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
- if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial,
- i::wasm::kSpecMaxWasmMemoryPages)) {
- return;
- }
+ if (!GetOptionalIntegerProperty(isolate, &thrower, context, descriptor,
+ v8_str(isolate, "maximum"), &maximum, initial,
+ i::wasm::kSpecMaxWasmMemoryPages)) {
+ return;
}
bool is_shared_memory = false;
@@ -1005,7 +1110,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
- i_isolate, buffer, static_cast<int32_t>(maximum));
+ i_isolate, buffer, static_cast<uint32_t>(maximum));
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
@@ -1052,11 +1157,16 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
type = i::wasm::kWasmI32;
} else if (string->StringEquals(v8_str(isolate, "f32"))) {
type = i::wasm::kWasmF32;
+ } else if (string->StringEquals(v8_str(isolate, "i64"))) {
+ type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
type = i::wasm::kWasmF64;
+ } else if (string->StringEquals(v8_str(isolate, "anyref"))) {
+ type = i::wasm::kWasmAnyRef;
} else {
thrower.TypeError(
- "Descriptor property 'value' must be 'i32', 'f32', or 'f64'");
+ "Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
+ "'f64'");
return;
}
}
@@ -1064,7 +1174,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
const uint32_t offset = 0;
i::MaybeHandle<i::WasmGlobalObject> maybe_global_obj =
i::WasmGlobalObject::New(i_isolate, i::MaybeHandle<i::JSArrayBuffer>(),
- type, offset, is_mutable);
+ i::MaybeHandle<i::FixedArray>(), type, offset,
+ is_mutable);
i::Handle<i::WasmGlobalObject> global_obj;
if (!maybe_global_obj.ToHandle(&global_obj)) {
@@ -1085,6 +1196,22 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetI32(i32_value);
break;
}
+ case i::wasm::kWasmI64: {
+ int64_t i64_value = 0;
+ if (!value->IsUndefined()) {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (!enabled_features.bigint) {
+ thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ return;
+ }
+
+ v8::Local<v8::BigInt> bigint_value;
+ if (!value->ToBigInt(context).ToLocal(&bigint_value)) return;
+ i64_value = bigint_value->Int64Value();
+ }
+ global_obj->SetI64(i64_value);
+ break;
+ }
case i::wasm::kWasmF32: {
float f32_value = 0;
if (!value->IsUndefined()) {
@@ -1107,6 +1234,17 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
+ case i::wasm::kWasmAnyRef: {
+ if (args.Length() < 2) {
+      // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetAnyRef(
+ handle(i::ReadOnlyRoots(i_isolate).null_value(), i_isolate));
+ break;
+ }
+ global_obj->SetAnyRef(Utils::OpenHandle(*value));
+ break;
+ }
default:
UNREACHABLE();
}
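
For the new i64 case, the initial value goes through ToBigInt and then BigInt::Int64Value(), which wraps the BigInt's value modulo 2^64. The public API can also report whether that truncation was lossless; a sketch using only documented v8::BigInt methods:

    #include "v8.h"  // v8::Local, v8::BigInt

    // Returns false if the BigInt did not fit into a signed 64-bit value
    // (the stored result is then the value wrapped mod 2^64).
    bool ReadInt64(v8::Local<v8::BigInt> value, int64_t* out) {
      bool lossless = false;
      *out = value->Int64Value(&lossless);
      return lossless;
    }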
@@ -1171,31 +1309,39 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
- int64_t grow_by = 0;
- if (!args[0]->IntegerValue(context).To(&grow_by)) return;
+ uint32_t grow_by;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &grow_by)) {
+ return;
+ }
+
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
- int old_size = old_array->length();
+ uint32_t old_size = static_cast<uint32_t>(old_array->length());
- int64_t max_size64 = receiver->maximum_length()->Number();
- if (max_size64 < 0 || max_size64 > i::FLAG_wasm_max_table_size) {
+ uint64_t max_size64 = receiver->maximum_length()->Number();
+ if (max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
- if (grow_by < 0 || grow_by > max_size64 - old_size) {
- thrower.RangeError(grow_by < 0 ? "trying to shrink table"
- : "maximum table size exceeded");
+ DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+
+ uint64_t new_size64 =
+ static_cast<uint64_t>(old_size) + static_cast<uint64_t>(grow_by);
+ if (new_size64 > max_size64) {
+ thrower.RangeError("maximum table size exceeded");
return;
}
-
- int new_size = static_cast<int>(old_size + grow_by);
- receiver->Grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
+ uint32_t new_size = static_cast<uint32_t>(new_size64);
if (new_size != old_size) {
+ receiver->Grow(i_isolate, new_size - old_size);
+
i::Handle<i::FixedArray> new_array =
i_isolate->factory()->NewFixedArray(new_size);
- for (int i = 0; i < old_size; ++i) new_array->set(i, old_array->get(i));
- i::Object* null = i::ReadOnlyRoots(i_isolate).null_value();
- for (int i = old_size; i < new_size; ++i) new_array->set(i, null);
+ for (uint32_t i = 0; i < old_size; ++i) {
+ new_array->set(i, old_array->get(i));
+ }
+ i::Object null = i::ReadOnlyRoots(i_isolate).null_value();
+ for (uint32_t i = old_size; i < new_size; ++i) new_array->set(i, null);
receiver->set_functions(*new_array);
}
@@ -1213,15 +1359,19 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
- int64_t i = 0;
- if (!args[0]->IntegerValue(context).To(&i)) return;
+
+ uint32_t index;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
+ return;
+ }
+
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- if (i < 0 || i >= array->length()) {
- thrower.RangeError("index out of bounds");
+ if (index >= static_cast<uint32_t>(array->length())) {
+ thrower.RangeError("Index out of bounds");
return;
}
- i::Handle<i::Object> value(array->get(static_cast<int>(i)), i_isolate);
+ i::Handle<i::Object> value(array->get(static_cast<int>(index)), i_isolate);
return_value.Set(Utils::ToLocal(value));
}
@@ -1235,8 +1385,10 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
EXTRACT_THIS(receiver, WasmTableObject);
// Parameter 0.
- int64_t index;
- if (!args[0]->IntegerValue(context).To(&index)) return;
+ uint32_t index;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &index)) {
+ return;
+ }
// Parameter 1.
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
@@ -1246,12 +1398,12 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (index < 0 || index >= receiver->functions()->length()) {
+ if (index >= static_cast<uint64_t>(receiver->functions()->length())) {
thrower.RangeError("index out of bounds");
return;
}
- i::WasmTableObject::Set(i_isolate, receiver, static_cast<int32_t>(index),
+ i::WasmTableObject::Set(i_isolate, receiver, index,
value->IsNull(i_isolate)
? i::Handle<i::JSFunction>::null()
: i::Handle<i::JSFunction>::cast(value));
@@ -1266,11 +1418,13 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
- int64_t delta_size = 0;
- if (!args[0]->IntegerValue(context).To(&delta_size)) return;
+ uint32_t delta_size;
+ if (!EnforceUint32("Argument 0", args[0], context, &thrower, &delta_size)) {
+ return;
+ }
- int64_t max_size64 = receiver->maximum_pages();
- if (max_size64 < 0 || max_size64 > int64_t{i::wasm::max_mem_pages()}) {
+ uint64_t max_size64 = receiver->maximum_pages();
+ if (max_size64 > uint64_t{i::wasm::max_mem_pages()}) {
max_size64 = i::wasm::max_mem_pages();
}
i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer(), i_isolate);
@@ -1278,15 +1432,18 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("This memory cannot be grown");
return;
}
- int64_t old_size = old_buffer->byte_length() / i::wasm::kWasmPageSize;
- int64_t new_size64 = old_size + delta_size;
- if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
- thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
- : "maximum memory size exceeded");
+
+ DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
+
+ uint64_t old_size64 = old_buffer->byte_length() / i::wasm::kWasmPageSize;
+ uint64_t new_size64 = old_size64 + static_cast<uint64_t>(delta_size);
+
+ if (new_size64 > max_size64) {
+ thrower.RangeError("Maximum memory size exceeded");
return;
}
- int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver,
- static_cast<uint32_t>(delta_size));
+
+ int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver, delta_size);
if (ret == -1) {
thrower.RangeError("Unable to grow instance memory.");
return;
@@ -1337,15 +1494,26 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::kWasmI32:
return_value.Set(receiver->GetI32());
break;
- case i::wasm::kWasmI64:
- thrower.TypeError("Can't get the value of i64 WebAssembly.Global");
+ case i::wasm::kWasmI64: {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (enabled_features.bigint) {
+ Local<BigInt> value = BigInt::New(isolate, receiver->GetI64());
+
+ return_value.Set(value);
+ } else {
+ thrower.TypeError("Can't get the value of i64 WebAssembly.Global");
+ }
break;
+ }
case i::wasm::kWasmF32:
return_value.Set(receiver->GetF32());
break;
case i::wasm::kWasmF64:
return_value.Set(receiver->GetF64());
break;
+ case i::wasm::kWasmAnyRef:
+ return_value.Set(Utils::ToLocal(receiver->GetAnyRef()));
+ break;
default:
UNREACHABLE();
}
@@ -1376,6 +1544,10 @@ void WebAssemblyGlobalSetValue(
thrower.TypeError("Can't set the value of an immutable global.");
return;
}
+ if (args[0]->IsUndefined()) {
+ thrower.TypeError("Argument 0: must be a value");
+ return;
+ }
switch (receiver->type()) {
case i::wasm::kWasmI32: {
@@ -1384,9 +1556,17 @@ void WebAssemblyGlobalSetValue(
receiver->SetI32(i32_value);
break;
}
- case i::wasm::kWasmI64:
- thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ case i::wasm::kWasmI64: {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (enabled_features.bigint) {
+ v8::Local<v8::BigInt> bigint_value;
+ if (!args[0]->ToBigInt(context).ToLocal(&bigint_value)) return;
+ receiver->SetI64(bigint_value->Int64Value());
+ } else {
+ thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ }
break;
+ }
case i::wasm::kWasmF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
@@ -1399,6 +1579,10 @@ void WebAssemblyGlobalSetValue(
receiver->SetF64(f64_value);
break;
}
+ case i::wasm::kWasmAnyRef: {
+ receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1436,31 +1620,36 @@ Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name,
Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
const char* str, FunctionCallback func,
- int length = 0) {
+ int length = 0,
+ PropertyAttributes attributes = NONE) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> function = CreateFunc(isolate, name, func);
function->shared()->set_length(length);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(isolate, object, name, function, attributes);
return function;
}
+Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
+ Handle<JSObject> object,
+ const char* str,
+ FunctionCallback func) {
+ return InstallFunc(isolate, object, str, func, 1, DONT_ENUM);
+}
+
Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
return Name::ToFunctionName(isolate, name, isolate->factory()->get_string())
.ToHandleChecked();
}
-void InstallGetter(Isolate* isolate, Handle<JSObject> object,
- const char* str, FunctionCallback func) {
+void InstallGetter(Isolate* isolate, Handle<JSObject> object, const char* str,
+ FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> function =
CreateFunc(isolate, GetterName(isolate, name), func);
- v8::PropertyAttribute attributes =
- static_cast<v8::PropertyAttribute>(v8::DontEnum);
Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
Utils::ToLocal(function),
- Local<Function>(), attributes);
+ Local<Function>(), v8::None);
}
Handle<String> SetterName(Isolate* isolate, Handle<String> name) {
@@ -1478,8 +1667,7 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
CreateFunc(isolate, SetterName(isolate, name), setter);
setter_func->shared()->set_length(1);
- v8::PropertyAttribute attributes =
- static_cast<v8::PropertyAttribute>(v8::DontEnum);
+ v8::PropertyAttribute attributes = v8::None;
Utils::ToLocal(object)->SetAccessorProperty(
Utils::ToLocal(name), Utils::ToLocal(getter_func),
@@ -1502,7 +1690,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
// Install the JS API once only.
- Object* prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
+ Object prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
if (!prev->IsUndefined(isolate)) {
DCHECK(prev->IsJSFunction());
return;
@@ -1517,7 +1705,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
PropertyAttributes ro_attributes =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
@@ -1536,12 +1723,12 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Expose the API on the global object if configured to do so.
if (exposed_on_global_object) {
- JSObject::AddProperty(isolate, global, name, webassembly, attributes);
+ JSObject::AddProperty(isolate, global, name, webassembly, DONT_ENUM);
}
// Setup Module
Handle<JSFunction> module_constructor =
- InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
+ InstallConstructorFunc(isolate, webassembly, "Module", WebAssemblyModule);
context->set_wasm_module_constructor(*module_constructor);
SetDummyInstanceTemplate(isolate, module_constructor);
JSFunction::EnsureHasInitialMap(module_constructor);
@@ -1560,8 +1747,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Module"), ro_attributes);
// Setup Instance
- Handle<JSFunction> instance_constructor =
- InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1);
+ Handle<JSFunction> instance_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Instance", WebAssemblyInstance);
context->set_wasm_instance_constructor(*instance_constructor);
SetDummyInstanceTemplate(isolate, instance_constructor);
JSFunction::EnsureHasInitialMap(instance_constructor);
@@ -1578,7 +1765,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Table
Handle<JSFunction> table_constructor =
- InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1);
+ InstallConstructorFunc(isolate, webassembly, "Table", WebAssemblyTable);
context->set_wasm_table_constructor(*table_constructor);
SetDummyInstanceTemplate(isolate, table_constructor);
JSFunction::EnsureHasInitialMap(table_constructor);
@@ -1596,7 +1783,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Memory
Handle<JSFunction> memory_constructor =
- InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1);
+ InstallConstructorFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
context->set_wasm_memory_constructor(*memory_constructor);
SetDummyInstanceTemplate(isolate, memory_constructor);
JSFunction::EnsureHasInitialMap(memory_constructor);
@@ -1615,29 +1802,26 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
// Setup Global
- if (enabled_features.mut_global) {
- Handle<JSFunction> global_constructor =
- InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1);
- context->set_wasm_global_constructor(*global_constructor);
- SetDummyInstanceTemplate(isolate, global_constructor);
- JSFunction::EnsureHasInitialMap(global_constructor);
- Handle<JSObject> global_proto(
- JSObject::cast(global_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> global_map = isolate->factory()->NewMap(
- i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
- JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
- InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
- InstallGetterSetter(isolate, global_proto, "value",
- WebAssemblyGlobalGetValue, WebAssemblyGlobalSetValue);
- JSObject::AddProperty(isolate, global_proto,
- factory->to_string_tag_symbol(),
- v8_str(isolate, "WebAssembly.Global"), ro_attributes);
- }
+ Handle<JSFunction> global_constructor =
+ InstallConstructorFunc(isolate, webassembly, "Global", WebAssemblyGlobal);
+ context->set_wasm_global_constructor(*global_constructor);
+ SetDummyInstanceTemplate(isolate, global_constructor);
+ JSFunction::EnsureHasInitialMap(global_constructor);
+ Handle<JSObject> global_proto(
+ JSObject::cast(global_constructor->instance_prototype()), isolate);
+ i::Handle<i::Map> global_map =
+ isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
+ JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
+ InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
+ InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
+ WebAssemblyGlobalSetValue);
+ JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Global"), ro_attributes);
// Setup Exception
if (enabled_features.eh) {
- Handle<JSFunction> exception_constructor =
- InstallFunc(isolate, webassembly, "Exception", WebAssemblyException, 1);
+ Handle<JSFunction> exception_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Exception", WebAssemblyException);
context->set_wasm_exception_constructor(*exception_constructor);
SetDummyInstanceTemplate(isolate, exception_constructor);
JSFunction::EnsureHasInitialMap(exception_constructor);
@@ -1650,22 +1834,21 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
}
// Setup errors
- attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
isolate->native_context()->wasm_compile_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->CompileError_string(),
- compile_error, attributes);
+ compile_error, DONT_ENUM);
Handle<JSFunction> link_error(
isolate->native_context()->wasm_link_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->LinkError_string(), link_error,
- attributes);
+ DONT_ENUM);
Handle<JSFunction> runtime_error(
isolate->native_context()->wasm_runtime_error_function(), isolate);
JSObject::AddProperty(isolate, webassembly,
isolate->factory()->RuntimeError_string(),
- runtime_error, attributes);
+ runtime_error, DONT_ENUM);
}
#undef ASSIGN
diff --git a/chromium/v8/src/wasm/wasm-linkage.h b/chromium/v8/src/wasm/wasm-linkage.h
index 6365496a6cd..1761a4cea0c 100644
--- a/chromium/v8/src/wasm/wasm-linkage.h
+++ b/chromium/v8/src/wasm/wasm-linkage.h
@@ -205,7 +205,7 @@ class LinkageAllocator {
// Stackslots are counted upwards starting from 0 (or the offset set by
// {SetStackOffset}.
int NumStackSlots(MachineRepresentation type) {
- return std::max(1, ElementSizeInBytes(type) / kPointerSize);
+ return std::max(1, ElementSizeInBytes(type) / kSystemPointerSize);
}
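
The kPointerSize to kSystemPointerSize change is part of V8's split between the tagged-value size and the machine word size (in preparation for pointer compression); stack slots are machine words, so the division must use the system pointer size. A worked example of the formula:

    #include <algorithm>

    // max(1, element_size / word_size): wide types span several slots,
    // sub-word types still occupy a whole one.
    int NumStackSlotsSketch(int element_size_in_bytes, int word_size_in_bytes) {
      return std::max(1, element_size_in_bytes / word_size_in_bytes);
    }
    // NumStackSlotsSketch(8, 4) == 2  // f64 on a 32-bit target
    // NumStackSlotsSketch(4, 8) == 1  // i32 on a 64-bit target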
// Stackslots are counted upwards starting from 0 (or the offset set by
diff --git a/chromium/v8/src/wasm/wasm-memory.cc b/chromium/v8/src/wasm/wasm-memory.cc
index df3116ebb8e..b4aee28d78b 100644
--- a/chromium/v8/src/wasm/wasm-memory.cc
+++ b/chromium/v8/src/wasm/wasm-memory.cc
@@ -154,6 +154,20 @@ WasmMemoryTracker::~WasmMemoryTracker() {
DCHECK_EQ(allocated_address_space_, 0u);
}
+void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
+ Heap* heap, size_t size, void** allocation_base,
+ size_t* allocation_length) {
+ return TryAllocateBackingStore(this, heap, size, allocation_base,
+ allocation_length);
+}
+
+void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
+ void* buffer_start) {
+ ReleaseAllocation(nullptr, buffer_start);
+ CHECK(FreePages(GetPlatformPageAllocator(),
+ reinterpret_cast<void*>(memory.begin()), memory.size()));
+}
+
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
ReservationLimit limit) {
size_t reservation_limit =
@@ -271,7 +285,7 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
constexpr bool is_wasm_memory = true;
JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
shared, is_wasm_memory);
- buffer->set_is_neuterable(false);
+ buffer->set_is_detachable(false);
buffer->set_is_growable(true);
return buffer;
}
@@ -309,17 +323,17 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
- DCHECK(!buffer->is_neuterable());
+ DCHECK(!buffer->is_detachable());
const bool is_external = buffer->is_external();
- DCHECK(!buffer->is_neuterable());
+ DCHECK(!buffer->is_detachable());
if (!is_external) {
buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*buffer);
if (free_memory) {
- // We need to free the memory before neutering the buffer because
+ // We need to free the memory before detaching the buffer because
// FreeBackingStore reads buffer->allocation_base(), which is nulled out
- // by Neuter. This means there is a dangling pointer until we neuter the
+ // by Detach. This means there is a dangling pointer until we detach the
// buffer. Since there is no way for the user to directly call
// FreeBackingStore, we can ensure this is safe.
buffer->FreeBackingStoreFromMainThread();
@@ -328,8 +342,8 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
DCHECK(buffer->is_external());
buffer->set_is_wasm_memory(false);
- buffer->set_is_neuterable(true);
- buffer->Neuter();
+ buffer->set_is_detachable(true);
+ buffer->Detach();
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/wasm-memory.h b/chromium/v8/src/wasm/wasm-memory.h
index 5a919fe71c5..5fb4554cc2d 100644
--- a/chromium/v8/src/wasm/wasm-memory.h
+++ b/chromium/v8/src/wasm/wasm-memory.h
@@ -66,6 +66,18 @@ class WasmMemoryTracker {
friend WasmMemoryTracker;
};
+ // Allow tests to allocate a backing store the same way as we do it for
+  // WebAssembly memory. This is used in unit tests for the trap handler to
+ // generate the same signals/exceptions for invalid memory accesses as
+ // we would get with WebAssembly memory.
+ V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
+ Heap* heap, size_t size, void** allocation_base,
+ size_t* allocation_length);
+
+ // Free memory allocated with TryAllocateBackingStoreForTesting.
+ V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
+ void* buffer_start);
+
// Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes);
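
The two *ForTesting hooks expose the normal wasm backing-store path so trap-handler unit tests see the same guard-page layout as real WebAssembly memories. A usage sketch derived only from the declarations above; the surrounding test scaffolding is hypothetical:

    // Hypothetical test helper; only the two *ForTesting calls come from the
    // header above.
    void AllocateProbeAndFree(WasmMemoryTracker* tracker, Heap* heap) {
      void* allocation_base = nullptr;
      size_t allocation_length = 0;
      void* start = tracker->TryAllocateBackingStoreForTesting(
          heap, /*size=*/4096, &allocation_base, &allocation_length);
      if (start == nullptr) return;  // allocation may legitimately fail
      // ... access [start, start + 4096) and beyond to exercise the trap
      // handler ...
      tracker->FreeBackingStoreForTesting(
          base::AddressRegion(reinterpret_cast<Address>(allocation_base),
                              allocation_length),
          start);
    }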
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index eccd897737d..3502a03272c 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -260,7 +260,8 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
constexpr bool is_external = false;
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
memcpy(memory, wire_bytes.start() + section.payload.offset(),
@@ -320,7 +321,7 @@ size_t EstimateStoredSize(const WasmModule* module) {
VectorSize(module->functions) + VectorSize(module->data_segments) +
VectorSize(module->tables) + VectorSize(module->import_table) +
VectorSize(module->export_table) + VectorSize(module->exceptions) +
- VectorSize(module->table_inits);
+ VectorSize(module->elem_segments);
}
} // namespace wasm
} // namespace internal
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index 8514a321450..75f6e98ca52 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -109,16 +109,16 @@ struct WasmTable {
bool exported = false; // true if exported.
};
-// Static representation of how to initialize a table.
-struct WasmTableInit {
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmTableInit);
+// Static representation of a wasm element segment (table initializer).
+struct WasmElemSegment {
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
// Construct an active segment.
- WasmTableInit(uint32_t table_index, WasmInitExpr offset)
+ WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
: table_index(table_index), offset(offset), active(true) {}
// Construct a passive segment, which has no table index or offset.
- WasmTableInit() : table_index(0), active(false) {}
+ WasmElemSegment() : table_index(0), active(false) {}
uint32_t table_index;
WasmInitExpr offset;
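
The rename follows the bulk-memory proposal's vocabulary: an active element segment names a table and an offset init-expression and is written into that table during instantiation, while a passive segment carries neither and is only copied later by table.init. The two constructors above encode exactly that distinction; a usage sketch, assuming WasmInitExpr has an i32-constant convenience constructor:

    // Active: applied to table 0 at the given offset during instantiation.
    WasmElemSegment active(/*table_index=*/0, WasmInitExpr(16));
    // Passive: no table or offset; applied on demand via table.init.
    WasmElemSegment passive;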
@@ -165,11 +165,13 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmGlobal> globals;
// Size of the buffer required for all globals that are not imported and
// mutable.
- uint32_t globals_buffer_size = 0;
+ uint32_t untagged_globals_buffer_size = 0;
+ uint32_t tagged_globals_buffer_size = 0;
uint32_t num_imported_mutable_globals = 0;
uint32_t num_imported_functions = 0;
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
+ uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
@@ -179,7 +181,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmImport> import_table;
std::vector<WasmExport> export_table;
std::vector<WasmException> exceptions;
- std::vector<WasmTableInit> table_inits;
+ std::vector<WasmElemSegment> elem_segments;
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
@@ -201,7 +203,7 @@ size_t EstimateStoredSize(const WasmModule* module);
// on module_bytes, as this storage is only guaranteed to be alive as long as
// this struct is alive.
struct V8_EXPORT_PRIVATE ModuleWireBytes {
- ModuleWireBytes(Vector<const byte> module_bytes)
+ explicit ModuleWireBytes(Vector<const byte> module_bytes)
: module_bytes_(module_bytes) {}
ModuleWireBytes(const byte* start, const byte* end)
: module_bytes_(start, static_cast<int>(end - start)) {
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index 7f642f523df..9adcc94f12e 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -9,8 +9,11 @@
#include "src/contexts-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/oddball-inl.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
@@ -21,8 +24,22 @@
namespace v8 {
namespace internal {
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmDebugInfo, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
+OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+
+NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
+
CAST_ACCESSOR(WasmDebugInfo)
CAST_ACCESSOR(WasmExceptionObject)
+CAST_ACCESSOR(WasmExceptionTag)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
@@ -33,17 +50,10 @@ CAST_ACCESSOR(AsmWasmData)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(); \
+ return !READ_FIELD(*this, offset)->IsUndefined(); \
} \
ACCESSORS(holder, name, type, offset)
-// TODO(3770): Replacement for the above, temporarily separate.
-#define OPTIONAL_ACCESSORS2(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(this, offset)->IsUndefined(); \
- } \
- ACCESSORS2(holder, name, type, offset)
-
#define READ_PRIMITIVE_FIELD(p, type, offset) \
(*reinterpret_cast<type const*>(FIELD_ADDR(p, offset)))
@@ -61,17 +71,21 @@ CAST_ACCESSOR(AsmWasmData)
// WasmModuleObject
ACCESSORS(WasmModuleObject, managed_native_module, Managed<wasm::NativeModule>,
kNativeModuleOffset)
-ACCESSORS2(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
+ACCESSORS(WasmModuleObject, export_wrappers, FixedArray, kExportWrappersOffset)
ACCESSORS(WasmModuleObject, script, Script, kScriptOffset)
ACCESSORS(WasmModuleObject, weak_instance_list, WeakArrayList,
kWeakInstanceListOffset)
-OPTIONAL_ACCESSORS2(WasmModuleObject, asm_js_offset_table, ByteArray,
- kAsmJsOffsetTableOffset)
-OPTIONAL_ACCESSORS2(WasmModuleObject, breakpoint_infos, FixedArray,
- kBreakPointInfosOffset)
+OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray,
+ kAsmJsOffsetTableOffset)
+OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
+ kBreakPointInfosOffset)
wasm::NativeModule* WasmModuleObject::native_module() const {
return managed_native_module()->raw();
}
+std::shared_ptr<wasm::NativeModule> WasmModuleObject::shared_native_module()
+ const {
+ return managed_native_module()->get();
+}
const wasm::WasmModule* WasmModuleObject::module() const {
// TODO(clemensh): Remove this helper (inline in callers).
return native_module()->module();
@@ -88,9 +102,9 @@ bool WasmModuleObject::is_asm_js() {
}
// WasmTableObject
-ACCESSORS2(WasmTableObject, functions, FixedArray, kFunctionsOffset)
+ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
-ACCESSORS2(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
+ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
// WasmMemoryObject
ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
@@ -98,7 +112,9 @@ SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakArrayList, kInstancesOffset)
// WasmGlobalObject
-ACCESSORS(WasmGlobalObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
+ kUntaggedBufferOffset)
+ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, type, WasmGlobalObject::TypeBits)
@@ -110,8 +126,9 @@ int WasmGlobalObject::type_size() const {
}
Address WasmGlobalObject::address() const {
- DCHECK_LE(offset() + type_size(), array_buffer()->byte_length());
- return Address(array_buffer()->backing_store()) + offset();
+ DCHECK_NE(type(), wasm::kWasmAnyRef);
+ DCHECK_LE(offset() + type_size(), untagged_buffer()->byte_length());
+ return Address(untagged_buffer()->backing_store()) + offset();
}
int32_t WasmGlobalObject::GetI32() {
@@ -130,6 +147,11 @@ double WasmGlobalObject::GetF64() {
return ReadLittleEndianValue<double>(address());
}
+Handle<Object> WasmGlobalObject::GetAnyRef() {
+ DCHECK_EQ(type(), wasm::kWasmAnyRef);
+ return handle(tagged_buffer()->get(offset()), GetIsolate());
+}
+
void WasmGlobalObject::SetI32(int32_t value) {
WriteLittleEndianValue<int32_t>(address(), value);
}
@@ -146,6 +168,11 @@ void WasmGlobalObject::SetF64(double value) {
WriteLittleEndianValue<double>(address(), value);
}
+void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmAnyRef);
+ tagged_buffer()->set(offset(), *value);
+}
+
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
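
GetAnyRef/SetAnyRef complete the storage split introduced by the tagged_buffer accessor: numeric globals are raw bytes at an offset inside an untagged JSArrayBuffer, read with little-endian loads, while anyref globals are elements of a tagged FixedArray so the GC can see and relocate the references. A self-contained sketch of the two paths, with all names hypothetical:

    #include <cstdint>
    #include <cstring>

    // Illustration only: FixedArray and JSArrayBuffer play these roles in V8.
    struct GlobalStorageSketch {
      uint8_t* untagged_base;  // i32/i64/f32/f64 globals, addressed by offset
      void** tagged_slots;     // anyref globals, addressed by index; GC-visited

      double GetF64(uint32_t byte_offset) const {
        double v;
        std::memcpy(&v, untagged_base + byte_offset, sizeof v);
        return v;
      }
      void* GetAnyRef(uint32_t index) const { return tagged_slots[index]; }
    };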
@@ -170,37 +197,55 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address,
kJumpTableStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_starts, Address*,
+ kDataSegmentStartsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
+ kDataSegmentSizesOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_data_segments, byte*,
+ kDroppedDataSegmentsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
+ kDroppedElemSegmentsOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
-ACCESSORS2(WasmInstanceObject, native_context, Context, kNativeContextOffset)
+ACCESSORS(WasmInstanceObject, native_context, Context, kNativeContextOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
kMemoryObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
- kGlobalsBufferOffset)
-OPTIONAL_ACCESSORS2(WasmInstanceObject, imported_mutable_globals_buffers,
- FixedArray, kImportedMutableGlobalsBuffersOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, untagged_globals_buffer, JSArrayBuffer,
+ kUntaggedGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, tagged_globals_buffer, FixedArray,
+ kTaggedGlobalsBufferOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
+ FixedArray, kImportedMutableGlobalsBuffersOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
-ACCESSORS2(WasmInstanceObject, imported_function_refs, FixedArray,
- kImportedFunctionRefsOffset)
-OPTIONAL_ACCESSORS2(WasmInstanceObject, indirect_function_table_refs,
- FixedArray, kIndirectFunctionTableRefsOffset)
+ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
+ kImportedFunctionRefsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
+ kIndirectFunctionTableRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
-OPTIONAL_ACCESSORS2(WasmInstanceObject, exceptions_table, FixedArray,
- kExceptionsTableOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
+ kExceptionsTableOffset)
ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
-ACCESSORS2(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
+ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
inline bool WasmInstanceObject::has_indirect_function_table() {
return indirect_function_table_sig_ids() != nullptr;
}
+void WasmInstanceObject::clear_padding() {
+ if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
+ DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
+ FIELD_SIZE(kOptionalPaddingOffset));
+ }
+}
+
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
Handle<WasmInstanceObject> instance, int index)
: instance_(instance), index_(index) {
@@ -216,12 +261,18 @@ ImportedFunctionEntry::ImportedFunctionEntry(
}
// WasmExceptionObject
-ACCESSORS2(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
+ACCESSORS(WasmExceptionObject, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
ACCESSORS(WasmExceptionObject, exception_tag, HeapObject, kExceptionTagOffset)
+// WasmExportedFunction
+WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
+ SLOW_DCHECK(IsWasmExportedFunction(*this));
+}
+CAST_ACCESSOR(WasmExportedFunction)
+
// WasmExportedFunctionData
-ACCESSORS2(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
kInstanceOffset)
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
@@ -231,11 +282,11 @@ SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
-ACCESSORS2(WasmDebugInfo, interpreted_functions, FixedArray,
- kInterpretedFunctionsOffset)
-OPTIONAL_ACCESSORS2(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
-OPTIONAL_ACCESSORS2(WasmDebugInfo, c_wasm_entries, FixedArray,
- kCWasmEntriesOffset)
+ACCESSORS(WasmDebugInfo, interpreted_functions, FixedArray,
+ kInterpretedFunctionsOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
+OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
+ kCWasmEntriesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
kCWasmEntryMapOffset)
@@ -248,11 +299,14 @@ uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
+// WasmExceptionTag
+SMI_ACCESSORS(WasmExceptionTag, index, kIndexOffset)
+
// AsmWasmData
ACCESSORS(AsmWasmData, managed_native_module, Managed<wasm::NativeModule>,
kManagedNativeModuleOffset)
-ACCESSORS2(AsmWasmData, export_wrappers, FixedArray, kExportWrappersOffset)
-ACCESSORS2(AsmWasmData, asm_js_offset_table, ByteArray, kAsmJsOffsetTableOffset)
+ACCESSORS(AsmWasmData, export_wrappers, FixedArray, kExportWrappersOffset)
+ACCESSORS(AsmWasmData, asm_js_offset_table, ByteArray, kAsmJsOffsetTableOffset)
ACCESSORS(AsmWasmData, uses_bitset, HeapNumber, kUsesBitsetOffset)
#include "src/objects/object-macros-undef.h"
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index 6a0be1d81d2..392ddd4ca83 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -14,6 +14,7 @@
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
@@ -65,13 +66,24 @@ class WasmInstanceNativeAllocations {
// Allocates initial native storage for a given instance.
WasmInstanceNativeAllocations(Handle<WasmInstanceObject> instance,
size_t num_imported_functions,
- size_t num_imported_mutable_globals) {
+ size_t num_imported_mutable_globals,
+ size_t num_data_segments,
+ size_t num_elem_segments) {
SET(instance, imported_function_targets,
reinterpret_cast<Address*>(
calloc(num_imported_functions, sizeof(Address))));
SET(instance, imported_mutable_globals,
reinterpret_cast<Address*>(
calloc(num_imported_mutable_globals, sizeof(Address))));
+ SET(instance, data_segment_starts,
+ reinterpret_cast<Address*>(calloc(num_data_segments, sizeof(Address))));
+ SET(instance, data_segment_sizes,
+ reinterpret_cast<uint32_t*>(
+ calloc(num_data_segments, sizeof(uint32_t))));
+ SET(instance, dropped_data_segments,
+ reinterpret_cast<uint8_t*>(calloc(num_data_segments, sizeof(uint8_t))));
+ SET(instance, dropped_elem_segments,
+ reinterpret_cast<uint8_t*>(calloc(num_elem_segments, sizeof(uint8_t))));
}
~WasmInstanceNativeAllocations() {
::free(indirect_function_table_sig_ids_);
@@ -82,6 +94,14 @@ class WasmInstanceNativeAllocations {
imported_function_targets_ = nullptr;
::free(imported_mutable_globals_);
imported_mutable_globals_ = nullptr;
+ ::free(data_segment_starts_);
+ data_segment_starts_ = nullptr;
+ ::free(data_segment_sizes_);
+ data_segment_sizes_ = nullptr;
+ ::free(dropped_data_segments_);
+ dropped_data_segments_ = nullptr;
+ ::free(dropped_elem_segments_);
+ dropped_elem_segments_ = nullptr;
}
// Resizes the indirect function table.
void resize_indirect_function_table(Isolate* isolate,
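The allocation pattern above: the constructor calloc()s one parallel array per piece of per-instance raw data and the destructor free()s every one, so the arrays share the instance's lifetime and start out zeroed (which is also what makes the dropped_* flags default to "not dropped" before InitDataSegmentArrays runs). A reduced RAII sketch of the same pattern, with hypothetical names:

    #include <cstdint>
    #include <cstdlib>

    class NativeArrays {
     public:
      NativeArrays(size_t num_data_segments, size_t num_elem_segments)
          : data_segment_starts_(static_cast<uintptr_t*>(
                calloc(num_data_segments, sizeof(uintptr_t)))),
            data_segment_sizes_(static_cast<uint32_t*>(
                calloc(num_data_segments, sizeof(uint32_t)))),
            dropped_data_segments_(static_cast<uint8_t*>(
                calloc(num_data_segments, sizeof(uint8_t)))),
            dropped_elem_segments_(static_cast<uint8_t*>(
                calloc(num_elem_segments, sizeof(uint8_t)))) {}
      ~NativeArrays() {
        free(data_segment_starts_);
        free(data_segment_sizes_);
        free(dropped_data_segments_);
        free(dropped_elem_segments_);
      }
      NativeArrays(const NativeArrays&) = delete;
      NativeArrays& operator=(const NativeArrays&) = delete;

     private:
      uintptr_t* data_segment_starts_;
      uint32_t* data_segment_sizes_;
      uint8_t* dropped_data_segments_;
      uint8_t* dropped_elem_segments_;
    };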
@@ -123,22 +143,29 @@ class WasmInstanceNativeAllocations {
Address* indirect_function_table_targets_ = nullptr;
Address* imported_function_targets_ = nullptr;
Address* imported_mutable_globals_ = nullptr;
+ Address* data_segment_starts_ = nullptr;
+ uint32_t* data_segment_sizes_ = nullptr;
+ uint8_t* dropped_data_segments_ = nullptr;
+ uint8_t* dropped_elem_segments_ = nullptr;
#undef SET
};
size_t EstimateNativeAllocationsSize(const WasmModule* module) {
- size_t estimate = sizeof(WasmInstanceNativeAllocations) +
- (1 * kPointerSize * module->num_imported_mutable_globals) +
- (2 * kPointerSize * module->num_imported_functions);
+ size_t estimate =
+ sizeof(WasmInstanceNativeAllocations) +
+ (1 * kSystemPointerSize * module->num_imported_mutable_globals) +
+ (2 * kSystemPointerSize * module->num_imported_functions) +
+ ((kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
+ module->num_declared_data_segments);
for (auto& table : module->tables) {
- estimate += 3 * kPointerSize * table.initial_size;
+ estimate += 3 * kSystemPointerSize * table.initial_size;
}
return estimate;
}
WasmInstanceNativeAllocations* GetNativeAllocations(
- WasmInstanceObject* instance) {
- return reinterpret_cast<Managed<WasmInstanceNativeAllocations>*>(
+ WasmInstanceObject instance) {
+ return Managed<WasmInstanceNativeAllocations>::cast(
instance->managed_native_allocations())
->raw();
}
@@ -278,7 +305,7 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
namespace {
-int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
+int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
}
@@ -294,7 +321,7 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
int right = breakpoint_infos->length(); // exclusive
while (right - left > 1) {
int mid = left + (right - left) / 2;
- Object* mid_obj = breakpoint_infos->get(mid);
+ Object mid_obj = breakpoint_infos->get(mid);
if (GetBreakpointPos(isolate, mid_obj) <= position) {
left = mid;
} else {
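The loop above is a textbook binary search for an insert position: left always points at the last entry known to be <= the target, and the half-open interval [left, right) halves each step. A stand-alone sketch of the same invariant over a sorted vector (hypothetical helper; the kMaxInt handling for undefined entries is omitted):

    #include <vector>

    // Returns the index at which `position` should be inserted to keep
    // `positions` sorted (one past the last entry <= position).
    int FindInsertPos(const std::vector<int>& positions, int position) {
      int left = 0;                                    // inclusive
      int right = static_cast<int>(positions.size());  // exclusive
      while (right - left > 1) {
        int mid = left + (right - left) / 2;
        if (positions[mid] <= position) {
          left = mid;
        } else {
          right = mid;
        }
      }
      if (positions.empty() || positions[0] > position) return 0;
      return left + 1;
    }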
@@ -349,7 +376,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
// Move elements [insert_pos, ...] up by one.
for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
- Object* entry = breakpoint_infos->get(i);
+ Object entry = breakpoint_infos->get(i);
if (entry->IsUndefined(isolate)) continue;
new_breakpoint_infos->set(i + 1, entry);
}
@@ -521,7 +548,7 @@ v8::debug::WasmDisassembly WasmModuleObject::DisassembleFunction(
static_cast<uint32_t>(func_index) >= module()->functions.size())
return {};
- Vector<const byte> wire_bytes = native_module()->wire_bytes();
+ wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
std::ostringstream disassembly_os;
v8::debug::WasmDisassembly::OffsetTable offset_table;
@@ -748,7 +775,7 @@ bool WasmModuleObject::GetPositionInfo(uint32_t position,
}
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
- int64_t maximum,
+ uint32_t maximum,
Handle<FixedArray>* js_functions) {
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor(), isolate);
@@ -756,13 +783,12 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
isolate->factory()->NewJSObject(table_ctor));
*js_functions = isolate->factory()->NewFixedArray(initial);
- Object* null = ReadOnlyRoots(isolate).null_value();
+ Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
(*js_functions)->set(i, null);
}
table_obj->set_functions(**js_functions);
- DCHECK_EQ(maximum, static_cast<int>(maximum));
- Handle<Object> max = isolate->factory()->NewNumber(maximum);
+ Handle<Object> max = isolate->factory()->NewNumberFromUint(maximum);
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -818,7 +844,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t table_index, Handle<JSFunction> function) {
+ uint32_t table_index, Handle<JSFunction> function) {
Handle<FixedArray> array(table->functions(), isolate);
if (function.is_null()) {
ClearDispatchTables(isolate, table, table_index); // Degenerate case.
@@ -826,8 +852,6 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
return;
}
- // TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
- DCHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
auto exported_function = Handle<WasmExportedFunction>::cast(function);
Handle<WasmInstanceObject> target_instance(exported_function->instance(),
isolate);
@@ -883,23 +907,10 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
namespace {
MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
- uint32_t pages,
- uint32_t maximum_pages) {
- CHECK_GE(wasm::max_mem_pages(), maximum_pages);
- if (!old_buffer->is_growable()) return {};
- void* old_mem_start = old_buffer->backing_store();
+ size_t new_size) {
+ CHECK_EQ(0, new_size % wasm::kWasmPageSize);
size_t old_size = old_buffer->byte_length();
- CHECK_EQ(0, old_size % wasm::kWasmPageSize);
- size_t old_pages = old_size / wasm::kWasmPageSize;
- CHECK_GE(wasm::max_mem_pages(), old_pages);
-
- if ((pages > maximum_pages - old_pages) || // exceeds remaining
- (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
- return {};
- }
- size_t new_size =
- static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
-
+ void* old_mem_start = old_buffer->backing_store();
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
@@ -914,8 +925,9 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
new_size, PageAllocator::kReadWrite)) {
return {};
}
+ DCHECK_GE(new_size, old_size);
reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
+ ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
}
// NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size.
@@ -976,7 +988,7 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<WasmMemoryObject> WasmMemoryObject::New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
- int32_t maximum) {
+ uint32_t maximum) {
// TODO(kschimpf): Do we need to add an argument that defines the
// style of memory the user prefers (with/without trap handling), so
  // that the memory will match the style of the compiled wasm module?
@@ -997,11 +1009,6 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
return memory_obj;
}
-uint32_t WasmMemoryObject::current_pages() {
- return static_cast<uint32_t>(array_buffer()->byte_length() /
- wasm::kWasmPageSize);
-}
-
bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
const wasm::WasmMemoryTracker::AllocationData* allocation =
isolate->wasm_engine()->memory_tracker()->FindAllocationData(
@@ -1041,13 +1048,6 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
SetInstanceMemory(instance, buffer);
}
-void WasmMemoryObject::RemoveInstance(Handle<WasmMemoryObject> memory,
- Handle<WasmInstanceObject> instance) {
- if (memory->has_instances()) {
- memory->instances()->RemoveOne(MaybeObjectHandle::Weak(instance));
- }
-}
-
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
@@ -1055,25 +1055,37 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
if (!old_buffer->is_growable()) return -1;
- size_t old_size = old_buffer->byte_length();
- DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
- Handle<JSArrayBuffer> new_buffer;
+  // Check the maximum memory size and compute the new size.
uint32_t maximum_pages = wasm::max_mem_pages();
if (memory_object->has_maximum_pages()) {
maximum_pages = std::min(
maximum_pages, static_cast<uint32_t>(memory_object->maximum_pages()));
}
- if (!MemoryGrowBuffer(isolate, old_buffer, pages, maximum_pages)
- .ToHandle(&new_buffer)) {
+ CHECK_GE(wasm::max_mem_pages(), maximum_pages);
+ size_t old_size = old_buffer->byte_length();
+ CHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ size_t old_pages = old_size / wasm::kWasmPageSize;
+ CHECK_GE(wasm::max_mem_pages(), old_pages);
+ if ((pages > maximum_pages - old_pages) || // exceeds remaining
+ (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
+ return -1;
+ }
+ size_t new_size =
+ static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+
+ // Grow the buffer.
+ Handle<JSArrayBuffer> new_buffer;
+ if (!MemoryGrowBuffer(isolate, old_buffer, new_size).ToHandle(&new_buffer)) {
return -1;
}
+ // Update instances if any.
if (memory_object->has_instances()) {
Handle<WeakArrayList> instances(memory_object->instances(), isolate);
for (int i = 0; i < instances->length(); i++) {
MaybeObject elem = instances->Get(i);
- HeapObject* heap_object;
+ HeapObject heap_object;
if (elem->GetHeapObjectIfWeak(&heap_object)) {
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(heap_object), isolate);
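The bounds check that moved into Grow above is deliberately written with subtractions: old_pages + pages could overflow uint32_t, whereas maximum_pages - old_pages cannot underflow once old_pages <= maximum_pages is established. A sketch of the same check in isolation (hypothetical helper; the preconditions the surrounding CHECKs establish are made explicit):

    #include <cstdint>

    bool CanGrow(uint32_t old_pages, uint32_t pages, uint32_t maximum_pages,
                 uint32_t engine_max_pages) {
      if (old_pages > maximum_pages || maximum_pages > engine_max_pages)
        return false;                                // preconditions
      return pages <= maximum_pages - old_pages &&   // within declared maximum
             pages <= engine_max_pages - old_pages;  // within engine limit
    }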
@@ -1089,32 +1101,44 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// static
MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
- wasm::ValueType type, int32_t offset, bool is_mutable) {
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_untagged_buffer,
+ MaybeHandle<FixedArray> maybe_tagged_buffer, wasm::ValueType type,
+ int32_t offset, bool is_mutable) {
Handle<JSFunction> global_ctor(
isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
- uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
-
- Handle<JSArrayBuffer> buffer;
- if (!maybe_buffer.ToHandle(&buffer)) {
- // If no buffer was provided, create one long enough for the given type.
- buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
-
- const bool initialize = true;
- if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, type_size,
- initialize)) {
- return {};
+ if (type == wasm::kWasmAnyRef) {
+ DCHECK(maybe_untagged_buffer.is_null());
+ Handle<FixedArray> tagged_buffer;
+ if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
+ // If no buffer was provided, create one.
+ tagged_buffer = isolate->factory()->NewFixedArray(1, TENURED);
+ CHECK_EQ(offset, 0);
+ }
+ global_obj->set_tagged_buffer(*tagged_buffer);
+ } else {
+ DCHECK(maybe_tagged_buffer.is_null());
+ Handle<JSArrayBuffer> untagged_buffer;
+ uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
+ if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
+ // If no buffer was provided, create one long enough for the given type.
+ untagged_buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+
+ const bool initialize = true;
+ if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
+ type_size, initialize)) {
+ return {};
+ }
}
- }
- // Check that the offset is in bounds.
- CHECK_LE(offset + type_size, buffer->byte_length());
+ // Check that the offset is in bounds.
+ CHECK_LE(offset + type_size, untagged_buffer->byte_length());
- global_obj->set_array_buffer(*buffer);
+ global_obj->set_untagged_buffer(*untagged_buffer);
+ }
global_obj->set_flags(0);
global_obj->set_type(type);
global_obj->set_offset(offset);
@@ -1136,9 +1160,10 @@ void IndirectFunctionTableEntry::Set(int sig_id,
TRACE_IFT(
"IFT entry %p[%d] = {sig_id=%d, target_instance=%p, "
"target_func_index=%d}\n",
- *instance_, index_, sig_id, *target_instance, target_func_index);
+ reinterpret_cast<void*>(instance_->ptr()), index_, sig_id,
+ reinterpret_cast<void*>(target_instance->ptr()), target_func_index);
- Object* ref = nullptr;
+ Object ref;
Address call_target = 0;
if (target_func_index <
static_cast<int>(target_instance->module()->num_imported_functions)) {
@@ -1159,7 +1184,7 @@ void IndirectFunctionTableEntry::Set(int sig_id,
instance_->indirect_function_table_refs()->set(index_, ref);
}
-Object* IndirectFunctionTableEntry::object_ref() {
+Object IndirectFunctionTableEntry::object_ref() {
return instance_->indirect_function_table_refs()->get(index_);
}
@@ -1171,11 +1196,23 @@ Address IndirectFunctionTableEntry::target() {
return instance_->indirect_function_table_targets()[index_];
}
+void IndirectFunctionTableEntry::CopyFrom(
+ const IndirectFunctionTableEntry& that) {
+ instance_->indirect_function_table_sig_ids()[index_] =
+ that.instance_->indirect_function_table_sig_ids()[that.index_];
+ instance_->indirect_function_table_targets()[index_] =
+ that.instance_->indirect_function_table_targets()[that.index_];
+ instance_->indirect_function_table_refs()->set(
+ index_, that.instance_->indirect_function_table_refs()->get(that.index_));
+}
+
void ImportedFunctionEntry::SetWasmToJs(
Isolate* isolate, Handle<JSReceiver> callable,
const wasm::WasmCode* wasm_to_js_wrapper) {
- TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n", *instance_,
- index_, *callable, wasm_to_js_wrapper->instructions().start());
+ TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n",
+ reinterpret_cast<void*>(instance_->ptr()), index_,
+ reinterpret_cast<void*>(callable->ptr()),
+ wasm_to_js_wrapper->instructions().start());
DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_to_js_wrapper->kind());
Handle<Tuple2> tuple =
isolate->factory()->NewTuple2(instance_, callable, TENURED);
@@ -1184,30 +1221,31 @@ void ImportedFunctionEntry::SetWasmToJs(
wasm_to_js_wrapper->instruction_start();
}
-void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject* instance,
+void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
Address call_target) {
TRACE_IFT("Import WASM %p[%d] = {instance=%p, target=%" PRIuPTR "}\n",
- *instance_, index_, instance, call_target);
+ reinterpret_cast<void*>(instance_->ptr()), index_,
+ reinterpret_cast<void*>(instance->ptr()), call_target);
instance_->imported_function_refs()->set(index_, instance);
instance_->imported_function_targets()[index_] = call_target;
}
-WasmInstanceObject* ImportedFunctionEntry::instance() {
+WasmInstanceObject ImportedFunctionEntry::instance() {
// The imported reference entry is either a target instance or a tuple
// of this instance and the target callable.
- Object* value = instance_->imported_function_refs()->get(index_);
+ Object value = instance_->imported_function_refs()->get(index_);
if (value->IsWasmInstanceObject()) {
return WasmInstanceObject::cast(value);
}
- Tuple2* tuple = Tuple2::cast(value);
+ Tuple2 tuple = Tuple2::cast(value);
return WasmInstanceObject::cast(tuple->value1());
}
-JSReceiver* ImportedFunctionEntry::callable() {
+JSReceiver ImportedFunctionEntry::callable() {
return JSReceiver::cast(Tuple2::cast(object_ref())->value2());
}
-Object* ImportedFunctionEntry::object_ref() {
+Object ImportedFunctionEntry::object_ref() {
return instance_->imported_function_refs()->get(index_);
}
@@ -1271,16 +1309,19 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->factory()->NewJSObject(instance_cons, TENURED);
Handle<WasmInstanceObject> instance(
- reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
+ WasmInstanceObject::cast(*instance_object), isolate);
+ instance->clear_padding();
// Initialize the imported function arrays.
auto module = module_object->module();
auto num_imported_functions = module->num_imported_functions;
auto num_imported_mutable_globals = module->num_imported_mutable_globals;
+ auto num_data_segments = module->num_declared_data_segments;
size_t native_allocations_size = EstimateNativeAllocationsSize(module);
auto native_allocations = Managed<WasmInstanceNativeAllocations>::Allocate(
isolate, native_allocations_size, instance, num_imported_functions,
- num_imported_mutable_globals);
+ num_imported_mutable_globals, num_data_segments,
+ module->elem_segments.size());
instance->set_managed_native_allocations(*native_allocations);
Handle<FixedArray> imported_function_refs =
@@ -1315,9 +1356,54 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate, weak_instance_list, MaybeObjectHandle::Weak(instance));
module_object->set_weak_instance_list(*weak_instance_list);
+ InitDataSegmentArrays(instance, module_object);
+ InitElemSegmentArrays(instance, module_object);
+
return instance;
}
+// static
+void WasmInstanceObject::InitDataSegmentArrays(
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmModuleObject> module_object) {
+ auto module = module_object->module();
+ auto wire_bytes = module_object->native_module()->wire_bytes();
+ auto num_data_segments = module->num_declared_data_segments;
+ // The number of declared data segments will be zero if there is no DataCount
+  // section. These arrays are neither allocated nor initialized in that case,
+  // since they cannot be used (the validator checks the number of declared
+  // data segments when validating the memory.init and memory.drop
+  // instructions).
+ DCHECK(num_data_segments == 0 ||
+ num_data_segments == module->data_segments.size());
+ for (size_t i = 0; i < num_data_segments; ++i) {
+ const wasm::WasmDataSegment& segment = module->data_segments[i];
+    // Mark active segments as already dropped, since memory.init on a
+    // dropped passive segment and on an active segment behaves the same.
+ instance->dropped_data_segments()[i] = segment.active ? 1 : 0;
+
+ // Initialize the pointer and size of passive segments.
+ instance->data_segment_starts()[i] =
+ reinterpret_cast<Address>(&wire_bytes[segment.source.offset()]);
+ instance->data_segment_sizes()[i] = segment.source.length();
+ }
+}
+
+void WasmInstanceObject::InitElemSegmentArrays(
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmModuleObject> module_object) {
+ auto module = module_object->module();
+ auto num_elem_segments = module->elem_segments.size();
+ for (size_t i = 0; i < num_elem_segments; ++i) {
+ const wasm::WasmElemSegment& segment = module->elem_segments[i];
+    // Mark active segments as already dropped, since table.init on a
+    // dropped passive segment and on an active segment behaves the same.
+ instance->dropped_elem_segments()[i] = segment.active ? 1 : 0;
+ }
+}
+
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
wasm::NativeModule* native_module = module_object()->native_module();
if (func_index < native_module->num_imported_functions()) {
@@ -1326,6 +1412,71 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
+namespace {
+void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
+ if (src < dst) {
+ for (uint32_t i = count; i > 0; i--) {
+ auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
+ auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
+ to_entry.CopyFrom(from_entry);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; i++) {
+ auto to_entry = IndirectFunctionTableEntry(instance, dst + i);
+ auto from_entry = IndirectFunctionTableEntry(instance, src + i);
+ to_entry.CopyFrom(from_entry);
+ }
+ }
+}
+} // namespace
+
+// static
+bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t dst,
+ uint32_t src, uint32_t count) {
+ CHECK_EQ(0, table_index); // TODO(titzer): multiple tables in TableCopy
+ if (count == 0) return true; // no-op
+ auto max = instance->indirect_function_table_size();
+ if (!IsInBounds(dst, count, max)) return false;
+ if (!IsInBounds(src, count, max)) return false;
+ if (dst == src) return true; // no-op
+
+ if (!instance->has_table_object()) {
+ // No table object, only need to update this instance.
+ CopyTableEntriesImpl(instance, dst, src, count);
+ return true;
+ }
+
+ Handle<WasmTableObject> table =
+ Handle<WasmTableObject>(instance->table_object(), isolate);
+ // Broadcast table copy operation to all instances that import this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ Handle<WasmInstanceObject> target_instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ CopyTableEntriesImpl(target_instance, dst, src, count);
+ }
+
+ // Copy the function entries.
+ Handle<FixedArray> functions(table->functions(), isolate);
+ if (src < dst) {
+ for (uint32_t i = count; i > 0; i--) {
+ functions->set(dst + i - 1, functions->get(src + i - 1));
+ }
+ } else {
+ for (uint32_t i = 0; i < count; i++) {
+ functions->set(dst + i, functions->get(src + i));
+ }
+ }
+ return true;
+}
+
// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
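CopyTableEntriesImpl above implements memmove semantics over the parallel table arrays: when the ranges overlap and src < dst, a forward copy would read slots it has already overwritten, so the copy runs backwards instead. The same two-loop shape over a plain vector (a sketch, not the V8 code):

    #include <cstddef>
    #include <vector>

    void CopyEntries(std::vector<int>& table, size_t dst, size_t src,
                     size_t count) {
      if (src < dst) {
        // Overlapping forward copy would clobber sources; go backwards.
        for (size_t i = count; i > 0; i--)
          table[dst + i - 1] = table[src + i - 1];
      } else {
        for (size_t i = 0; i < count; i++) table[dst + i] = table[src + i];
      }
    }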
@@ -1366,20 +1517,15 @@ bool WasmExceptionObject::IsSignatureEqual(const wasm::FunctionSig* sig) {
return true;
}
-bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
+bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
if (!object->IsJSFunction()) return false;
- JSFunction* js_function = JSFunction::cast(object);
+ JSFunction js_function = JSFunction::cast(object);
if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
DCHECK(js_function->shared()->HasWasmExportedFunctionData());
return true;
}
-WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
- DCHECK(IsWasmExportedFunction(object));
- return reinterpret_cast<WasmExportedFunction*>(object);
-}
-
-WasmInstanceObject* WasmExportedFunction::instance() {
+WasmInstanceObject WasmExportedFunction::instance() {
return shared()->wasm_exported_function_data()->instance();
}
@@ -1436,6 +1582,13 @@ wasm::FunctionSig* WasmExportedFunction::sig() {
return instance()->module()->functions[function_index()].sig;
}
+Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
+ Handle<WasmExceptionTag> result = Handle<WasmExceptionTag>::cast(
+ isolate->factory()->NewStruct(WASM_EXCEPTION_TAG_TYPE, TENURED));
+ result->set_index(index);
+ return result;
+}
+
Handle<AsmWasmData> AsmWasmData::New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<FixedArray> export_wrappers, Handle<ByteArray> asm_js_offset_table,
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 0d93fd51476..84aeb8972d0 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -45,10 +45,6 @@ class Managed;
#define DECL_OPTIONAL_ACCESSORS(name, type) \
V8_INLINE bool has_##name(); \
DECL_ACCESSORS(name, type)
-// TODO(3770): Replacement for the above, temporarily separate.
-#define DECL_OPTIONAL_ACCESSORS2(name, type) \
- V8_INLINE bool has_##name(); \
- DECL_ACCESSORS2(name, type)
// A helper for an entry in an indirect function table (IFT).
// The underlying storage in the instance is used by generated code to
@@ -65,7 +61,9 @@ class IndirectFunctionTableEntry {
void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
int target_func_index);
- Object* object_ref();
+ void CopyFrom(const IndirectFunctionTableEntry& that);
+
+ Object object_ref();
int sig_id();
Address target();
@@ -93,11 +91,11 @@ class ImportedFunctionEntry {
void SetWasmToJs(Isolate*, Handle<JSReceiver> callable,
const wasm::WasmCode* wasm_to_js_wrapper);
// Initialize this entry as a WASM to WASM call.
- void SetWasmToWasm(WasmInstanceObject* target_instance, Address call_target);
+ void SetWasmToWasm(WasmInstanceObject target_instance, Address call_target);
- WasmInstanceObject* instance();
- JSReceiver* callable();
- Object* object_ref();
+ WasmInstanceObject instance();
+ JSReceiver callable();
+ Object object_ref();
Address target();
private:
@@ -111,12 +109,13 @@ class WasmModuleObject : public JSObject {
DECL_CAST(WasmModuleObject)
DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
- DECL_ACCESSORS2(export_wrappers, FixedArray)
+ DECL_ACCESSORS(export_wrappers, FixedArray)
DECL_ACCESSORS(script, Script)
DECL_ACCESSORS(weak_instance_list, WeakArrayList)
- DECL_OPTIONAL_ACCESSORS2(asm_js_offset_table, ByteArray)
- DECL_OPTIONAL_ACCESSORS2(breakpoint_infos, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
+ DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
inline wasm::NativeModule* native_module() const;
+ inline std::shared_ptr<wasm::NativeModule> shared_native_module() const;
inline const wasm::WasmModule* module() const;
inline void reset_breakpoint_infos();
@@ -125,13 +124,13 @@ class WasmModuleObject : public JSObject {
DECL_VERIFIER(WasmModuleObject)
// Layout description.
-#define WASM_MODULE_OBJECT_FIELDS(V) \
- V(kNativeModuleOffset, kPointerSize) \
- V(kExportWrappersOffset, kPointerSize) \
- V(kScriptOffset, kPointerSize) \
- V(kWeakInstanceListOffset, kPointerSize) \
- V(kAsmJsOffsetTableOffset, kPointerSize) \
- V(kBreakPointInfosOffset, kPointerSize) \
+#define WASM_MODULE_OBJECT_FIELDS(V) \
+ V(kNativeModuleOffset, kTaggedSize) \
+ V(kExportWrappersOffset, kTaggedSize) \
+ V(kScriptOffset, kTaggedSize) \
+ V(kWeakInstanceListOffset, kTaggedSize) \
+ V(kAsmJsOffsetTableOffset, kTaggedSize) \
+ V(kBreakPointInfosOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -243,6 +242,8 @@ class WasmModuleObject : public JSObject {
static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
Handle<WasmModuleObject>,
int position);
+
+ OBJECT_CONSTRUCTORS(WasmModuleObject, JSObject)
};
// Representation of a WebAssembly.Table JavaScript-level object.
@@ -250,16 +251,16 @@ class WasmTableObject : public JSObject {
public:
DECL_CAST(WasmTableObject)
- DECL_ACCESSORS2(functions, FixedArray)
+ DECL_ACCESSORS(functions, FixedArray)
// TODO(titzer): introduce DECL_I64_ACCESSORS macro
DECL_ACCESSORS(maximum_length, Object)
- DECL_ACCESSORS2(dispatch_tables, FixedArray)
+ DECL_ACCESSORS(dispatch_tables, FixedArray)
// Layout description.
-#define WASM_TABLE_OBJECT_FIELDS(V) \
- V(kFunctionsOffset, kPointerSize) \
- V(kMaximumLengthOffset, kPointerSize) \
- V(kDispatchTablesOffset, kPointerSize) \
+#define WASM_TABLE_OBJECT_FIELDS(V) \
+ V(kFunctionsOffset, kTaggedSize) \
+ V(kMaximumLengthOffset, kTaggedSize) \
+ V(kDispatchTablesOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_TABLE_OBJECT_FIELDS)
@@ -269,14 +270,14 @@ class WasmTableObject : public JSObject {
void Grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
- int64_t maximum,
+ uint32_t maximum,
Handle<FixedArray>* js_functions);
static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance,
int table_index);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t index, Handle<JSFunction> function);
+ uint32_t index, Handle<JSFunction> function);
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
@@ -286,6 +287,8 @@ class WasmTableObject : public JSObject {
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
+
+ OBJECT_CONSTRUCTORS(WasmTableObject, JSObject)
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -298,10 +301,10 @@ class WasmMemoryObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
// Layout description.
-#define WASM_MEMORY_OBJECT_FIELDS(V) \
- V(kArrayBufferOffset, kPointerSize) \
- V(kMaximumPagesOffset, kPointerSize) \
- V(kInstancesOffset, kPointerSize) \
+#define WASM_MEMORY_OBJECT_FIELDS(V) \
+ V(kArrayBufferOffset, kTaggedSize) \
+ V(kMaximumPagesOffset, kTaggedSize) \
+ V(kInstancesOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -311,10 +314,6 @@ class WasmMemoryObject : public JSObject {
// Add an instance to the internal (weak) list.
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
- // Remove an instance from the internal (weak) list.
- static void RemoveInstance(Handle<WasmMemoryObject> memory,
- Handle<WasmInstanceObject> object);
- uint32_t current_pages();
inline bool has_maximum_pages();
// Return whether the underlying backing store has guard regions large enough
@@ -322,9 +321,11 @@ class WasmMemoryObject : public JSObject {
bool has_full_guard_region(Isolate* isolate);
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, int32_t maximum);
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
+
+ OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject)
};
// Representation of a WebAssembly.Global JavaScript-level object.
@@ -332,7 +333,8 @@ class WasmGlobalObject : public JSObject {
public:
DECL_CAST(WasmGlobalObject)
- DECL_ACCESSORS(array_buffer, JSArrayBuffer)
+ DECL_ACCESSORS(untagged_buffer, JSArrayBuffer)
+ DECL_ACCESSORS(tagged_buffer, FixedArray)
DECL_INT32_ACCESSORS(offset)
DECL_INT_ACCESSORS(flags)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
@@ -347,10 +349,11 @@ class WasmGlobalObject : public JSObject {
#undef WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS
// Layout description.
-#define WASM_GLOBAL_OBJECT_FIELDS(V) \
- V(kArrayBufferOffset, kPointerSize) \
- V(kOffsetOffset, kPointerSize) \
- V(kFlagsOffset, kPointerSize) \
+#define WASM_GLOBAL_OBJECT_FIELDS(V) \
+ V(kUntaggedBufferOffset, kTaggedSize) \
+ V(kTaggedBufferOffset, kTaggedSize) \
+ V(kOffsetOffset, kTaggedSize) \
+ V(kFlagsOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -358,7 +361,8 @@ class WasmGlobalObject : public JSObject {
#undef WASM_GLOBAL_OBJECT_FIELDS
V8_EXPORT_PRIVATE static MaybeHandle<WasmGlobalObject> New(
- Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, wasm::ValueType type,
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_untagged_buffer,
+ MaybeHandle<FixedArray> maybe_tagged_buffer, wasm::ValueType type,
int32_t offset, bool is_mutable);
inline int type_size() const;
@@ -367,17 +371,21 @@ class WasmGlobalObject : public JSObject {
inline int64_t GetI64();
inline float GetF32();
inline double GetF64();
+ inline Handle<Object> GetAnyRef();
inline void SetI32(int32_t value);
inline void SetI64(int64_t value);
inline void SetF32(float value);
inline void SetF64(double value);
+ inline void SetAnyRef(Handle<Object> value);
private:
// This function returns the address of the global's data in the
// JSArrayBuffer. This buffer may be allocated on-heap, in which case it may
// not have a fixed address.
inline Address address() const;
+
+ OBJECT_CONSTRUCTORS(WasmGlobalObject, JSObject)
};
// Representation of a WebAssembly.Instance JavaScript-level object.
@@ -387,19 +395,20 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(module_object, WasmModuleObject)
DECL_ACCESSORS(exports_object, JSObject)
- DECL_ACCESSORS2(native_context, Context)
+ DECL_ACCESSORS(native_context, Context)
DECL_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject)
- DECL_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer)
- DECL_OPTIONAL_ACCESSORS2(imported_mutable_globals_buffers, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(untagged_globals_buffer, JSArrayBuffer)
+ DECL_OPTIONAL_ACCESSORS(tagged_globals_buffer, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
- DECL_ACCESSORS2(imported_function_refs, FixedArray)
- DECL_OPTIONAL_ACCESSORS2(indirect_function_table_refs, FixedArray)
+ DECL_ACCESSORS(imported_function_refs, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
- DECL_OPTIONAL_ACCESSORS2(exceptions_table, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
DECL_ACCESSORS(undefined_value, Oddball)
DECL_ACCESSORS(null_value, Oddball)
- DECL_ACCESSORS2(centry_stub, Code)
+ DECL_ACCESSORS(centry_stub, Code)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
@@ -413,49 +422,68 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
+ DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
+ DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(dropped_data_segments, byte*)
+ DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
+
+ V8_INLINE void clear_padding();
// Dispatched behavior.
DECL_PRINTER(WasmInstanceObject)
DECL_VERIFIER(WasmInstanceObject)
// Layout description.
-#define WASM_INSTANCE_OBJECT_FIELDS(V) \
- V(kModuleObjectOffset, kPointerSize) \
- V(kExportsObjectOffset, kPointerSize) \
- V(kNativeContextOffset, kPointerSize) \
- V(kMemoryObjectOffset, kPointerSize) \
- V(kGlobalsBufferOffset, kPointerSize) \
- V(kImportedMutableGlobalsBuffersOffset, kPointerSize) \
- V(kDebugInfoOffset, kPointerSize) \
- V(kTableObjectOffset, kPointerSize) \
- V(kImportedFunctionRefsOffset, kPointerSize) \
- V(kIndirectFunctionTableRefsOffset, kPointerSize) \
- V(kManagedNativeAllocationsOffset, kPointerSize) \
- V(kExceptionsTableOffset, kPointerSize) \
- V(kUndefinedValueOffset, kPointerSize) \
- V(kNullValueOffset, kPointerSize) \
- V(kCEntryStubOffset, kPointerSize) \
- V(kFirstUntaggedOffset, 0) /* marker */ \
- V(kMemoryStartOffset, kPointerSize) /* untagged */ \
- V(kMemorySizeOffset, kSizetSize) /* untagged */ \
- V(kMemoryMaskOffset, kSizetSize) /* untagged */ \
- V(kIsolateRootOffset, kPointerSize) /* untagged */ \
- V(kStackLimitAddressOffset, kPointerSize) /* untagged */ \
- V(kRealStackLimitAddressOffset, kPointerSize) /* untagged */ \
- V(kImportedFunctionTargetsOffset, kPointerSize) /* untagged */ \
- V(kGlobalsStartOffset, kPointerSize) /* untagged */ \
- V(kImportedMutableGlobalsOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \
- V(kJumpTableStartOffset, kPointerSize) /* untagged */ \
- V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \
- V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
+#define WASM_INSTANCE_OBJECT_FIELDS(V) \
+ /* Tagged values. */ \
+ V(kModuleObjectOffset, kTaggedSize) \
+ V(kExportsObjectOffset, kTaggedSize) \
+ V(kNativeContextOffset, kTaggedSize) \
+ V(kMemoryObjectOffset, kTaggedSize) \
+ V(kUntaggedGlobalsBufferOffset, kTaggedSize) \
+ V(kTaggedGlobalsBufferOffset, kTaggedSize) \
+ V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
+ V(kDebugInfoOffset, kTaggedSize) \
+ V(kTableObjectOffset, kTaggedSize) \
+ V(kImportedFunctionRefsOffset, kTaggedSize) \
+ V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
+ V(kManagedNativeAllocationsOffset, kTaggedSize) \
+ V(kExceptionsTableOffset, kTaggedSize) \
+ V(kUndefinedValueOffset, kTaggedSize) \
+ V(kNullValueOffset, kTaggedSize) \
+ V(kCEntryStubOffset, kTaggedSize) \
+ V(kEndOfTaggedFieldsOffset, 0) \
+ /* Raw data. */ \
+ V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
+ /* Optional padding to align system pointer size fields */ \
+ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
+ V(kFirstSystemPointerFieldOffset, 0) \
+ V(kMemoryStartOffset, kSystemPointerSize) \
+ V(kMemorySizeOffset, kSizetSize) \
+ V(kMemoryMaskOffset, kSizetSize) \
+ V(kIsolateRootOffset, kSystemPointerSize) \
+ V(kStackLimitAddressOffset, kSystemPointerSize) \
+ V(kRealStackLimitAddressOffset, kSystemPointerSize) \
+ V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
+ V(kGlobalsStartOffset, kSystemPointerSize) \
+ V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
+ V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
+ V(kJumpTableStartOffset, kSystemPointerSize) \
+ V(kDataSegmentStartsOffset, kSystemPointerSize) \
+ V(kDataSegmentSizesOffset, kSystemPointerSize) \
+ V(kDroppedDataSegmentsOffset, kSystemPointerSize) \
+ V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
+ /* Header size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
WASM_INSTANCE_OBJECT_FIELDS)
#undef WASM_INSTANCE_OBJECT_FIELDS
+ STATIC_ASSERT(IsAligned(kFirstSystemPointerFieldOffset, kSystemPointerSize));
+ STATIC_ASSERT(IsAligned(kSize, kTaggedSize));
+
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
static bool EnsureIndirectFunctionTableWithMinimumSize(
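The alignment arithmetic behind kOptionalPaddingOffset in the layout above: the tagged fields end pointer-aligned, the lone uint32 field then misaligns the running offset on 64-bit targets, and POINTER_SIZE_PADDING inserts the 4 bytes needed before the first system-pointer field (0 bytes on 32-bit, which is why clear_padding() checks FIELD_SIZE != 0 first). A C++14 sketch of that computation, assuming 16 tagged slots and ignoring the pointer-aligned JSObject header:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t Align(size_t offset, size_t alignment) {
      return (offset + alignment - 1) & ~(alignment - 1);
    }

    constexpr size_t PaddingAfterUInt32(size_t end_of_tagged,
                                        size_t pointer_size) {
      size_t unaligned = end_of_tagged + sizeof(uint32_t);
      return Align(unaligned, pointer_size) - unaligned;
    }

    static_assert(PaddingAfterUInt32(16 * 8, 8) == 4, "64-bit: 4 byte pad");
    static_assert(PaddingAfterUInt32(16 * 4, 4) == 0, "32-bit: no padding");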
@@ -473,8 +501,22 @@ class WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
+ // Copies table entries. Returns {false} if the ranges are out-of-bounds.
+ static bool CopyTableEntries(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index, uint32_t dst, uint32_t src,
+ uint32_t count) V8_WARN_UNUSED_RESULT;
+
// Iterates all fields in the object except the untagged fields.
class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject)
+
+ private:
+ static void InitDataSegmentArrays(Handle<WasmInstanceObject>,
+ Handle<WasmModuleObject>);
+ static void InitElemSegmentArrays(Handle<WasmInstanceObject>,
+ Handle<WasmModuleObject>);
};
// Representation of WebAssembly.Exception JavaScript-level object.
@@ -482,13 +524,13 @@ class WasmExceptionObject : public JSObject {
public:
DECL_CAST(WasmExceptionObject)
- DECL_ACCESSORS2(serialized_signature, PodArray<wasm::ValueType>)
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
DECL_ACCESSORS(exception_tag, HeapObject)
// Layout description.
-#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
- V(kSerializedSignatureOffset, kPointerSize) \
- V(kExceptionTagOffset, kPointerSize) \
+#define WASM_EXCEPTION_OBJECT_FIELDS(V) \
+ V(kSerializedSignatureOffset, kTaggedSize) \
+ V(kExceptionTagOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
@@ -502,16 +544,17 @@ class WasmExceptionObject : public JSObject {
static Handle<WasmExceptionObject> New(Isolate* isolate,
const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag);
+
+ OBJECT_CONSTRUCTORS(WasmExceptionObject, JSObject)
};
// A WASM function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
- WasmInstanceObject* instance();
+ WasmInstanceObject instance();
V8_EXPORT_PRIVATE int function_index();
- V8_EXPORT_PRIVATE static WasmExportedFunction* cast(Object* object);
- static bool IsWasmExportedFunction(Object* object);
+ V8_EXPORT_PRIVATE static bool IsWasmExportedFunction(Object object);
static Handle<WasmExportedFunction> New(Isolate* isolate,
Handle<WasmInstanceObject> instance,
@@ -522,6 +565,9 @@ class WasmExportedFunction : public JSFunction {
Address GetWasmCallTarget();
wasm::FunctionSig* sig();
+
+ DECL_CAST(WasmExportedFunction)
+ OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction)
};
// Information for a WasmExportedFunction which is referenced as the function
@@ -529,7 +575,7 @@ class WasmExportedFunction : public JSFunction {
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
class WasmExportedFunctionData : public Struct {
public:
- DECL_ACCESSORS2(wrapper_code, Code);
+ DECL_ACCESSORS(wrapper_code, Code);
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset);
DECL_INT_ACCESSORS(function_index);
@@ -541,25 +587,28 @@ class WasmExportedFunctionData : public Struct {
DECL_VERIFIER(WasmExportedFunctionData)
// Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
- V(kWrapperCodeOffset, kPointerSize) \
- V(kInstanceOffset, kPointerSize) \
- V(kJumpTableOffsetOffset, kPointerSize) /* Smi */ \
- V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
+#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
+ V(kWrapperCodeOffset, kTaggedSize) \
+ V(kInstanceOffset, kTaggedSize) \
+ V(kJumpTableOffsetOffset, kTaggedSize) /* Smi */ \
+ V(kFunctionIndexOffset, kTaggedSize) /* Smi */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
WASM_EXPORTED_FUNCTION_DATA_FIELDS)
#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
+
+ OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct)
};
-class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
+class WasmDebugInfo : public Struct {
public:
+ NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object); // Foreign or undefined
- DECL_ACCESSORS2(interpreted_functions, FixedArray);
- DECL_OPTIONAL_ACCESSORS2(locals_names, FixedArray)
- DECL_OPTIONAL_ACCESSORS2(c_wasm_entries, FixedArray)
+ DECL_ACCESSORS(interpreted_functions, FixedArray);
+ DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
DECL_CAST(WasmDebugInfo)
@@ -569,13 +618,13 @@ class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
DECL_VERIFIER(WasmDebugInfo)
// Layout description.
-#define WASM_DEBUG_INFO_FIELDS(V) \
- V(kInstanceOffset, kPointerSize) \
- V(kInterpreterHandleOffset, kPointerSize) \
- V(kInterpretedFunctionsOffset, kPointerSize) \
- V(kLocalsNamesOffset, kPointerSize) \
- V(kCWasmEntriesOffset, kPointerSize) \
- V(kCWasmEntryMapOffset, kPointerSize) \
+#define WASM_DEBUG_INFO_FIELDS(V) \
+ V(kInstanceOffset, kTaggedSize) \
+ V(kInterpreterHandleOffset, kTaggedSize) \
+ V(kInterpretedFunctionsOffset, kTaggedSize) \
+ V(kLocalsNamesOffset, kTaggedSize) \
+ V(kCWasmEntriesOffset, kTaggedSize) \
+ V(kCWasmEntryMapOffset, kTaggedSize) \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WASM_DEBUG_INFO_FIELDS)
@@ -646,6 +695,37 @@ class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
static Handle<JSFunction> GetCWasmEntry(Handle<WasmDebugInfo>,
wasm::FunctionSig*);
+
+ OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct)
+};
+
+// Tags provide an object identity for each exception defined in a wasm
+// module. They are referenced by the following fields:
+// - {WasmExceptionObject::exception_tag} : The tag of the exception object.
+// - {WasmInstanceObject::exceptions_table}: List of tags used by an instance.
+class WasmExceptionTag : public Struct {
+ public:
+ static Handle<WasmExceptionTag> New(Isolate* isolate, int index);
+
+  // Note that this index is only useful for debugging purposes; it is not
+  // unique across modules. The GC, however, does not allow objects without
+  // at least one field, so this also serves as a padding field for now.
+ DECL_INT_ACCESSORS(index);
+
+ DECL_CAST(WasmExceptionTag)
+ DECL_PRINTER(WasmExceptionTag)
+ DECL_VERIFIER(WasmExceptionTag)
+
+// Layout description.
+#define WASM_EXCEPTION_TAG_FIELDS(V) \
+ V(kIndexOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, WASM_EXCEPTION_TAG_FIELDS)
+#undef WASM_EXCEPTION_TAG_FIELDS
+
+ OBJECT_CONSTRUCTORS(WasmExceptionTag, Struct)
};
class AsmWasmData : public Struct {
@@ -656,8 +736,8 @@ class AsmWasmData : public Struct {
Handle<HeapNumber> uses_bitset);
DECL_ACCESSORS(managed_native_module, Managed<wasm::NativeModule>)
- DECL_ACCESSORS2(export_wrappers, FixedArray)
- DECL_ACCESSORS2(asm_js_offset_table, ByteArray)
+ DECL_ACCESSORS(export_wrappers, FixedArray)
+ DECL_ACCESSORS(asm_js_offset_table, ByteArray)
DECL_ACCESSORS(uses_bitset, HeapNumber)
DECL_CAST(AsmWasmData)
@@ -665,16 +745,18 @@ class AsmWasmData : public Struct {
DECL_VERIFIER(AsmWasmData)
// Layout description.
-#define ASM_WASM_DATA_FIELDS(V) \
- V(kManagedNativeModuleOffset, kPointerSize) \
- V(kExportWrappersOffset, kPointerSize) \
- V(kAsmJsOffsetTableOffset, kPointerSize) \
- V(kUsesBitsetOffset, kPointerSize) \
- /* Total size. */ \
+#define ASM_WASM_DATA_FIELDS(V) \
+ V(kManagedNativeModuleOffset, kTaggedSize) \
+ V(kExportWrappersOffset, kTaggedSize) \
+ V(kAsmJsOffsetTableOffset, kTaggedSize) \
+ V(kUsesBitsetOffset, kTaggedSize) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, ASM_WASM_DATA_FIELDS)
#undef ASM_WASM_DATA_FIELDS
+
+ OBJECT_CONSTRUCTORS(AsmWasmData, Struct)
};
#undef DECL_OPTIONAL_ACCESSORS
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
index 274f51a7784..c8dfcf50e61 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.cc
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -158,12 +158,12 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(StoreMem32, "store32")
CASE_S128_OP(StoreMem, "store128")
- // Non-standard opcodes.
+ // Exception handling opcodes.
CASE_OP(Try, "try")
+ CASE_OP(Catch, "catch")
CASE_OP(Throw, "throw")
CASE_OP(Rethrow, "rethrow")
- CASE_OP(Catch, "catch")
- CASE_OP(CatchAll, "catch_all")
+ CASE_OP(BrOnExn, "br_on_exn")
// asm.js-only opcodes.
CASE_F64_OP(Acos, "acos")
@@ -264,7 +264,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// Atomic operations.
CASE_OP(AtomicWake, "atomic_wake")
- CASE_I32_OP(AtomicWait, "atomic_wait")
+ CASE_INT_OP(AtomicWait, "atomic_wait")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic_add")
@@ -381,11 +381,18 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
return os;
}
-bool IsJSCompatibleSignature(const FunctionSig* sig) {
+bool IsJSCompatibleSignature(const FunctionSig* sig, bool has_bigint_feature) {
+ if (sig->return_count() > 1) {
+ return false;
+ }
for (auto type : sig->all()) {
- if (type == kWasmI64 || type == kWasmS128) return false;
+ if (!has_bigint_feature && type == kWasmI64) {
+ return false;
+ }
+
+ if (type == kWasmS128) return false;
}
- return sig->return_count() <= 1;
+ return true;
}
namespace {
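The rewritten predicate above, in isolation: multi-value returns and SIMD types are never JS-compatible, and i64 becomes compatible only when the BigInt integration is enabled. A stand-alone sketch with a hypothetical ValueType enum:

    #include <vector>

    enum class ValueType { kI32, kI64, kF32, kF64, kS128, kAnyRef };

    bool IsJSCompatible(const std::vector<ValueType>& returns,
                        const std::vector<ValueType>& params,
                        bool has_bigint_feature) {
      if (returns.size() > 1) return false;  // no multi-value to JS
      auto check = [has_bigint_feature](ValueType t) {
        if (t == ValueType::kS128) return false;  // SIMD never passes
        if (t == ValueType::kI64 && !has_bigint_feature) return false;
        return true;
      };
      for (ValueType t : returns)
        if (!check(t)) return false;
      for (ValueType t : params)
        if (!check(t)) return false;
      return true;
    }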
@@ -468,20 +475,8 @@ constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
base::make_array<256>(GetNumericOpcodeSigIndex{});
-// Computes a direct pointer to a cached signature for a simple opcode.
-struct GetSimpleOpcodeSig {
- constexpr const FunctionSig* operator()(byte opcode) const {
-#define CASE(name, opc, sig) opcode == opc ? &kSig_##sig:
- return FOREACH_SIMPLE_OPCODE(CASE) nullptr;
-#undef CASE
- }
-};
-
} // namespace
-const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs =
- base::make_array<256>(GetSimpleOpcodeSig{});
-
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
switch (opcode >> 8) {
case 0:
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index c9c4467bb2a..b4ed83474f1 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -16,25 +16,25 @@ namespace internal {
namespace wasm {
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig);
+bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
// Control expressions and blocks.
-#define FOREACH_CONTROL_OPCODE(V) \
- V(Unreachable, 0x00, _) \
- V(Nop, 0x01, _) \
- V(Block, 0x02, _) \
- V(Loop, 0x03, _) \
- V(If, 0x004, _) \
- V(Else, 0x05, _) \
- V(Try, 0x06, _ /* eh_prototype */) \
- V(Catch, 0x07, _ /* eh_prototype */) \
- V(Throw, 0x08, _ /* eh_prototype */) \
- V(Rethrow, 0x09, _ /* eh_prototype */) \
- V(CatchAll, 0x0a, _ /* eh prototype */) \
- V(End, 0x0b, _) \
- V(Br, 0x0c, _) \
- V(BrIf, 0x0d, _) \
- V(BrTable, 0x0e, _) \
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Unreachable, 0x00, _) \
+ V(Nop, 0x01, _) \
+ V(Block, 0x02, _) \
+ V(Loop, 0x03, _) \
+ V(If, 0x04, _) \
+ V(Else, 0x05, _) \
+ V(Try, 0x06, _ /* eh_prototype */) \
+ V(Catch, 0x07, _ /* eh_prototype */) \
+ V(Throw, 0x08, _ /* eh_prototype */) \
+ V(Rethrow, 0x09, _ /* eh_prototype */) \
+ V(BrOnExn, 0x0a, _ /* eh prototype */) \
+ V(End, 0x0b, _) \
+ V(Br, 0x0c, _) \
+ V(BrIf, 0x0d, _) \
+ V(BrTable, 0x0e, _) \
V(Return, 0x0f, _)
// Constants, locals, globals, and calls.
@@ -420,6 +420,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
#define FOREACH_ATOMIC_OPCODE(V) \
V(AtomicWake, 0xfe00, i_ii) \
V(I32AtomicWait, 0xfe01, i_iil) \
+ V(I64AtomicWait, 0xfe02, i_ill) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
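Decoding the signature shorthand used in these tables: the letter before the underscore is the result type and the letters after it are the parameters, with i = i32, l = i64, f = f32, d = f64, r = anyref. So the new I64AtomicWait above uses i_ill, defined in the next hunk: it returns an i32 and takes (i32 address, i64 expected, i64 timeout).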
@@ -540,6 +541,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig);
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
+ V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef)
#define FOREACH_SIMD_SIGNATURE(V) \
@@ -574,8 +576,6 @@ enum TrapReason {
#undef DECLARE_ENUM
};
-extern const std::array<const FunctionSig*, 256> kSimpleOpcodeSigs;
-
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
diff --git a/chromium/v8/src/wasm/wasm-result.cc b/chromium/v8/src/wasm/wasm-result.cc
index 251563e15b1..80b7b4a6ad5 100644
--- a/chromium/v8/src/wasm/wasm-result.cc
+++ b/chromium/v8/src/wasm/wasm-result.cc
@@ -50,7 +50,7 @@ void PrintFToString(std::string& str, size_t str_offset, const char* format,
} // namespace
// static
-std::string ResultBase::FormatError(const char* format, va_list args) {
+std::string WasmError::FormatError(const char* format, va_list args) {
std::string result;
VPrintFToString(result, 0, format, args);
return result;
diff --git a/chromium/v8/src/wasm/wasm-result.h b/chromium/v8/src/wasm/wasm-result.h
index 0c41a974f99..824e838ae21 100644
--- a/chromium/v8/src/wasm/wasm-result.h
+++ b/chromium/v8/src/wasm/wasm-result.h
@@ -22,42 +22,44 @@ class Handle;
namespace wasm {
-// Base class for Result<T>.
-class V8_EXPORT_PRIVATE ResultBase {
- protected:
- ResultBase() = default;
+class V8_EXPORT_PRIVATE WasmError {
+ public:
+ WasmError() = default;
- ResultBase& operator=(ResultBase&& other) V8_NOEXCEPT = default;
+ WasmError(uint32_t offset, std::string message)
+ : offset_(offset), message_(std::move(message)) {
+ // The error message must not be empty, otherwise {empty()} would be true.
+ DCHECK(!message_.empty());
+ }
- public:
- ResultBase(ResultBase&& other) V8_NOEXCEPT
- : error_offset_(other.error_offset_),
- error_msg_(std::move(other.error_msg_)) {}
+ PRINTF_FORMAT(3, 4)
+ WasmError(uint32_t offset, const char* format, ...) : offset_(offset) {
+ va_list args;
+ va_start(args, format);
+ message_ = FormatError(format, args);
+ va_end(args);
+ // The error message must not be empty, otherwise {empty()} would be true.
+ DCHECK(!message_.empty());
+ }
- bool ok() const { return error_msg_.empty(); }
- bool failed() const { return !ok(); }
+ bool empty() const { return message_.empty(); }
+ bool has_error() const { return !message_.empty(); }
- uint32_t error_offset() const { return error_offset_; }
- const std::string& error_msg() const & { return error_msg_; }
- std::string&& error_msg() && { return std::move(error_msg_); }
+ uint32_t offset() const { return offset_; }
+ const std::string& message() const& { return message_; }
+ std::string&& message() && { return std::move(message_); }
protected:
- ResultBase(uint32_t error_offset, std::string error_msg)
- : error_offset_(error_offset), error_msg_(std::move(error_msg)) {
- // The error message must not be empty, otherwise {failed()} will be false.
- DCHECK(!error_msg_.empty());
- }
-
static std::string FormatError(const char* format, va_list args);
private:
- uint32_t error_offset_ = 0;
- std::string error_msg_;
+ uint32_t offset_ = 0;
+ std::string message_;
};
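
A default-constructed WasmError is the success state (empty() returns true), which is why both constructors DCHECK that an explicit error message is non-empty. A sketch of the intended usage contract at a hypothetical call site (std::printf assumed available via <cstdio>):

    // Hypothetical call site, not part of this patch.
    WasmError Decode(bool ok_case) {
      if (!ok_case) return WasmError(17, "invalid opcode 0x%02x", 0xffu);
      return WasmError{};  // default-constructed: empty() == true
    }

    void Report(const WasmError& error) {
      if (error.has_error()) {
        std::printf("error @+%u: %s\n", error.offset(), error.message().c_str());
      }
    }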
-// The overall result of decoding a function or a module.
+// Either a result of type T, or a WasmError.
template <typename T>
-class Result : public ResultBase {
+class Result {
public:
Result() = default;
@@ -65,33 +67,22 @@ class Result : public ResultBase {
explicit Result(S&& value) : value_(std::forward<S>(value)) {}
template <typename S>
- Result(Result<S>&& other) V8_NOEXCEPT : ResultBase(std::move(other)),
- value_(std::move(other).value()) {}
+ Result(Result<S>&& other) V8_NOEXCEPT : value_(std::move(other.value_)),
+ error_(std::move(other.error_)) {}
- Result& operator=(Result&& other) V8_NOEXCEPT = default;
+ explicit Result(WasmError error) : error_(std::move(error)) {}
- static Result<T> PRINTF_FORMAT(2, 3)
- Error(uint32_t offset, const char* format, ...) {
- va_list args;
- va_start(args, format);
- Result<T> error_result{offset, FormatError(format, args)};
- va_end(args);
- return error_result;
- }
-
- static Result<T> Error(uint32_t error_offset, std::string error_msg) {
- // Call private constructor.
- return Result<T>{error_offset, std::move(error_msg)};
- }
-
- static Result<T> ErrorFrom(ResultBase&& error_result) {
- return Error(error_result.error_offset(),
- std::move(error_result).error_msg());
+ template <typename S>
+ Result& operator=(Result<S>&& other) V8_NOEXCEPT {
+ value_ = std::move(other.value_);
+ error_ = std::move(other.error_);
+ return *this;
}
- static Result<T> ErrorFrom(const ResultBase& error_result) {
- return Error(error_result.error_offset(), error_result.error_msg());
- }
+ bool ok() const { return error_.empty(); }
+ bool failed() const { return error_.has_error(); }
+ const WasmError& error() const& { return error_; }
+ WasmError&& error() && { return std::move(error_); }
// Accessor for the value. Returns const reference if {this} is l-value or
// const, and returns r-value reference if {this} is r-value. This allows to
@@ -107,10 +98,11 @@ class Result : public ResultBase {
}
private:
- T value_ = T{};
+ template <typename S>
+ friend class Result;
- Result(uint32_t error_offset, std::string error_msg)
- : ResultBase(error_offset, std::move(error_msg)) {}
+ T value_ = T{};
+ WasmError error_;
DISALLOW_COPY_AND_ASSIGN(Result);
};
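
With the base class gone, a Result<T> is now plainly a value plus an optional WasmError, and the templated move constructor and assignment let an error propagate across different Result<T> instantiations. A sketch of the new shape (hypothetical call sites; assumes the WasmError and Result definitions above):

    Result<int> ParseCount(bool fail) {
      if (fail) return Result<int>(WasmError(3, "count out of range"));
      return Result<int>(42);
    }

    Result<std::string> ParseName() {
      Result<int> count = ParseCount(true);
      // Move the error into a Result of a different value type.
      if (count.failed()) return Result<std::string>(std::move(count).error());
      return Result<std::string>(std::string("name"));
    }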
@@ -130,15 +122,15 @@ class V8_EXPORT_PRIVATE ErrorThrower {
PRINTF_FORMAT(2, 3) void LinkError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
- void CompileFailed(const char* error, const ResultBase& result) {
- DCHECK(result.failed());
- CompileError("%s: %s @+%u", error, result.error_msg().c_str(),
- result.error_offset());
+ void CompileFailed(const char* context, const WasmError& error) {
+ DCHECK(error.has_error());
+ CompileError("%s: %s @+%u", context, error.message().c_str(),
+ error.offset());
}
- void CompileFailed(const ResultBase& result) {
- DCHECK(result.failed());
- CompileError("%s @+%u", result.error_msg().c_str(), result.error_offset());
+ void CompileFailed(const WasmError& error) {
+ DCHECK(error.has_error());
+ CompileError("%s @+%u", error.message().c_str(), error.offset());
}
// Create and return exception object.
@@ -175,10 +167,10 @@ class V8_EXPORT_PRIVATE ErrorThrower {
ErrorType error_type_ = kNone;
std::string error_msg_;
- DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
DISALLOW_NEW_AND_DELETE();
+ DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
// Use {nullptr_t} as data value to indicate that this only stores the error,
diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc
index cccc5e9bdec..a167b81cbda 100644
--- a/chromium/v8/src/wasm/wasm-serialization.cc
+++ b/chromium/v8/src/wasm/wasm-serialization.cc
@@ -8,6 +8,7 @@
#include "src/external-reference-table.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/ostreams.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
#include "src/utils.h"
@@ -119,9 +120,8 @@ class Reader {
constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
-void WriteVersion(Isolate* isolate, Writer* writer) {
- writer->Write(
- SerializedData::ComputeMagicNumber(isolate->external_reference_table()));
+void WriteVersion(Writer* writer) {
+ writer->Write(SerializedData::kMagicNumber);
writer->Write(Version::Hash());
writer->Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer->Write(FlagList::Hash());
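
Dropping the isolate-derived magic number makes the whole version header a function of the build alone. The header consists of the four uint32 fields written above, matching kVersionSize = 4 * sizeof(uint32_t); a sketch of the resulting layout (field names are descriptive, not taken from the sources):

    // 16-byte version header produced by WriteVersion.
    struct VersionHeaderSketch {
      uint32_t magic;         // SerializedData::kMagicNumber, a fixed constant
      uint32_t version_hash;  // Version::Hash()
      uint32_t cpu_features;  // CpuFeatures::SupportedFeatures()
      uint32_t flag_hash;     // FlagList::Hash()
    };
    static_assert(sizeof(VersionHeaderSketch) == 4 * sizeof(uint32_t),
                  "matches kVersionSize");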
@@ -134,6 +134,7 @@ void WriteVersion(Isolate* isolate, Writer* writer) {
// Other platforms simply require accessing the target address.
void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ DCHECK(rinfo->HasTargetAddressAddress());
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
@@ -191,6 +192,8 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
sizeof(size_t) + // offset of handler table
+ sizeof(size_t) + // offset of code comments
+ sizeof(size_t) + // unpadded binary size
sizeof(uint32_t) + // stack slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
@@ -198,13 +201,68 @@ constexpr size_t kCodeHeaderSize =
sizeof(size_t) + // protected instructions size
sizeof(WasmCode::Tier); // tier
+// A list of all isolate-independent external references. This is used to create
+// a tag from the Address of an external reference and vice versa.
+class ExternalReferenceList {
+ public:
+ uint32_t tag_from_address(Address ext_ref_address) const {
+ auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
+ return external_reference_by_tag_[tag] < searched_addr;
+ };
+ auto it = std::lower_bound(std::begin(tags_ordered_by_address_),
+ std::end(tags_ordered_by_address_),
+ ext_ref_address, tag_addr_less_than);
+ DCHECK_NE(std::end(tags_ordered_by_address_), it);
+ uint32_t tag = *it;
+ DCHECK_EQ(address_from_tag(tag), ext_ref_address);
+ return tag;
+ }
+
+ Address address_from_tag(uint32_t tag) const {
+ DCHECK_GT(kNumExternalReferences, tag);
+ return external_reference_by_tag_[tag];
+ }
+
+ static const ExternalReferenceList& Get() {
+ static ExternalReferenceList list; // Lazily initialized.
+ return list;
+ }
+
+ private:
+ // Private constructor. There will only be a single instance of this object.
+ ExternalReferenceList() {
+ for (uint32_t i = 0; i < kNumExternalReferences; ++i) {
+ tags_ordered_by_address_[i] = i;
+ }
+ auto addr_by_tag_less_than = [this](uint32_t a, uint32_t b) {
+ return external_reference_by_tag_[a] < external_reference_by_tag_[b];
+ };
+ std::sort(std::begin(tags_ordered_by_address_),
+ std::end(tags_ordered_by_address_), addr_by_tag_less_than);
+ }
+
+#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
+ static constexpr uint32_t kNumExternalReferences =
+ EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+#undef COUNT_EXTERNAL_REFERENCE
+
+#define EXT_REF_ADDR(name, desc) ExternalReference::name().address(),
+ Address external_reference_by_tag_[kNumExternalReferences] = {
+ EXTERNAL_REFERENCE_LIST(EXT_REF_ADDR)};
+#undef EXT_REF_ADDR
+ uint32_t tags_ordered_by_address_[kNumExternalReferences];
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
+};
+
+static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
+ "static destructors not allowed");
+
} // namespace
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
NativeModuleSerializer() = delete;
- NativeModuleSerializer(Isolate*, const NativeModule*,
- Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
size_t Measure() const;
bool Write(Writer* writer);
@@ -214,26 +272,19 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
void WriteHeader(Writer* writer);
void WriteCode(const WasmCode*, Writer* writer);
- Isolate* const isolate_;
const NativeModule* const native_module_;
Vector<WasmCode* const> code_table_;
bool write_called_;
// Reverse lookup tables for embedded addresses.
std::map<Address, uint32_t> wasm_stub_targets_lookup_;
- std::map<Address, uint32_t> reference_table_lookup_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(
- Isolate* isolate, const NativeModule* module,
- Vector<WasmCode* const> code_table)
- : isolate_(isolate),
- native_module_(module),
- code_table_(code_table),
- write_called_(false) {
- DCHECK_NOT_NULL(isolate_);
+ const NativeModule* module, Vector<WasmCode* const> code_table)
+ : native_module_(module), code_table_(code_table), write_called_(false) {
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
@@ -243,11 +294,6 @@ NativeModuleSerializer::NativeModuleSerializer(
->instruction_start();
wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
}
- ExternalReferenceTable* table = isolate_->external_reference_table();
- for (uint32_t i = 0; i < table->size(); ++i) {
- Address addr = table->address(i);
- reference_table_lookup_.insert(std::make_pair(addr, i));
- }
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
@@ -286,6 +332,8 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->constant_pool_offset());
writer->Write(code->safepoint_table_offset());
writer->Write(code->handler_table_offset());
+ writer->Write(code->code_comments_offset());
+ writer->Write(code->unpadded_binary_size());
writer->Write(code->stack_slots());
writer->Write(code->instructions().size());
writer->Write(code->reloc_info().size());
@@ -343,10 +391,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
} break;
case RelocInfo::EXTERNAL_REFERENCE: {
Address orig_target = orig_iter.rinfo()->target_external_reference();
- auto ref_iter = reference_table_lookup_.find(orig_target);
- DCHECK(ref_iter != reference_table_lookup_.end());
- uint32_t tag = ref_iter->second;
- SetWasmCalleeTag(iter.rinfo(), tag);
+ uint32_t ext_ref_tag =
+ ExternalReferenceList::Get().tag_from_address(orig_target);
+ SetWasmCalleeTag(iter.rinfo(), ext_ref_tag);
} break;
case RelocInfo::INTERNAL_REFERENCE:
case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
@@ -377,25 +424,22 @@ bool NativeModuleSerializer::Write(Writer* writer) {
return true;
}
-WasmSerializer::WasmSerializer(Isolate* isolate, NativeModule* native_module)
- : isolate_(isolate),
- native_module_(native_module),
+WasmSerializer::WasmSerializer(NativeModule* native_module)
+ : native_module_(native_module),
code_table_(native_module->SnapshotCodeTable()) {}
size_t WasmSerializer::GetSerializedNativeModuleSize() const {
- NativeModuleSerializer serializer(isolate_, native_module_,
- VectorOf(code_table_));
+ NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
return kVersionSize + serializer.Measure();
}
bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
- NativeModuleSerializer serializer(isolate_, native_module_,
- VectorOf(code_table_));
+ NativeModuleSerializer serializer(native_module_, VectorOf(code_table_));
size_t measured_size = kVersionSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
Writer writer(buffer);
- WriteVersion(isolate_, &writer);
+ WriteVersion(&writer);
if (!serializer.Write(&writer)) return false;
DCHECK_EQ(measured_size, writer.bytes_written());
@@ -405,7 +449,7 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
NativeModuleDeserializer() = delete;
- NativeModuleDeserializer(Isolate*, NativeModule*);
+ explicit NativeModuleDeserializer(NativeModule*);
bool Read(Reader* reader);
@@ -413,16 +457,14 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
bool ReadHeader(Reader* reader);
bool ReadCode(uint32_t fn_index, Reader* reader);
- Isolate* const isolate_;
NativeModule* const native_module_;
bool read_called_;
DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
-NativeModuleDeserializer::NativeModuleDeserializer(Isolate* isolate,
- NativeModule* native_module)
- : isolate_(isolate), native_module_(native_module), read_called_(false) {}
+NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
+ : native_module_(native_module), read_called_(false) {}
bool NativeModuleDeserializer::Read(Reader* reader) {
DCHECK(!read_called_);
@@ -450,6 +492,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
+ size_t code_comment_offset = reader->Read<size_t>();
+ size_t unpadded_binary_size = reader->Read<size_t>();
uint32_t stack_slot_count = reader->Read<uint32_t>();
size_t code_size = reader->Read<size_t>();
size_t reloc_size = reader->Read<size_t>();
@@ -471,9 +515,9 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
WasmCode* code = native_module_->AddDeserializedCode(
fn_index, code_buffer, stack_slot_count, safepoint_table_offset,
- handler_table_offset, constant_pool_offset,
- std::move(protected_instructions), std::move(reloc_info),
- std::move(source_pos), tier);
+ handler_table_offset, constant_pool_offset, code_comment_offset,
+ unpadded_binary_size, std::move(protected_instructions),
+ std::move(reloc_info), std::move(source_pos), tier);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
@@ -504,7 +548,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
}
case RelocInfo::EXTERNAL_REFERENCE: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
- Address address = isolate_->external_reference_table()->address(tag);
+ Address address = ExternalReferenceList::Get().address_from_tag(tag);
iter.rinfo()->set_target_external_reference(address, SKIP_ICACHE_FLUSH);
break;
}
@@ -521,7 +565,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
}
}
- if (FLAG_print_code || FLAG_print_wasm_code) code->Print();
+ code->MaybePrint();
code->Validate();
// Finally, flush the icache for that code.
@@ -531,22 +575,21 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
return true;
}
-bool IsSupportedVersion(Isolate* isolate, Vector<const byte> version) {
+bool IsSupportedVersion(Vector<const byte> version) {
if (version.size() < kVersionSize) return false;
byte current_version[kVersionSize];
Writer writer({current_version, kVersionSize});
- WriteVersion(isolate, &writer);
+ WriteVersion(&writer);
return memcmp(version.start(), current_version, kVersionSize) == 0;
}
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
- Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
- if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- return {};
- }
- if (!IsSupportedVersion(isolate, data)) {
- return {};
- }
+ Isolate* isolate, Vector<const byte> data,
+ Vector<const byte> wire_bytes_vec) {
+ if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) return {};
+ if (!IsSupportedVersion(data)) return {};
+
+ ModuleWireBytes wire_bytes(wire_bytes_vec);
// TODO(titzer): module features should be part of the serialization format.
WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
ModuleResult decode_result = DecodeWasmModule(
@@ -558,7 +601,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Handle<Script> script =
CreateWasmScript(isolate, wire_bytes, module->source_map_url);
- OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
+ OwnedVector<uint8_t> wire_bytes_copy =
+ OwnedVector<uint8_t>::Of(wire_bytes_vec);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
isolate, enabled_features, std::move(decode_result).value(),
@@ -568,12 +612,12 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (FLAG_wasm_lazy_compilation) {
native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
}
- NativeModuleDeserializer deserializer(isolate, native_module);
+ NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize);
if (!deserializer.Read(&reader)) return {};
- CompileJsToWasmWrappers(isolate, native_module,
+ CompileJsToWasmWrappers(isolate, native_module->module(),
handle(module_object->export_wrappers(), isolate));
// Log the code within the generated module for profiling.
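
Taken together, serialization is now isolate-independent end to end; only deserialization still needs an Isolate, since it allocates the resulting module object on that heap. A sketch of the round trip under the new API (hypothetical driver code, error handling elided):

    std::vector<byte> SerializeModule(NativeModule* native_module) {
      WasmSerializer serializer(native_module);  // no Isolate* required anymore
      std::vector<byte> buffer(serializer.GetSerializedNativeModuleSize());
      CHECK(serializer.SerializeNativeModule({buffer.data(), buffer.size()}));
      return buffer;
    }

    MaybeHandle<WasmModuleObject> RestoreModule(Isolate* isolate,
                                                Vector<const byte> data,
                                                Vector<const byte> wire_bytes) {
      // The Isolate is still needed here: the module object lives on its heap.
      return DeserializeNativeModule(isolate, data, wire_bytes);
    }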
diff --git a/chromium/v8/src/wasm/wasm-serialization.h b/chromium/v8/src/wasm/wasm-serialization.h
index 93f79a59ded..eaa1ee7ffe8 100644
--- a/chromium/v8/src/wasm/wasm-serialization.h
+++ b/chromium/v8/src/wasm/wasm-serialization.h
@@ -16,7 +16,7 @@ namespace wasm {
// the module after that won't affect the serialized result.
class WasmSerializer {
public:
- WasmSerializer(Isolate* isolate, NativeModule* native_module);
+ explicit WasmSerializer(NativeModule* native_module);
// Measure the required buffer size needed for serialization.
size_t GetSerializedNativeModuleSize() const;
@@ -26,16 +26,15 @@ class WasmSerializer {
bool SerializeNativeModule(Vector<byte> buffer) const;
private:
- Isolate* isolate_;
NativeModule* native_module_;
std::vector<WasmCode*> code_table_;
};
// Support for deserializing WebAssembly {NativeModule} objects.
// Checks the version header of the data against the current version.
-bool IsSupportedVersion(Isolate* isolate, Vector<const byte> data);
+bool IsSupportedVersion(Vector<const byte> data);
-// Deserializes the given data to create a compiled Wasm module.
+// Deserializes the given data to create a Wasm module object.
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
diff --git a/chromium/v8/src/wasm/wasm-text.cc b/chromium/v8/src/wasm/wasm-text.cc
index 2e690b8e8ee..1bd0b0ce89b 100644
--- a/chromium/v8/src/wasm/wasm-text.cc
+++ b/chromium/v8/src/wasm/wasm-text.cc
@@ -81,7 +81,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
for (; i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
- if (opcode == kExprElse || opcode == kExprEnd) --control_depth;
+ if (opcode == kExprElse || opcode == kExprCatch || opcode == kExprEnd) {
+ --control_depth;
+ }
DCHECK_LE(0, control_depth);
const int kMaxIndentation = 64;
@@ -113,12 +115,21 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
}
case kExprBr:
case kExprBrIf: {
- BreakDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.depth;
break;
}
+ case kExprBrOnExn: {
+ BranchDepthImmediate<Decoder::kNoValidate> imm_br(&i, i.pc());
+ ExceptionIndexImmediate<Decoder::kNoValidate> imm_idx(
+ &i, i.pc() + imm_br.length);
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm_br.depth << ' '
+ << imm_idx.index;
+ break;
+ }
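
Since br_on_exn carries two immediates, a branch depth followed by an exception index, the printer emits both after the opcode name. A hypothetical instruction branching to depth 1 on exception 0 would render as:

    br_on_exn 1 0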
case kExprElse:
- os << "else";
+ case kExprCatch:
+ os << WasmOpcodes::OpcodeName(opcode);
control_depth++;
break;
case kExprEnd:
@@ -149,8 +160,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
- case kExprThrow:
- case kExprCatch: {
+ case kExprThrow: {
ExceptionIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;