| author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2018-10-24 11:30:15 +0200 |
|---|---|---|
| committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2018-10-30 12:56:19 +0000 |
| commit | 6036726eb981b6c4b42047513b9d3f4ac865daac (patch) | |
| tree | 673593e70678e7789766d1f732eb51f613a2703b /chromium/v8/src/wasm | |
| parent | 466052c4e7c052268fd931888cd58961da94c586 (diff) | |
| download | qtwebengine-chromium-6036726eb981b6c4b42047513b9d3f4ac865daac.tar.gz | |
BASELINE: Update Chromium to 70.0.3538.78
Change-Id: Ie634710bf039e26c1957f4ae45e101bd4c434ae7
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm')
62 files changed, 2375 insertions, 1728 deletions
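A recurring pattern in the Liftoff changes below is that memory-size and bounds-check arithmetic moves from fixed 32-bit operations to pointer-sized ones, dispatched on kPointerSize (see the new emit_ptrsize_sub helper and the reworked BoundsCheckMem). The following is a minimal standalone sketch of that dispatch idiom, not code from the patch: the printf-based emit functions are hypothetical stand-ins for the real assembler API.

    #include <cstdio>

    // Hypothetical stand-ins for the per-width emit functions.
    void emit_i32_sub(int dst, int lhs, int rhs) {
      std::printf("sub32 r%d, r%d, r%d\n", dst, lhs, rhs);
    }
    void emit_i64_sub(int dst, int lhs, int rhs) {
      std::printf("sub64 r%d, r%d, r%d\n", dst, lhs, rhs);
    }

    constexpr int kPointerSize = sizeof(void*);

    // As in the new LiftoffAssembler::emit_ptrsize_sub: pick the 64-bit op on
    // 64-bit targets and the 32-bit op otherwise. The condition is a
    // compile-time constant, so the dead branch is trivially eliminated.
    void emit_ptrsize_sub(int dst, int lhs, int rhs) {
      if (kPointerSize == 8) {
        emit_i64_sub(dst, lhs, rhs);
      } else {
        emit_i32_sub(dst, lhs, rhs);
      }
    }

    int main() { emit_ptrsize_sub(0, 1, 2); }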
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index ca55fe5d523..725bed590f5 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   BAILOUT("Store");
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessStore");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -249,7 +238,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on arm.
 }
 
 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a8928210bb0..cdc2dc2a452 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -281,17 +281,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   }
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  // Nop.
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index ae8c9e012f0..1fef62542a5 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -227,9 +227,10 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
   // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
   // immediate value (in 31 bits, interpreted as signed value).
   // If the offset is bigger, we always trap and this code is not reached.
-  DCHECK(is_uint31(offset_imm));
+  // Note: We shouldn't have memories larger than 2GiB on 32-bit, but if we
+  // did, we encode {offset_im} as signed, and it will simply wrap around.
   Operand src_op = offset_reg == no_reg
-                       ? Operand(src_addr, offset_imm)
+                       ? Operand(src_addr, bit_cast<int32_t>(offset_imm))
                        : Operand(src_addr, offset_reg, times_1, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
 
@@ -278,10 +279,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
       break;
     case LoadType::kI64Load: {
       // Compute the operand for the load of the upper half.
-      DCHECK(is_uint31(offset_imm + 4));
       Operand upper_src_op =
           offset_reg == no_reg
-              ? Operand(src_addr, offset_imm + 4)
+              ? Operand(src_addr, bit_cast<int32_t>(offset_imm + 4))
              : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
       // The high word has to be mov'ed first, such that this is the protected
       // instruction. The mov of the low word cannot segfault.
@@ -308,9 +308,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
   // immediate value (in 31 bits, interpreted as signed value).
   // If the offset is bigger, we always trap and this code is not reached.
-  DCHECK(is_uint31(offset_imm));
   Operand dst_op = offset_reg == no_reg
-                       ? Operand(dst_addr, offset_imm)
+                       ? Operand(dst_addr, bit_cast<int32_t>(offset_imm))
                        : Operand(dst_addr, offset_reg, times_1, offset_imm);
   if (protected_store_pc) *protected_store_pc = pc_offset();
 
@@ -342,10 +341,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
       break;
     case StoreType::kI64Store: {
       // Compute the operand for the store of the upper half.
-      DCHECK(is_uint31(offset_imm + 4));
       Operand upper_dst_op =
           offset_reg == no_reg
-              ? Operand(dst_addr, offset_imm + 4)
+              ? Operand(dst_addr, bit_cast<int32_t>(offset_imm + 4))
               : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
       // The high word has to be mov'ed first, such that this is the protected
       // instruction. The mov of the low word cannot segfault.
@@ -364,17 +362,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   }
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  // Nop.
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -893,7 +880,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on ia32.
 }
 
 void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
index b7fdf5fe605..c8d8dab1d91 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -5,18 +5,9 @@
 #ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
 #define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
 
+#include "src/assembler-arch.h"
 #include "src/reglist.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#endif
-
 namespace v8 {
 namespace internal {
 namespace wasm {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index 0e913c19dc4..1d604925cc7 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -349,7 +349,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
 LiftoffAssembler::LiftoffAssembler()
     : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
                      CodeObjectRequired::kNo) {
-  set_trap_on_abort(true);  // Avoid calls to Abort.
+  set_abort_hard(true);  // Avoid calls to Abort.
 }
 
 LiftoffAssembler::~LiftoffAssembler() {
@@ -446,7 +446,7 @@ void LiftoffAssembler::SpillAllRegisters() {
   cache_state_.reset_used_registers();
 }
 
-void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::PrepareCall(FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register* target,
                                    LiftoffRegister* target_instance) {
@@ -555,7 +555,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
   }
 }
 
-void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::FinishCall(FunctionSig* sig,
                                   compiler::CallDescriptor* call_descriptor) {
   const size_t return_count = sig->return_count();
   if (return_count != 0) {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 822c620b82d..cfc412d6716 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -314,11 +314,11 @@ class LiftoffAssembler : public TurboAssembler {
   // Load parameters into the right registers / stack slots for the call.
   // Move {*target} into another register if needed and update {*target} to that
   // register, or {no_reg} if target was spilled to the stack.
-  void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
                    Register* target = nullptr,
                    LiftoffRegister* target_instance = nullptr);
   // Process return values of the call.
-  void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+  void FinishCall(FunctionSig*, compiler::CallDescriptor*);
 
   // Move {src} into {dst}. {src} and {dst} must be different.
   void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
@@ -362,10 +362,6 @@ class LiftoffAssembler : public TurboAssembler {
                     LiftoffRegister src, StoreType type, LiftoffRegList pinned,
                     uint32_t* protected_store_pc = nullptr,
                     bool is_store_mem = false);
-  inline void ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                   LiftoffRegList pinned);
-  inline void ChangeEndiannessStore(LiftoffRegister src, StoreType type,
-                                    LiftoffRegList pinned);
   inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                   ValueType);
   inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
@@ -448,6 +444,14 @@ class LiftoffAssembler : public TurboAssembler {
       emit_i32_add(dst, lhs, rhs);
     }
   }
+  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
+    if (kPointerSize == 8) {
+      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
+                   LiftoffRegister(rhs));
+    } else {
+      emit_i32_sub(dst, lhs, rhs);
+    }
+  }
 
   // f32 binops.
   inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -532,13 +536,13 @@ class LiftoffAssembler : public TurboAssembler {
   // this is the return value of the C function, stored in {rets[0]}. Further
   // outputs (specified in {sig->returns()}) are read from the buffer and stored
   // in the remaining {rets} registers.
-  inline void CallC(wasm::FunctionSig* sig, const LiftoffRegister* args,
+  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
                     const LiftoffRegister* rets, ValueType out_argument_type,
                     int stack_bytes, ExternalReference ext_ref);
 
   inline void CallNativeWasmCode(Address addr);
   // Indirect call: If {target == no_reg}, then pop the target from the stack.
-  inline void CallIndirect(wasm::FunctionSig* sig,
+  inline void CallIndirect(FunctionSig* sig,
                            compiler::CallDescriptor* call_descriptor,
                            Register target);
   inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index 1130cf0cdd5..dbd106d4816 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -16,6 +16,7 @@
 #include "src/wasm/function-body-decoder-impl.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-engine.h"
 #include "src/wasm/wasm-linkage.h"
 #include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -62,9 +63,11 @@ constexpr LoadType::LoadTypeValue kPointerLoadType =
 // thus store the label on the heap and keep a unique_ptr.
 class MovableLabel {
  public:
-  Label* get() { return label_.get(); }
+  MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
   MovableLabel() : label_(new Label()) {}
 
+  Label* get() { return label_.get(); }
+
  private:
   std::unique_ptr<Label> label_;
 };
@@ -72,6 +75,8 @@ class MovableLabel {
 // On all other platforms, just store the Label directly.
 class MovableLabel {
  public:
+  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
+
   Label* get() { return &label_; }
 
  private:
@@ -93,8 +98,7 @@ class LiftoffCompiler {
   MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
 
   // TODO(clemensh): Make this a template parameter.
-  static constexpr wasm::Decoder::ValidateFlag validate =
-      wasm::Decoder::kValidate;
+  static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
 
   using Value = ValueBase;
 
@@ -111,7 +115,7 @@ class LiftoffCompiler {
     MovableLabel label;
   };
 
-  using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+  using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
 
   struct OutOfLineCode {
     MovableLabel label;
@@ -137,11 +141,6 @@ class LiftoffCompiler {
       : descriptor_(
            GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
        env_(env),
-        min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
-        max_size_(uint64_t{env_->module->has_maximum_pages
-                               ? env_->module->maximum_pages
-                               : wasm::kV8MaxWasmMemoryPages} *
-                  wasm::kWasmPageSize),
        compilation_zone_(compilation_zone),
        safepoint_table_builder_(compilation_zone_) {}
@@ -165,20 +164,20 @@ class LiftoffCompiler {
     return __ GetTotalFrameSlotCount();
   }
 
-  void unsupported(Decoder* decoder, const char* reason) {
+  void unsupported(FullDecoder* decoder, const char* reason) {
     ok_ = false;
     TRACE("unsupported: %s\n", reason);
     decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s",
                     reason);
     BindUnboundLabels(decoder);
   }
 
-  bool DidAssemblerBailout(Decoder* decoder) {
+  bool DidAssemblerBailout(FullDecoder* decoder) {
     if (decoder->failed() || !__ did_bailout()) return false;
     unsupported(decoder, __ bailout_reason());
     return true;
   }
 
-  bool CheckSupportedType(Decoder* decoder,
+  bool CheckSupportedType(FullDecoder* decoder,
                           Vector<const ValueType> supported_types,
                           ValueType type, const char* context) {
     char buffer[128];
@@ -195,7 +194,7 @@ class LiftoffCompiler {
     return safepoint_table_builder_.GetCodeOffset();
   }
 
-  void BindUnboundLabels(Decoder* decoder) {
+  void BindUnboundLabels(FullDecoder* decoder) {
 #ifdef DEBUG
     // Bind all labels now, otherwise their destructor will fire a DCHECK error
     // if they where referenced before.
@@ -215,7 +214,7 @@ class LiftoffCompiler {
 #endif
   }
 
-  void StartFunction(Decoder* decoder) {
+  void StartFunction(FullDecoder* decoder) {
     int num_locals = decoder->NumLocals();
     __ set_num_locals(num_locals);
     for (int i = 0; i < num_locals; ++i) {
@@ -306,7 +305,7 @@ class LiftoffCompiler {
     __ bind(ool.continuation.get());
   }
 
-  void StartFunctionBody(Decoder* decoder, Control* block) {
+  void StartFunctionBody(FullDecoder* decoder, Control* block) {
     for (uint32_t i = 0; i < __ num_locals(); ++i) {
       if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
         return;
@@ -422,7 +421,7 @@ class LiftoffCompiler {
     }
   }
 
-  void FinishFunction(Decoder* decoder) {
+  void FinishFunction(FullDecoder* decoder) {
     if (DidAssemblerBailout(decoder)) return;
     for (OutOfLineCode& ool : out_of_line_code_) {
       GenerateOutOfLineCode(ool);
@@ -435,23 +434,23 @@ class LiftoffCompiler {
     DidAssemblerBailout(decoder);
   }
 
-  void OnFirstError(Decoder* decoder) {
+  void OnFirstError(FullDecoder* decoder) {
     ok_ = false;
     BindUnboundLabels(decoder);
     asm_.AbortCompilation();
   }
 
-  void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
+  void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
     TraceCacheState(decoder);
     SLOW_DCHECK(__ ValidateCacheState());
     DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
   }
 
-  void Block(Decoder* decoder, Control* block) {
+  void Block(FullDecoder* decoder, Control* block) {
     block->label_state.stack_base = __ cache_state()->stack_height();
   }
 
-  void Loop(Decoder* decoder, Control* loop) {
+  void Loop(FullDecoder* decoder, Control* loop) {
     loop->label_state.stack_base = __ cache_state()->stack_height();
 
     // Before entering a loop, spill all locals to the stack, in order to free
@@ -471,9 +470,11 @@ class LiftoffCompiler {
     StackCheck(decoder->position());
   }
 
-  void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+  void Try(FullDecoder* decoder, Control* block) {
+    unsupported(decoder, "try");
+  }
 
-  void If(Decoder* decoder, const Value& cond, Control* if_block) {
+  void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
     DCHECK_EQ(if_block, decoder->control_at(0));
     DCHECK(if_block->is_if());
 
@@ -493,7 +494,7 @@ class LiftoffCompiler {
     if_block->else_state->state.Split(*__ cache_state());
   }
 
-  void FallThruTo(Decoder* decoder, Control* c) {
+  void FallThruTo(FullDecoder* decoder, Control* c) {
     if (c->end_merge.reached) {
       __ MergeFullStackWith(c->label_state);
     } else if (c->is_onearmed_if()) {
@@ -506,7 +507,7 @@ class LiftoffCompiler {
     TraceCacheState(decoder);
   }
 
-  void PopControl(Decoder* decoder, Control* c) {
+  void PopControl(FullDecoder* decoder, Control* c) {
     if (!c->is_loop() && c->end_merge.reached) {
       __ cache_state()->Steal(c->label_state);
     }
@@ -515,7 +516,7 @@ class LiftoffCompiler {
     }
   }
 
-  void EndControl(Decoder* decoder, Control* c) {}
+  void EndControl(FullDecoder* decoder, Control* c) {}
 
   enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
 
@@ -588,7 +589,7 @@ class LiftoffCompiler {
     LiftoffRegister src = __ PopToRegister();
     LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
                                            : __ GetUnusedRegister(dst_rc);
-    DCHECK_EQ(can_trap, trap_position > 0);
+    DCHECK_EQ(!!can_trap, trap_position > 0);
     Label* trap = can_trap ? AddOutOfLineTrap(
                                  trap_position,
                                  WasmCode::kThrowWasmTrapFloatUnrepresentable)
@@ -614,7 +615,7 @@ class LiftoffCompiler {
     __ PushRegister(dst_type, dst);
   }
 
-  void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+  void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
             const Value& value, Value* result) {
 #define CASE_I32_UNOP(opcode, fn) \
   case WasmOpcode::kExpr##opcode: \
@@ -747,7 +748,7 @@ class LiftoffCompiler {
     }
   }
 
-  void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+  void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
             const Value& lhs, const Value& rhs, Value* result) {
 #define CASE_I32_BINOP(opcode, fn) \
   case WasmOpcode::kExpr##opcode: \
@@ -994,11 +995,11 @@ class LiftoffCompiler {
 #undef CASE_CCALL_BINOP
   }
 
-  void I32Const(Decoder* decoder, Value* result, int32_t value) {
+  void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
     __ cache_state()->stack_state.emplace_back(kWasmI32, value);
   }
 
-  void I64Const(Decoder* decoder, Value* result, int64_t value) {
+  void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
     // The {VarState} stores constant values as int32_t, thus we only store
     // 64-bit constants in this field if it fits in an int32_t. Larger values
     // cannot be used as immediate value anyway, so we can also just put them in
@@ -1013,30 +1014,30 @@ class LiftoffCompiler {
     }
   }
 
-  void F32Const(Decoder* decoder, Value* result, float value) {
+  void F32Const(FullDecoder* decoder, Value* result, float value) {
     LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
     __ LoadConstant(reg, WasmValue(value));
     __ PushRegister(kWasmF32, reg);
   }
 
-  void F64Const(Decoder* decoder, Value* result, double value) {
+  void F64Const(FullDecoder* decoder, Value* result, double value) {
     LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
     __ LoadConstant(reg, WasmValue(value));
     __ PushRegister(kWasmF64, reg);
   }
 
-  void RefNull(Decoder* decoder, Value* result) {
+  void RefNull(FullDecoder* decoder, Value* result) {
     unsupported(decoder, "ref_null");
   }
 
-  void Drop(Decoder* decoder, const Value& value) {
+  void Drop(FullDecoder* decoder, const Value& value) {
     auto& slot = __ cache_state()->stack_state.back();
     // If the dropped slot contains a register, decrement it's use count.
     if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
     __ cache_state()->stack_state.pop_back();
   }
 
-  void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+  void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
     if (implicit) {
       DCHECK_EQ(1, decoder->control_depth());
       Control* func_block = decoder->control_at(0);
@@ -1060,7 +1061,7 @@ class LiftoffCompiler {
                     static_cast<uint32_t>(descriptor_->StackParameterCount()));
   }
 
-  void GetLocal(Decoder* decoder, Value* result,
+  void GetLocal(FullDecoder* decoder, Value* result,
                 const LocalIndexImmediate<validate>& imm) {
     auto& slot = __ cache_state()->stack_state[imm.index];
     DCHECK_EQ(slot.type(), imm.type);
@@ -1123,12 +1124,12 @@ class LiftoffCompiler {
     if (!is_tee) __ cache_state()->stack_state.pop_back();
   }
 
-  void SetLocal(Decoder* decoder, const Value& value,
+  void SetLocal(FullDecoder* decoder, const Value& value,
                 const LocalIndexImmediate<validate>& imm) {
     SetLocal(imm.index, false);
   }
 
-  void TeeLocal(Decoder* decoder, const Value& value, Value* result,
+  void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
                 const LocalIndexImmediate<validate>& imm) {
     SetLocal(imm.index, true);
   }
@@ -1138,7 +1139,6 @@ class LiftoffCompiler {
                                uint32_t* offset) {
     LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
     if (global->mutability && global->imported) {
-      DCHECK(FLAG_experimental_wasm_mut_global);
       LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerLoadType);
       __ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
               kPointerLoadType, pinned);
@@ -1150,7 +1150,7 @@ class LiftoffCompiler {
     return addr;
   }
 
-  void GetGlobal(Decoder* decoder, Value* result,
+  void GetGlobal(FullDecoder* decoder, Value* result,
                 const GlobalIndexImmediate<validate>& imm) {
     const auto* global = &env_->module->globals[imm.index];
     if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
       return;
@@ -1165,7 +1165,7 @@ class LiftoffCompiler {
     __ PushRegister(global->type, value);
   }
 
-  void SetGlobal(Decoder* decoder, const Value& value,
+  void SetGlobal(FullDecoder* decoder, const Value& value,
                 const GlobalIndexImmediate<validate>& imm) {
     auto* global = &env_->module->globals[imm.index];
     if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
       return;
@@ -1178,14 +1178,14 @@ class LiftoffCompiler {
     __ Store(addr.gp(), no_reg, offset, reg, type, pinned);
   }
 
-  void Unreachable(Decoder* decoder) {
+  void Unreachable(FullDecoder* decoder) {
     Label* unreachable_label = AddOutOfLineTrap(
         decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
     __ emit_jump(unreachable_label);
     __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
   }
 
-  void Select(Decoder* decoder, const Value& cond, const Value& fval,
+  void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
               const Value& tval, Value* result) {
     LiftoffRegList pinned;
     Register condition = pinned.set(__ PopToRegister()).gp();
@@ -1219,11 +1219,9 @@ class LiftoffCompiler {
     __ jmp(target->label.get());
   }
 
-  void Br(Decoder* decoder, Control* target) {
-    Br(target);
-  }
+  void Br(FullDecoder* decoder, Control* target) { Br(target); }
 
-  void BrIf(Decoder* decoder, const Value& cond, Control* target) {
+  void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
     Label cont_false;
     Register value = __ PopToRegister().gp();
     __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
@@ -1234,7 +1232,7 @@ class LiftoffCompiler {
 
   // Generate a branch table case, potentially reusing previously generated
   // stack transfer code.
-  void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+  void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
                       std::map<uint32_t, MovableLabel>& br_targets) {
     MovableLabel& label = br_targets[br_depth];
     if (label.get()->is_bound()) {
@@ -1247,7 +1245,7 @@ class LiftoffCompiler {
 
   // Generate a branch table for input in [min, max).
   // TODO(wasm): Generate a real branch table (like TF TableSwitch).
-  void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+  void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
                        LiftoffRegister value, uint32_t min, uint32_t max,
                        BranchTableIterator<validate>& table_iterator,
                        std::map<uint32_t, MovableLabel>& br_targets) {
@@ -1273,7 +1271,7 @@ class LiftoffCompiler {
                     br_targets);
   }
 
-  void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
+  void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
               const Value& key) {
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -1298,7 +1296,7 @@ class LiftoffCompiler {
     DCHECK(!table_iterator.has_next());
   }
 
-  void Else(Decoder* decoder, Control* if_block) {
+  void Else(FullDecoder* decoder, Control* if_block) {
     if (if_block->reachable()) __ emit_jump(if_block->label.get());
     __ bind(if_block->else_state->label.get());
     __ cache_state()->Steal(if_block->else_state->state);
@@ -1318,17 +1316,17 @@ class LiftoffCompiler {
 
   // Returns true if the memory access is statically known to be out of bounds
   // (a jump to the trap was generated then); return false otherwise.
-  bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
-                      Register index, LiftoffRegList pinned) {
-    const bool statically_oob =
-        access_size > max_size_ || offset > max_size_ - access_size;
+  bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
+                      uint32_t offset, Register index, LiftoffRegList pinned) {
+    const bool statically_oob = access_size > env_->max_memory_size ||
+                                offset > env_->max_memory_size - access_size;
     if (!statically_oob &&
         (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
       return false;
     }
 
-    // TODO(eholk): This adds protected instruction information for the jump
+    // TODO(wasm): This adds protected instruction information for the jump
     // instruction we are about to generate. It would be better to just not add
     // protected instruction info when the pc is 0.
     Label* trap_label = AddOutOfLineTrap(
@@ -1347,7 +1345,7 @@ class LiftoffCompiler {
     DCHECK(!env_->use_trap_handler);
     DCHECK(!FLAG_wasm_no_bounds_checks);
 
-    uint32_t end_offset = offset + access_size - 1;
+    uint64_t end_offset = uint64_t{offset} + access_size - 1u;
 
     // If the end offset is larger than the smallest memory, dynamically check
     // the end offset against the actual memory size, which is not known at
@@ -1355,19 +1353,30 @@ class LiftoffCompiler {
     LiftoffRegister end_offset_reg =
         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
     LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
-    LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
-    __ LoadConstant(end_offset_reg, WasmValue(end_offset));
-    if (end_offset >= min_size_) {
-      __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
-                        end_offset_reg.gp(), mem_size.gp());
+    LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
+
+    if (kPointerSize == 8) {
+      __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+    } else {
+      __ LoadConstant(end_offset_reg,
+                      WasmValue(static_cast<uint32_t>(end_offset)));
+    }
+
+    if (end_offset >= env_->min_memory_size) {
+      __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+                        LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
+                        mem_size.gp());
     }
 
     // Just reuse the end_offset register for computing the effective size.
     LiftoffRegister effective_size_reg = end_offset_reg;
-    __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
-                    end_offset_reg.gp());
+    __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
+                        end_offset_reg.gp());
+
+    __ emit_i32_to_intptr(index, index);
 
-    __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+    __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+                      LiftoffAssembler::kWasmIntPtr, index,
                       effective_size_reg.gp());
     return false;
   }
@@ -1385,27 +1394,27 @@ class LiftoffCompiler {
     __ LoadConstant(address, WasmValue(offset));
     __ emit_i32_add(address.gp(), address.gp(), index);
 
-    // Get a register to hold the stack slot for wasm::MemoryTracingInfo.
+    // Get a register to hold the stack slot for MemoryTracingInfo.
     LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-    // Allocate stack slot for wasm::MemoryTracingInfo.
-    __ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
+    // Allocate stack slot for MemoryTracingInfo.
+    __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
 
-    // Now store all information into the wasm::MemoryTracingInfo struct.
-    __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
-             address, StoreType::kI32Store, pinned);
+    // Now store all information into the MemoryTracingInfo struct.
+    __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
+             StoreType::kI32Store, pinned);
     __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
-    __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
-             address, StoreType::kI32Store8, pinned);
+    __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+             StoreType::kI32Store8, pinned);
     __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
-    __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
-             address, StoreType::kI32Store8, pinned);
+    __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+             StoreType::kI32Store8, pinned);
 
     source_position_table_builder_.AddPosition(__ pc_offset(),
                                                SourcePosition(position), false);
 
     Register args[] = {info.gp()};
     GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
-    __ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
+    __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
   }
 
   void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
@@ -1462,7 +1471,7 @@ class LiftoffCompiler {
     return index;
   }
 
-  void LoadMem(Decoder* decoder, LoadType type,
+  void LoadMem(FullDecoder* decoder, LoadType type,
               const MemoryAccessImmediate<validate>& imm,
               const Value& index_val, Value* result) {
     ValueType value_type = type.value_type();
@@ -1495,7 +1504,7 @@ class LiftoffCompiler {
     }
   }
 
-  void StoreMem(Decoder* decoder, StoreType type,
+  void StoreMem(FullDecoder* decoder, StoreType type,
               const MemoryAccessImmediate<validate>& imm,
               const Value& index_val, const Value& value_val) {
     ValueType value_type = type.value_type();
@@ -1525,7 +1534,7 @@ class LiftoffCompiler {
     }
   }
 
-  void CurrentMemoryPages(Decoder* decoder, Value* result) {
+  void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
     LiftoffRegList pinned;
     LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
     LiftoffRegister tmp_const =
         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
     LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
     // TODO(clemensh): Shift by immediate directly.
     __ LoadConstant(tmp_const,
-                    WasmValue(int32_t{WhichPowerOf2(wasm::kWasmPageSize)}));
+                    WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
     __ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
     __ PushRegister(kWasmI32, mem_size);
   }
 
-  void GrowMemory(Decoder* decoder, const Value& value, Value* result_val) {
+  void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
     // Pop the input, then spill all cache registers to make the runtime call.
     LiftoffRegList pinned;
     LiftoffRegister input = pinned.set(__ PopToRegister());
@@ -1559,7 +1568,7 @@ class LiftoffCompiler {
     Register param_reg = descriptor.GetRegisterParameter(0);
     if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
 
-    __ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
+    __ CallRuntimeStub(WasmCode::kWasmGrowMemory);
     safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
@@ -1570,7 +1579,8 @@ class LiftoffCompiler {
     __ PushRegister(kWasmI32, result);
   }
 
-  void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
+  void CallDirect(FullDecoder* decoder,
+                  const CallFunctionImmediate<validate>& imm,
                   const Value args[], Value returns[]) {
     if (imm.sig->return_count() > 1)
       return unsupported(decoder, "multi-return");
@@ -1634,7 +1644,7 @@ class LiftoffCompiler {
     }
   }
 
-  void CallIndirect(Decoder* decoder, const Value& index_val,
+  void CallIndirect(FullDecoder* decoder, const Value& index_val,
                     const CallIndirectImmediate<validate>& imm,
                     const Value args[], Value returns[]) {
     if (imm.sig->return_count() > 1) {
@@ -1758,36 +1768,36 @@ class LiftoffCompiler {
     __ FinishCall(imm.sig, call_descriptor);
   }
 
-  void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+  void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
               Value* result) {
     unsupported(decoder, "simd");
   }
 
-  void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
+  void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
                   const SimdLaneImmediate<validate>& imm,
                   const Vector<Value> inputs, Value* result) {
     unsupported(decoder, "simd");
   }
 
-  void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
+  void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
                    const SimdShiftImmediate<validate>& imm, const Value& input,
                    Value* result) {
     unsupported(decoder, "simd");
   }
 
-  void Simd8x16ShuffleOp(Decoder* decoder,
+  void Simd8x16ShuffleOp(FullDecoder* decoder,
                          const Simd8x16ShuffleImmediate<validate>& imm,
                          const Value& input0, const Value& input1,
                          Value* result) {
     unsupported(decoder, "simd");
   }
 
-  void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>&,
+  void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
             Control* block, const Vector<Value>& args) {
     unsupported(decoder, "throw");
   }
 
-  void CatchException(Decoder* decoder,
+  void CatchException(FullDecoder* decoder,
                      const ExceptionIndexImmediate<validate>& imm,
                      Control* block, Vector<Value> caught_values) {
     unsupported(decoder, "catch");
   }
 
-  void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+  void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
                const MemoryAccessImmediate<validate>& imm, Value* result) {
     unsupported(decoder, "atomicop");
   }
@@ -1796,9 +1806,6 @@ class LiftoffCompiler {
   LiftoffAssembler asm_;
   compiler::CallDescriptor* const descriptor_;
   ModuleEnv* const env_;
-  // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
-  const uint64_t min_size_;
-  const uint64_t max_size_;
   bool ok_ = true;
   std::vector<OutOfLineCode> out_of_line_code_;
   SourcePositionTableBuilder source_position_table_builder_;
@@ -1812,7 +1819,7 @@ class LiftoffCompiler {
   // patch the actually needed stack size in the end.
   uint32_t pc_offset_stack_frame_construction_ = 0;
 
-  void TraceCacheState(Decoder* decoder) const {
+  void TraceCacheState(FullDecoder* decoder) const {
 #ifdef DEBUG
     if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
     StdoutStream os;
@@ -1832,7 +1839,7 @@ class LiftoffCompiler {
 
 }  // namespace
 
-bool LiftoffCompilationUnit::ExecuteCompilation() {
+bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
                "ExecuteLiftoffCompilation");
   base::ElapsedTimer compile_timer;
@@ -1841,18 +1848,18 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
   }
 
   Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
-  const wasm::WasmModule* module =
+  const WasmModule* module =
       wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
   auto call_descriptor =
       compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
   base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
       base::in_place, wasm_unit_->counters_->liftoff_compile_time());
-  wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
-      decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
-              wasm_unit_->env_, &zone);
+  WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+      &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
+      wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_, &zone);
   decoder.Decode();
   liftoff_compile_time_scope.reset();
-  wasm::LiftoffCompiler* compiler = &decoder.interface();
+  LiftoffCompiler* compiler = &decoder.interface();
   if (decoder.failed()) return false;  // validation error
   if (!compiler->ok()) {
     // Liftoff compilation failed.
@@ -1883,13 +1890,13 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
   code_ = wasm_unit_->native_module_->AddCode(
       wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
       0, std::move(protected_instructions), std::move(source_positions),
-      wasm::WasmCode::kLiftoff);
+      WasmCode::kLiftoff);
   wasm_unit_->native_module_->PublishCode(code_);
   return true;
 }
 
-wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
+WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
   return code_;
 }
 
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.h b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
index ce828c459b3..c7696cbb566 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
@@ -11,6 +11,7 @@
 namespace v8 {
 namespace internal {
 namespace wasm {
+struct WasmFeatures;
 class ErrorThrower;
 class WasmCode;
 class WasmCompilationUnit;
@@ -20,8 +21,8 @@ class LiftoffCompilationUnit final {
   explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
       : wasm_unit_(wasm_unit) {}
 
-  bool ExecuteCompilation();
-  wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
+  bool ExecuteCompilation(WasmFeatures* detected);
+  WasmCode* FinishCompilation(ErrorThrower*);
 
  private:
   WasmCompilationUnit* const wasm_unit_;
diff --git a/chromium/v8/src/wasm/baseline/mips/OWNERS b/chromium/v8/src/wasm/baseline/mips/OWNERS
index cf2df277c96..c653ce404d6 100644
--- a/chromium/v8/src/wasm/baseline/mips/OWNERS
+++ b/chromium/v8/src/wasm/baseline/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index d2ea65211b5..bb189946182 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -98,6 +98,135 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   }
 }
 
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+                                 LoadType type, LiftoffRegList pinned) {
+  bool is_float = false;
+  LiftoffRegister tmp = dst;
+  switch (type.value()) {
+    case LoadType::kI64Load8U:
+    case LoadType::kI64Load8S:
+      // Swap low and high registers.
+      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+      assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+      assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+      V8_FALLTHROUGH;
+    case LoadType::kI32Load8U:
+    case LoadType::kI32Load8S:
+      // No need to change endianness for byte size.
+      return;
+    case LoadType::kF32Load:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+      V8_FALLTHROUGH;
+    case LoadType::kI32Load:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+      break;
+    case LoadType::kI32Load16S:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+      break;
+    case LoadType::kI32Load16U:
+      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+      break;
+    case LoadType::kF64Load:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+      V8_FALLTHROUGH;
+    case LoadType::kI64Load:
+      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+      assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+      break;
+    case LoadType::kI64Load16U:
+      assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
+      assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+      break;
+    case LoadType::kI64Load16S:
+      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
+      assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+      break;
+    case LoadType::kI64Load32U:
+      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+      assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+      break;
+    case LoadType::kI64Load32S:
+      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+      assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (is_float) {
+    switch (type.value()) {
+      case LoadType::kF32Load:
+        assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+        break;
+      case LoadType::kF64Load:
+        assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+                                  StoreType type, LiftoffRegList pinned) {
+  bool is_float = false;
+  LiftoffRegister tmp = src;
+  switch (type.value()) {
+    case StoreType::kI64Store8:
+      // Swap low and high registers.
+      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+      assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+      assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+      V8_FALLTHROUGH;
+    case StoreType::kI32Store8:
+      // No need to change endianness for byte size.
+      return;
+    case StoreType::kF32Store:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+      V8_FALLTHROUGH;
+    case StoreType::kI32Store:
+    case StoreType::kI32Store16:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+      break;
+    case StoreType::kF64Store:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+      V8_FALLTHROUGH;
+    case StoreType::kI64Store:
+    case StoreType::kI64Store32:
+    case StoreType::kI64Store16:
+      assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+      assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+      assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (is_float) {
+    switch (type.value()) {
+      case StoreType::kF32Store:
+        assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+        break;
+      case StoreType::kF64Store:
+        assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+#endif  // V8_TARGET_BIG_ENDIAN
+
 }  // namespace liftoff
 
 int LiftoffAssembler::PrepareStackFrame() {
@@ -248,7 +377,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
 
 #if defined(V8_TARGET_BIG_ENDIAN)
   if (is_load_mem) {
-    ChangeEndiannessLoad(dst, type, pinned);
+    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
   }
 #endif
 }
@@ -273,7 +402,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
     src = tmp;
     pinned.set(tmp);
 
-    ChangeEndiannessStore(src, type, pinned);
+    liftoff::ChangeEndiannessStore(this, src, type, pinned);
   }
 #endif
 
@@ -316,134 +445,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   }
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  bool is_float = false;
-  LiftoffRegister tmp = dst;
-  switch (type.value()) {
-    case LoadType::kI64Load8U:
-    case LoadType::kI64Load8S:
-      // Swap low and high registers.
-      TurboAssembler::Move(kScratchReg, tmp.low_gp());
-      TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
-      TurboAssembler::Move(tmp.high_gp(), kScratchReg);
-      V8_FALLTHROUGH;
-    case LoadType::kI32Load8U:
-    case LoadType::kI32Load8S:
-      // No need to change endianness for byte size.
-      return;
-    case LoadType::kF32Load:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
-      V8_FALLTHROUGH;
-    case LoadType::kI32Load:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
-      break;
-    case LoadType::kI32Load16S:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
-      break;
-    case LoadType::kI32Load16U:
-      TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
-      break;
-    case LoadType::kF64Load:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpRegPair, pinned);
-      emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
-      V8_FALLTHROUGH;
-    case LoadType::kI64Load:
-      TurboAssembler::Move(kScratchReg, tmp.low_gp());
-      TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
-      break;
-    case LoadType::kI64Load16U:
-      TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
-      TurboAssembler::Move(tmp.high_gp(), zero_reg);
-      break;
-    case LoadType::kI64Load16S:
-      TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
-      sra(tmp.high_gp(), tmp.high_gp(), 31);
-      break;
-    case LoadType::kI64Load32U:
-      TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      TurboAssembler::Move(tmp.high_gp(), zero_reg);
-      break;
-    case LoadType::kI64Load32S:
-      TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      sra(tmp.high_gp(), tmp.high_gp(), 31);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  if (is_float) {
-    switch (type.value()) {
-      case LoadType::kF32Load:
-        emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
-        break;
-      case LoadType::kF64Load:
-        emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  bool is_float = false;
-  LiftoffRegister tmp = src;
-  switch (type.value()) {
-    case StoreType::kI64Store8:
-      // Swap low and high registers.
-      TurboAssembler::Move(kScratchReg, tmp.low_gp());
-      TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
-      TurboAssembler::Move(tmp.high_gp(), kScratchReg);
-      V8_FALLTHROUGH;
-    case StoreType::kI32Store8:
-      // No need to change endianness for byte size.
-      return;
-    case StoreType::kF32Store:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
-      V8_FALLTHROUGH;
-    case StoreType::kI32Store:
-    case StoreType::kI32Store16:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
-      break;
-    case StoreType::kF64Store:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpRegPair, pinned);
-      emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
-      V8_FALLTHROUGH;
-    case StoreType::kI64Store:
-    case StoreType::kI64Store32:
-    case StoreType::kI64Store16:
-      TurboAssembler::Move(kScratchReg, tmp.low_gp());
-      TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  if (is_float) {
-    switch (type.value()) {
-      case StoreType::kF32Store:
-        emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
-        break;
-      case StoreType::kF64Store:
-        emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
-        break;
-      default:
-        UNREACHABLE();
    }
-  }
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -745,7 +746,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on mips32.
 }
 
 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
diff --git a/chromium/v8/src/wasm/baseline/mips64/OWNERS b/chromium/v8/src/wasm/baseline/mips64/OWNERS
index cf2df277c96..c653ce404d6 100644
--- a/chromium/v8/src/wasm/baseline/mips64/OWNERS
+++ b/chromium/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fdbbe0f7d46..4bbfc182517 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -88,6 +88,115 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   }
 }
 
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+                                 LoadType type, LiftoffRegList pinned) {
+  bool is_float = false;
+  LiftoffRegister tmp = dst;
+  switch (type.value()) {
+    case LoadType::kI64Load8U:
+    case LoadType::kI64Load8S:
+    case LoadType::kI32Load8U:
+    case LoadType::kI32Load8S:
+      // No need to change endianness for byte size.
+      return;
+    case LoadType::kF32Load:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+      V8_FALLTHROUGH;
+    case LoadType::kI64Load32U:
+      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+      assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+      break;
+    case LoadType::kI32Load:
+    case LoadType::kI64Load32S:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+      assm->dsra32(tmp.gp(), tmp.gp(), 0);
+      break;
+    case LoadType::kI32Load16S:
+    case LoadType::kI64Load16S:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+      assm->dsra32(tmp.gp(), tmp.gp(), 0);
+      break;
+    case LoadType::kI32Load16U:
+    case LoadType::kI64Load16U:
+      assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+      assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+      break;
+    case LoadType::kF64Load:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+      V8_FALLTHROUGH;
+    case LoadType::kI64Load:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (is_float) {
+    switch (type.value()) {
+      case LoadType::kF32Load:
+        assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+        break;
+      case LoadType::kF64Load:
+        assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+                                  StoreType type, LiftoffRegList pinned) {
+  bool is_float = false;
+  LiftoffRegister tmp = src;
+  switch (type.value()) {
+    case StoreType::kI64Store8:
+    case StoreType::kI32Store8:
+      // No need to change endianness for byte size.
+      return;
+    case StoreType::kF32Store:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+      V8_FALLTHROUGH;
+    case StoreType::kI32Store:
+    case StoreType::kI32Store16:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+      break;
+    case StoreType::kF64Store:
+      is_float = true;
+      tmp = assm->GetUnusedRegister(kGpReg, pinned);
+      assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+      V8_FALLTHROUGH;
+    case StoreType::kI64Store:
+    case StoreType::kI64Store32:
+    case StoreType::kI64Store16:
+      assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (is_float) {
+    switch (type.value()) {
+      case StoreType::kF32Store:
+        assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+        break;
+      case StoreType::kF64Store:
+        assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+#endif  // V8_TARGET_BIG_ENDIAN
+
 }  // namespace liftoff
 
 int LiftoffAssembler::PrepareStackFrame() {
@@ -212,7 +321,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
 
 #if defined(V8_TARGET_BIG_ENDIAN)
   if (is_load_mem) {
-    ChangeEndiannessLoad(dst, type, pinned);
+    liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
   }
 #endif
 }
@@ -237,7 +346,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
     src = tmp;
     pinned.set(tmp);
 
-    ChangeEndiannessStore(src, type, pinned);
+    liftoff::ChangeEndiannessStore(this, src, type, pinned);
   }
 #endif
 
@@ -269,114 +378,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   }
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  bool is_float = false;
-  LiftoffRegister tmp = dst;
-  switch (type.value()) {
-    case LoadType::kI64Load8U:
-    case LoadType::kI64Load8S:
-    case LoadType::kI32Load8U:
-    case LoadType::kI32Load8S:
-      // No need to change endianness for byte size.
-      return;
-    case LoadType::kF32Load:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
-      V8_FALLTHROUGH;
-    case LoadType::kI64Load32U:
-      TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
-      dsrl32(tmp.gp(), tmp.gp(), 0);
-      break;
-    case LoadType::kI32Load:
-    case LoadType::kI64Load32S:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
-      dsra32(tmp.gp(), tmp.gp(), 0);
-      break;
-    case LoadType::kI32Load16S:
-    case LoadType::kI64Load16S:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
-      dsra32(tmp.gp(), tmp.gp(), 0);
-      break;
-    case LoadType::kI32Load16U:
-    case LoadType::kI64Load16U:
-      TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
-      dsrl32(tmp.gp(), tmp.gp(), 0);
-      break;
-    case LoadType::kF64Load:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
-      V8_FALLTHROUGH;
-    case LoadType::kI64Load:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  if (is_float) {
-    switch (type.value()) {
-      case LoadType::kF32Load:
-        emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
-        break;
-      case LoadType::kF64Load:
-        emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  bool is_float = false;
-  LiftoffRegister tmp = src;
-  switch (type.value()) {
-    case StoreType::kI64Store8:
-    case StoreType::kI32Store8:
-      // No need to change endianness for byte size.
-      return;
-    case StoreType::kF32Store:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
-      V8_FALLTHROUGH;
-    case StoreType::kI32Store:
-    case StoreType::kI32Store16:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
-      break;
-    case StoreType::kF64Store:
-      is_float = true;
-      tmp = GetUnusedRegister(kGpReg, pinned);
-      emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
-      V8_FALLTHROUGH;
-    case StoreType::kI64Store:
-    case StoreType::kI64Store32:
-    case StoreType::kI64Store16:
-      TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  if (is_float) {
-    switch (type.value()) {
-      case StoreType::kF32Store:
-        emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
-        break;
-      case StoreType::kF64Store:
-        emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a4bd20622e9..9164db21889 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   BAILOUT("Store");
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessStore");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+#ifdef V8_TARGET_ARCH_PPC64
+  BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on ppc32.
+#endif
 }
 
 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index ee142c7be47..e39dd90166e 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   BAILOUT("Store");
 }
 
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
-                                            LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
-                                             StoreType type,
-                                             LiftoffRegList pinned) {
-  BAILOUT("ChangeEndiannessStore");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+#ifdef V8_TARGET_ARCH_S390X
+  BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on s390.
+#endif } bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h index b8d08c56aa5..f6a8e09b4ec 100644 --- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -23,6 +23,17 @@ namespace wasm { namespace liftoff { +static_assert((kLiftoffAssemblerGpCacheRegs & + Register::ListOf<kScratchRegister>()) == 0, + "scratch register must not be used as cache registers"); + +constexpr DoubleRegister kScratchDoubleReg2 = xmm14; +static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision"); +static_assert( + (kLiftoffAssemblerFpCacheRegs & + DoubleRegister::ListOf<kScratchDoubleReg, kScratchDoubleReg2>()) == 0, + "scratch registers must not be used as cache registers"); + // rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack // slot is located at rbp-24. constexpr int32_t kConstantStackSpace = 16; @@ -38,13 +49,18 @@ inline Operand GetStackSlot(uint32_t index) { inline Operand GetInstanceOperand() { return Operand(rbp, -16); } inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset, - uint32_t offset_imm, LiftoffRegList pinned) { - // Wasm memory is limited to a size <2GB, so all offsets can be encoded as - // immediate value (in 31 bits, interpreted as signed value). - // If the offset is bigger, we always trap and this code is not reached. - DCHECK(is_uint31(offset_imm)); - if (offset == no_reg) return Operand(addr, offset_imm); - return Operand(addr, offset, times_1, offset_imm); + uint32_t offset_imm) { + if (is_uint31(offset_imm)) { + if (offset == no_reg) return Operand(addr, offset_imm); + return Operand(addr, offset, times_1, offset_imm); + } + // Offset immediate does not fit in 31 bits. + Register scratch = kScratchRegister; + assm->movl(scratch, Immediate(offset_imm)); + if (offset != no_reg) { + assm->addq(scratch, offset); + } + return Operand(addr, scratch, times_1, 0); } inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src, @@ -192,8 +208,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, if (emit_debug_code() && offset_reg != no_reg) { AssertZeroExtended(offset_reg); } - Operand src_op = - liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned); + Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm); if (protected_load_pc) *protected_load_pc = pc_offset(); switch (type.value()) { case LoadType::kI32Load8U: @@ -244,8 +259,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, if (emit_debug_code() && offset_reg != no_reg) { AssertZeroExtended(offset_reg); } - Operand dst_op = - liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned); + Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); if (protected_store_pc) *protected_store_pc = pc_offset(); switch (type.value()) { case StoreType::kI32Store8: @@ -274,17 +288,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, } } -void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type, - LiftoffRegList pinned) { - // Nop. -} - -void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src, - StoreType type, - LiftoffRegList pinned) { - // Nop. 
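Earlier in this x64 hunk, GetMemOp drops its is_uint31 DCHECK in favor of a fallback: offsets that fit in 31 bits are still encoded directly in the addressing mode, while larger immediates are first materialized in kScratchRegister. A standalone model of the address computation, with plain integers standing in for Operand and the assembler:

    #include <cassert>
    #include <cstdint>

    constexpr bool is_uint31(uint32_t v) { return (v >> 31) == 0; }

    // Effective address of a memory access, modeling the two encodings.
    uint64_t EffectiveAddress(uint64_t addr, uint64_t offset_reg,
                              uint32_t offset_imm) {
      if (is_uint31(offset_imm)) {
        // Operand(addr, offset, times_1, offset_imm): immediate fits.
        return addr + offset_reg + offset_imm;
      }
      // Immediate does not fit in 31 bits: load it into a scratch
      // register and fold the register offset into it, as the new code
      // does with movl/addq.
      uint64_t scratch = offset_imm;
      scratch += offset_reg;
      return addr + scratch;  // Operand(addr, scratch, times_1, 0)
    }

    int main() {
      assert(EffectiveAddress(0x1000, 8, 0x80000000u) ==
             0x1000 + 8 + 0x80000000ull);
      return 0;
    }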
-} - void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, uint32_t caller_slot_idx, ValueType type) { @@ -296,9 +299,8 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType type) { DCHECK_NE(dst_index, src_index); if (cache_state_.has_unused_register(kGpReg)) { - LiftoffRegister reg = GetUnusedRegister(kGpReg); - Fill(reg, src_index, type); - Spill(dst_index, reg, type); + Fill(LiftoffRegister{kScratchRegister}, src_index, type); + Spill(dst_index, LiftoffRegister{kScratchRegister}, type); } else { pushq(liftoff::GetStackSlot(src_index)); popq(liftoff::GetStackSlot(dst_index)); @@ -465,10 +467,8 @@ void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs, // unconditionally, as the cache state will also be modified unconditionally. liftoff::SpillRegisters(assm, rdx, rax); if (rhs == rax || rhs == rdx) { - LiftoffRegList unavailable = LiftoffRegList::ForRegs(rax, rdx, lhs); - Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp(); - iop(mov, tmp, rhs); - rhs = tmp; + iop(mov, kScratchRegister, rhs); + rhs = kScratchRegister; } // Check for division by zero. @@ -1098,10 +1098,8 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst, } CpuFeatureScope feature(assm, SSE4_1); - LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst); - DoubleRegister rounded = - pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp(); - DoubleRegister converted_back = assm->GetUnusedRegister(kFpReg, pinned).fp(); + DoubleRegister rounded = kScratchDoubleReg; + DoubleRegister converted_back = kScratchDoubleReg2; if (std::is_same<double, src_type>::value) { // f64 assm->Roundsd(rounded, src, kRoundToZero); @@ -1380,14 +1378,8 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig, } DCHECK_LE(arg_bytes, stack_bytes); -// Pass a pointer to the buffer with the arguments to the C function. -// On win64, the first argument is in {rcx}, otherwise it is {rdi}. -#ifdef _WIN64 - constexpr Register kFirstArgReg = rcx; -#else - constexpr Register kFirstArgReg = rdi; -#endif - movp(kFirstArgReg, rsp); + // Pass a pointer to the buffer with the arguments to the C function. + movp(arg_reg_1, rsp); constexpr int kNumCCallArgs = 1; diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h index cca823b84d0..3dd9aff9c63 100644 --- a/chromium/v8/src/wasm/decoder.h +++ b/chromium/v8/src/wasm/decoder.h @@ -11,7 +11,7 @@ #include "src/base/compiler-specific.h" #include "src/flags.h" #include "src/signature.h" -#include "src/utils.h" +#include "src/v8memory.h" #include "src/wasm/wasm-result.h" #include "src/zone/zone-containers.h" diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h index 621f905d447..3e0a0da46ef 100644 --- a/chromium/v8/src/wasm/function-body-decoder-impl.h +++ b/chromium/v8/src/wasm/function-body-decoder-impl.h @@ -8,9 +8,11 @@ // Do only include this header for implementing new Interface of the // WasmFullDecoder. 
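Also in the x64 file, EmitTruncateFloatToInt now uses the two reserved scratch double registers instead of asking the register allocator for temporaries. The underlying technique is unchanged: round toward zero, convert, convert back, and compare, so that NaN and out-of-range inputs are rejected. A plain-C++ sketch of that check, where an explicit range guard stands in for the register-level convert-back comparison:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <optional>

    std::optional<int32_t> TruncF64ToI32(double src) {
      double rounded = std::trunc(src);  // Roundsd(..., kRoundToZero)
      // Reject NaN and out-of-range values before converting; the emitted
      // code instead converts the result back and compares with {rounded}.
      if (!(rounded >= -2147483648.0 && rounded <= 2147483647.0)) {
        return std::nullopt;
      }
      return static_cast<int32_t>(rounded);
    }

    int main() {
      assert(TruncF64ToI32(3.9) == 3);
      assert(!TruncF64ToI32(1e10).has_value());          // out of range
      assert(!TruncF64ToI32(std::nan("")).has_value());  // NaN
      return 0;
    }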
+#include "src/base/platform/elapsed-timer.h" #include "src/bit-vector.h" #include "src/wasm/decoder.h" #include "src/wasm/function-body-decoder.h" +#include "src/wasm/wasm-features.h" #include "src/wasm/wasm-limits.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-opcodes.h" @@ -37,17 +39,21 @@ struct WasmException; return true; \ }()) -#define RET_ON_PROTOTYPE_OPCODE(flag) \ +#define RET_ON_PROTOTYPE_OPCODE(feat) \ DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \ - if (!FLAG_experimental_wasm_##flag) { \ - this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \ + if (!this->enabled_.feat) { \ + this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \ + } else { \ + this->detected_->feat = true; \ } -#define CHECK_PROTOTYPE_OPCODE(flag) \ +#define CHECK_PROTOTYPE_OPCODE(feat) \ DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \ - if (!FLAG_experimental_wasm_##flag) { \ - this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \ + if (!this->enabled_.feat) { \ + this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \ break; \ + } else { \ + this->detected_->feat = true; \ } #define OPCODE_ERROR(opcode, message) \ @@ -208,14 +214,16 @@ struct BlockTypeImmediate { uint32_t sig_index = 0; FunctionSig* sig = nullptr; - inline BlockTypeImmediate(Decoder* decoder, const byte* pc) { + inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder, + const byte* pc) { uint8_t val = decoder->read_u8<validate>(pc + 1, "block type"); if (!decode_local_type(val, &type)) { // Handle multi-value blocks. - if (!VALIDATE(FLAG_experimental_wasm_mv)) { + if (!VALIDATE(enabled.mv)) { decoder->error(pc + 1, "invalid block type"); return; } + if (!VALIDATE(decoder->ok())) return; int32_t index = decoder->read_i32v<validate>(pc + 1, &length, "block arity"); if (!VALIDATE(length > 0 && index >= 0)) { @@ -660,13 +668,18 @@ struct ControlWithNamedConstructors : public ControlBase<Value> { template <Decoder::ValidateFlag validate> class WasmDecoder : public Decoder { public: - WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start, + WasmDecoder(const WasmModule* module, const WasmFeatures& enabled, + WasmFeatures* detected, FunctionSig* sig, const byte* start, const byte* end, uint32_t buffer_offset = 0) : Decoder(start, end, buffer_offset), module_(module), + enabled_(enabled), + detected_(detected), sig_(sig), local_types_(nullptr) {} const WasmModule* module_; + const WasmFeatures enabled_; + WasmFeatures* detected_; FunctionSig* sig_; ZoneVector<ValueType>* local_types_; @@ -677,7 +690,8 @@ class WasmDecoder : public Decoder { : static_cast<uint32_t>(local_types_->size()); } - static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig, + static bool DecodeLocals(const WasmFeatures& enabled, Decoder* decoder, + const FunctionSig* sig, ZoneVector<ValueType>* type_list) { DCHECK_NOT_NULL(type_list); DCHECK_EQ(0, type_list->size()); @@ -717,14 +731,14 @@ class WasmDecoder : public Decoder { type = kWasmF64; break; case kLocalAnyRef: - if (FLAG_experimental_wasm_anyref) { + if (enabled.anyref) { type = kWasmAnyRef; break; } decoder->error(decoder->pc() - 1, "invalid local type"); return false; case kLocalS128: - if (FLAG_experimental_wasm_simd) { + if (enabled.simd) { type = kWasmS128; break; } @@ -1007,7 +1021,7 @@ class WasmDecoder : public Decoder { case kExprIf: // fall through case kExprLoop: case kExprBlock: { - BlockTypeImmediate<validate> imm(decoder, 
pc); + BlockTypeImmediate<validate> imm(kAllWasmFeatures, decoder, pc); return 1 + imm.length; } @@ -1213,10 +1227,11 @@ class WasmFullDecoder : public WasmDecoder<validate> { public: template <typename... InterfaceArgs> - WasmFullDecoder(Zone* zone, const wasm::WasmModule* module, + WasmFullDecoder(Zone* zone, const WasmModule* module, + const WasmFeatures& enabled, WasmFeatures* detected, const FunctionBody& body, InterfaceArgs&&... interface_args) - : WasmDecoder<validate>(module, body.sig, body.start, body.end, - body.offset), + : WasmDecoder<validate>(module, enabled, detected, body.sig, body.start, + body.end, body.offset), zone_(zone), interface_(std::forward<InterfaceArgs>(interface_args)...), local_type_vec_(zone), @@ -1244,7 +1259,8 @@ class WasmFullDecoder : public WasmDecoder<validate> { } DCHECK_EQ(0, this->local_types_->size()); - WasmDecoder<validate>::DecodeLocals(this, this->sig_, this->local_types_); + WasmDecoder<validate>::DecodeLocals(this->enabled_, this, this->sig_, + this->local_types_); CALL_INTERFACE(StartFunction); DecodeFunctionBody(); if (!this->failed()) CALL_INTERFACE(FinishFunction); @@ -1300,7 +1316,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { return local_type_vec_[index]; } - inline wasm::WasmCodePosition position() { + inline WasmCodePosition position() { int offset = static_cast<int>(this->pc_ - this->start_); DCHECK_EQ(this->pc_ - this->start_, offset); // overflows cannot happen return offset; @@ -1432,7 +1448,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { case kExprNop: break; case kExprBlock: { - BlockTypeImmediate<validate> imm(this, this->pc_); + BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_); if (!this->Validate(imm)) break; PopArgs(imm.sig); auto* block = PushBlock(); @@ -1461,7 +1477,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { } case kExprTry: { CHECK_PROTOTYPE_OPCODE(eh); - BlockTypeImmediate<validate> imm(this, this->pc_); + BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_); if (!this->Validate(imm)) break; PopArgs(imm.sig); auto* try_block = PushTry(); @@ -1514,7 +1530,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { break; } case kExprLoop: { - BlockTypeImmediate<validate> imm(this, this->pc_); + BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_); if (!this->Validate(imm)) break; PopArgs(imm.sig); auto* block = PushLoop(); @@ -1525,7 +1541,7 @@ class WasmFullDecoder : public WasmDecoder<validate> { break; } case kExprIf: { - BlockTypeImmediate<validate> imm(this, this->pc_); + BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_); if (!this->Validate(imm)) break; auto cond = Pop(0, kWasmI32); PopArgs(imm.sig); @@ -2475,14 +2491,13 @@ class WasmFullDecoder : public WasmDecoder<validate> { class EmptyInterface { public: - static constexpr wasm::Decoder::ValidateFlag validate = - wasm::Decoder::kValidate; + static constexpr Decoder::ValidateFlag validate = Decoder::kValidate; using Value = ValueBase; using Control = ControlBase<Value>; - using Decoder = WasmFullDecoder<validate, EmptyInterface>; + using FullDecoder = WasmFullDecoder<validate, EmptyInterface>; #define DEFINE_EMPTY_CALLBACK(name, ...) 
\ - void name(Decoder* decoder, ##__VA_ARGS__) {} + void name(FullDecoder* decoder, ##__VA_ARGS__) {} INTERFACE_FUNCTIONS(DEFINE_EMPTY_CALLBACK) #undef DEFINE_EMPTY_CALLBACK }; diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc index 41398eba258..beb8716d9af 100644 --- a/chromium/v8/src/wasm/function-body-decoder.cc +++ b/chromium/v8/src/wasm/function-body-decoder.cc @@ -61,9 +61,8 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1); class WasmGraphBuildingInterface { public: - static constexpr wasm::Decoder::ValidateFlag validate = - wasm::Decoder::kValidate; - using Decoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>; + static constexpr Decoder::ValidateFlag validate = Decoder::kValidate; + using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>; struct Value : public ValueWithNamedConstructors<Value> { TFNode* node; @@ -85,7 +84,7 @@ class WasmGraphBuildingInterface { explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {} - void StartFunction(Decoder* decoder) { + void StartFunction(FullDecoder* decoder) { SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv))); uint32_t num_locals = decoder->NumLocals(); @@ -101,8 +100,7 @@ class WasmGraphBuildingInterface { TFNode* start = builder_->Start( static_cast<int>(decoder->sig_->parameter_count() + 1 + 1)); // Initialize the instance parameter (index 0). - builder_->set_instance_node( - builder_->Param(wasm::kWasmInstanceParameterIndex)); + builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex)); // Initialize local variables. Parameters are shifted by 1 because of the // the instance parameter. uint32_t index = 0; @@ -132,25 +130,25 @@ class WasmGraphBuildingInterface { builder_->InitInstanceCache(&ssa_env->instance_cache); } - void StartFunctionBody(Decoder* decoder, Control* block) { + void StartFunctionBody(FullDecoder* decoder, Control* block) { SsaEnv* break_env = ssa_env_; SetEnv(Steal(decoder->zone(), break_env)); block->end_env = break_env; } - void FinishFunction(Decoder*) { builder_->PatchInStackCheckIfNeeded(); } + void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); } - void OnFirstError(Decoder*) {} + void OnFirstError(FullDecoder*) {} - void NextInstruction(Decoder*, WasmOpcode) {} + void NextInstruction(FullDecoder*, WasmOpcode) {} - void Block(Decoder* decoder, Control* block) { + void Block(FullDecoder* decoder, Control* block) { // The break environment is the outer environment. block->end_env = ssa_env_; SetEnv(Steal(decoder->zone(), ssa_env_)); } - void Loop(Decoder* decoder, Control* block) { + void Loop(FullDecoder* decoder, Control* block) { SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_); block->end_env = finish_try_env; // The continue environment is the inner environment. 
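A large share of the churn in these two files is the rename of the interface-local alias from Decoder to FullDecoder, which keeps the alias from shadowing the unrelated wasm::Decoder byte-stream class once the wasm:: qualifiers are dropped. A skeleton of the pattern, with stand-in types rather than the real classes:

    #include <cstdio>

    template <typename Interface>
    class WasmFullDecoder {
     public:
      explicit WasmFullDecoder(Interface iface) : iface_(iface) {}
      void Decode() { iface_.Block(this); }

     private:
      Interface iface_;
    };

    struct GraphBuildingInterface {
      // The callbacks receive the decoder itself, so the interface needs
      // a named alias for it; calling that alias FullDecoder leaves the
      // name Decoder free for the plain byte-stream decoder class.
      using FullDecoder = WasmFullDecoder<GraphBuildingInterface>;
      void Block(FullDecoder* /*decoder*/) { std::puts("Block"); }
    };

    int main() {
      WasmFullDecoder<GraphBuildingInterface> decoder{GraphBuildingInterface{}};
      decoder.Decode();
      return 0;
    }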
@@ -164,7 +162,7 @@ class WasmGraphBuildingInterface { } } - void Try(Decoder* decoder, Control* block) { + void Try(FullDecoder* decoder, Control* block) { SsaEnv* outer_env = ssa_env_; SsaEnv* catch_env = Split(decoder, outer_env); // Mark catch environment as unreachable, since only accessable @@ -179,7 +177,7 @@ class WasmGraphBuildingInterface { current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1); } - void If(Decoder* decoder, const Value& cond, Control* if_block) { + void If(FullDecoder* decoder, const Value& cond, Control* if_block) { TFNode* if_true = nullptr; TFNode* if_false = nullptr; if (ssa_env_->go()) BUILD(BranchNoHint, cond.node, &if_true, &if_false); @@ -193,51 +191,51 @@ class WasmGraphBuildingInterface { SetEnv(true_env); } - void FallThruTo(Decoder* decoder, Control* c) { + void FallThruTo(FullDecoder* decoder, Control* c) { DCHECK(!c->is_loop()); MergeValuesInto(decoder, c, &c->end_merge); } - void PopControl(Decoder* decoder, Control* block) { + void PopControl(FullDecoder* decoder, Control* block) { if (!block->is_loop()) SetEnv(block->end_env); } - void EndControl(Decoder* decoder, Control* block) { ssa_env_->Kill(); } + void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); } - void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig, + void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig, const Value& value, Value* result) { result->node = BUILD(Unop, opcode, value.node, decoder->position()); } - void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig, + void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig, const Value& lhs, const Value& rhs, Value* result) { auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position()); if (result) result->node = node; } - void I32Const(Decoder* decoder, Value* result, int32_t value) { + void I32Const(FullDecoder* decoder, Value* result, int32_t value) { result->node = builder_->Int32Constant(value); } - void I64Const(Decoder* decoder, Value* result, int64_t value) { + void I64Const(FullDecoder* decoder, Value* result, int64_t value) { result->node = builder_->Int64Constant(value); } - void F32Const(Decoder* decoder, Value* result, float value) { + void F32Const(FullDecoder* decoder, Value* result, float value) { result->node = builder_->Float32Constant(value); } - void F64Const(Decoder* decoder, Value* result, double value) { + void F64Const(FullDecoder* decoder, Value* result, double value) { result->node = builder_->Float64Constant(value); } - void RefNull(Decoder* decoder, Value* result) { + void RefNull(FullDecoder* decoder, Value* result) { result->node = builder_->RefNull(); } - void Drop(Decoder* decoder, const Value& value) {} + void Drop(FullDecoder* decoder, const Value& value) {} - void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) { + void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) { if (implicit) { DCHECK_EQ(1, decoder->control_depth()); SetEnv(decoder->control_at(0)->end_env); @@ -250,40 +248,40 @@ class WasmGraphBuildingInterface { BUILD(Return, static_cast<unsigned>(values.size()), buffer); } - void GetLocal(Decoder* decoder, Value* result, + void GetLocal(FullDecoder* decoder, Value* result, const LocalIndexImmediate<validate>& imm) { if (!ssa_env_->locals) return; // unreachable result->node = ssa_env_->locals[imm.index]; } - void SetLocal(Decoder* decoder, const Value& value, + void SetLocal(FullDecoder* decoder, const Value& value, const LocalIndexImmediate<validate>& imm) { 
if (!ssa_env_->locals) return; // unreachable ssa_env_->locals[imm.index] = value.node; } - void TeeLocal(Decoder* decoder, const Value& value, Value* result, + void TeeLocal(FullDecoder* decoder, const Value& value, Value* result, const LocalIndexImmediate<validate>& imm) { result->node = value.node; if (!ssa_env_->locals) return; // unreachable ssa_env_->locals[imm.index] = value.node; } - void GetGlobal(Decoder* decoder, Value* result, + void GetGlobal(FullDecoder* decoder, Value* result, const GlobalIndexImmediate<validate>& imm) { result->node = BUILD(GetGlobal, imm.index); } - void SetGlobal(Decoder* decoder, const Value& value, + void SetGlobal(FullDecoder* decoder, const Value& value, const GlobalIndexImmediate<validate>& imm) { BUILD(SetGlobal, imm.index, value.node); } - void Unreachable(Decoder* decoder) { + void Unreachable(FullDecoder* decoder) { BUILD(Unreachable, decoder->position()); } - void Select(Decoder* decoder, const Value& cond, const Value& fval, + void Select(FullDecoder* decoder, const Value& cond, const Value& fval, const Value& tval, Value* result) { TFNode* controls[2]; BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]); @@ -294,11 +292,11 @@ class WasmGraphBuildingInterface { ssa_env_->control = merge; } - void Br(Decoder* decoder, Control* target) { + void Br(FullDecoder* decoder, Control* target) { MergeValuesInto(decoder, target, target->br_merge()); } - void BrIf(Decoder* decoder, const Value& cond, Control* target) { + void BrIf(FullDecoder* decoder, const Value& cond, Control* target) { SsaEnv* fenv = ssa_env_; SsaEnv* tenv = Split(decoder, fenv); fenv->SetNotMerged(); @@ -308,7 +306,7 @@ class WasmGraphBuildingInterface { ssa_env_ = fenv; } - void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm, + void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm, const Value& key) { if (imm.table_count == 0) { // Only a default target. Do the equivalent of br. @@ -336,11 +334,11 @@ class WasmGraphBuildingInterface { ssa_env_ = break_env; } - void Else(Decoder* decoder, Control* if_block) { + void Else(FullDecoder* decoder, Control* if_block) { SetEnv(if_block->false_env); } - void LoadMem(Decoder* decoder, LoadType type, + void LoadMem(FullDecoder* decoder, LoadType type, const MemoryAccessImmediate<validate>& imm, const Value& index, Value* result) { result->node = @@ -348,56 +346,57 @@ class WasmGraphBuildingInterface { imm.offset, imm.alignment, decoder->position()); } - void StoreMem(Decoder* decoder, StoreType type, + void StoreMem(FullDecoder* decoder, StoreType type, const MemoryAccessImmediate<validate>& imm, const Value& index, const Value& value) { BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment, value.node, decoder->position(), type.value_type()); } - void CurrentMemoryPages(Decoder* decoder, Value* result) { + void CurrentMemoryPages(FullDecoder* decoder, Value* result) { result->node = BUILD(CurrentMemoryPages); } - void GrowMemory(Decoder* decoder, const Value& value, Value* result) { + void GrowMemory(FullDecoder* decoder, const Value& value, Value* result) { result->node = BUILD(GrowMemory, value.node); // Always reload the instance cache after growing memory. 
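The comment above is the key invariant for GrowMemory: growing may move the backing store, so the memory start and size cached in the SSA environment must be re-read afterwards. A standalone illustration, with a vector standing in for the wasm memory and a raw pointer standing in for the instance cache:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct WasmMemory {
      std::vector<uint8_t> bytes;
      // memory.grow: returns the old size in pages; may reallocate.
      uint32_t Grow(uint32_t delta_pages) {
        uint32_t old_pages = static_cast<uint32_t>(bytes.size() / 65536);
        bytes.resize(bytes.size() + delta_pages * size_t{65536});
        return old_pages;
      }
    };

    int main() {
      WasmMemory mem;
      mem.bytes.resize(65536);
      uint8_t* cached_start = mem.bytes.data();  // the "instance cache"
      mem.Grow(1);                      // may invalidate cached_start
      cached_start = mem.bytes.data();  // reload, as LoadContextIntoSsa does
      std::printf("start=%p\n", static_cast<void*>(cached_start));
      return 0;
    }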
LoadContextIntoSsa(ssa_env_); } - void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm, + void CallDirect(FullDecoder* decoder, + const CallFunctionImmediate<validate>& imm, const Value args[], Value returns[]) { DoCall(decoder, nullptr, imm.sig, imm.index, args, returns); } - void CallIndirect(Decoder* decoder, const Value& index, + void CallIndirect(FullDecoder* decoder, const Value& index, const CallIndirectImmediate<validate>& imm, const Value args[], Value returns[]) { DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns); } - void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args, + void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, Value* result) { TFNode** inputs = GetNodes(args); TFNode* node = BUILD(SimdOp, opcode, inputs); if (result) result->node = node; } - void SimdLaneOp(Decoder* decoder, WasmOpcode opcode, + void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode, const SimdLaneImmediate<validate> imm, Vector<Value> inputs, Value* result) { TFNode** nodes = GetNodes(inputs); result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes); } - void SimdShiftOp(Decoder* decoder, WasmOpcode opcode, + void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode, const SimdShiftImmediate<validate> imm, const Value& input, Value* result) { TFNode* inputs[] = {input.node}; result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs); } - void Simd8x16ShuffleOp(Decoder* decoder, + void Simd8x16ShuffleOp(FullDecoder* decoder, const Simd8x16ShuffleImmediate<validate>& imm, const Value& input0, const Value& input1, Value* result) { @@ -405,14 +404,14 @@ class WasmGraphBuildingInterface { result->node = BUILD(Simd8x16ShuffleOp, imm.shuffle, input_nodes); } - TFNode* GetExceptionTag(Decoder* decoder, + TFNode* GetExceptionTag(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm) { // TODO(kschimpf): Need to get runtime exception tag values. This // code only handles non-imported/exported exceptions. 
return BUILD(Int32Constant, imm.index); } - void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>& imm, + void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm, Control* block, const Vector<Value>& value_args) { int count = value_args.length(); ZoneVector<TFNode*> args(count, decoder->zone()); @@ -424,7 +423,7 @@ class WasmGraphBuildingInterface { EndControl(decoder, block); } - void CatchException(Decoder* decoder, + void CatchException(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm, Control* block, Vector<Value> values) { DCHECK(block->is_try_catch()); @@ -483,7 +482,7 @@ class WasmGraphBuildingInterface { } } - void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args, + void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args, const MemoryAccessImmediate<validate>& imm, Value* result) { TFNode** inputs = GetNodes(args); TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset, @@ -496,7 +495,7 @@ class WasmGraphBuildingInterface { TFBuilder* builder_; uint32_t current_catch_ = kNullCatch; - TryInfo* current_try_info(Decoder* decoder) { + TryInfo* current_try_info(FullDecoder* decoder) { return decoder->control_at(decoder->control_depth() - 1 - current_catch_) ->try_info; } @@ -548,7 +547,7 @@ class WasmGraphBuildingInterface { builder_->set_instance_cache(&env->instance_cache); } - TFNode* CheckForException(Decoder* decoder, TFNode* node) { + TFNode* CheckForException(FullDecoder* decoder, TFNode* node) { if (node == nullptr) return nullptr; const bool inside_try_scope = current_catch_ != kNullCatch; @@ -600,7 +599,7 @@ class WasmGraphBuildingInterface { } } - void MergeValuesInto(Decoder* decoder, Control* c, Merge<Value>* merge) { + void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) { DCHECK(merge == &c->start_merge || merge == &c->end_merge); if (!ssa_env_->go()) return; @@ -623,7 +622,7 @@ class WasmGraphBuildingInterface { } } - void Goto(Decoder* decoder, SsaEnv* from, SsaEnv* to) { + void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) { DCHECK_NOT_NULL(to); if (!from->go()) return; switch (to->state) { @@ -685,7 +684,7 @@ class WasmGraphBuildingInterface { return from->Kill(); } - SsaEnv* PrepareForLoop(Decoder* decoder, SsaEnv* env) { + SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) { if (!env->go()) return Split(decoder, env); env->state = SsaEnv::kMerged; @@ -732,7 +731,7 @@ class WasmGraphBuildingInterface { } // Create a complete copy of {from}. 
- SsaEnv* Split(Decoder* decoder, SsaEnv* from) { + SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) { DCHECK_NOT_NULL(from); SsaEnv* result = reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv))); @@ -782,9 +781,8 @@ class WasmGraphBuildingInterface { return result; } - void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder, - TFNode* index_node, FunctionSig* sig, uint32_t index, - const Value args[], Value returns[]) { + void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig, + uint32_t index, const Value args[], Value returns[]) { int param_count = static_cast<int>(sig->parameter_count()); TFNode** arg_nodes = builder_->Buffer(param_count + 1); TFNode** return_nodes = nullptr; @@ -811,10 +809,10 @@ class WasmGraphBuildingInterface { } // namespace -bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start, - const byte* end) { +bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls, + const byte* start, const byte* end) { Decoder decoder(start, end); - if (WasmDecoder<Decoder::kValidate>::DecodeLocals(&decoder, nullptr, + if (WasmDecoder<Decoder::kValidate>::DecodeLocals(enabled, &decoder, nullptr, &decls->type_list)) { DCHECK(decoder.ok()); decls->encoded_size = decoder.pc_offset(); @@ -827,7 +825,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end, BodyLocalDecls* decls) : Decoder(start, end) { if (decls != nullptr) { - if (DecodeLocalDecls(decls, start, end)) { + if (DecodeLocalDecls(kAllWasmFeatures, decls, start, end)) { pc_ += decls->encoded_size; if (pc_ > end_) pc_ = end_; } @@ -835,33 +833,24 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end, } DecodeResult VerifyWasmCode(AccountingAllocator* allocator, - const wasm::WasmModule* module, + const WasmFeatures& enabled, + const WasmModule* module, WasmFeatures* detected, FunctionBody& body) { Zone zone(allocator, ZONE_NAME); - WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(&zone, module, - body); + WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder( + &zone, module, enabled, detected, body); decoder.Decode(); return decoder.toResult(nullptr); } -DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator, - const wasm::WasmModule* module, - FunctionBody& body, ModuleOrigin origin, - Counters* counters) { - CHECK_LE(0, body.end - body.start); - auto time_counter = origin == kWasmOrigin - ? 
counters->wasm_decode_wasm_function_time() - : counters->wasm_decode_asm_function_time(); - TimedHistogramScope wasm_decode_function_time_scope(time_counter); - return VerifyWasmCode(allocator, module, body); -} - -DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder, - FunctionBody& body, +DecodeResult BuildTFGraph(AccountingAllocator* allocator, + const WasmFeatures& enabled, + const wasm::WasmModule* module, TFBuilder* builder, + WasmFeatures* detected, FunctionBody& body, compiler::NodeOriginTable* node_origins) { Zone zone(allocator, ZONE_NAME); WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder( - &zone, builder->module(), body, builder); + &zone, module, enabled, detected, body, builder); if (node_origins) { builder->AddBytecodePositionDecorator(node_origins, &decoder); } @@ -880,7 +869,9 @@ unsigned OpcodeLength(const byte* pc, const byte* end) { std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module, FunctionSig* sig, const byte* pc, const byte* end) { - WasmDecoder<Decoder::kNoValidate> decoder(module, sig, pc, end); + WasmFeatures unused_detected_features; + WasmDecoder<Decoder::kNoValidate> decoder( + module, kAllWasmFeatures, &unused_detected_features, sig, pc, end); return decoder.StackEffect(pc); } @@ -906,18 +897,19 @@ const char* RawOpcodeName(WasmOpcode opcode) { } // namespace bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, - const wasm::WasmModule* module, - PrintLocals print_locals) { + const WasmModule* module, PrintLocals print_locals) { StdoutStream os; return PrintRawWasmCode(allocator, body, module, print_locals, os); } bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, - const wasm::WasmModule* module, PrintLocals print_locals, + const WasmModule* module, PrintLocals print_locals, std::ostream& os, std::vector<int>* line_numbers) { Zone zone(allocator, ZONE_NAME); - WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start, - body.end); + WasmFeatures unused_detected_features; + WasmDecoder<Decoder::kNoValidate> decoder(module, kAllWasmFeatures, + &unused_detected_features, body.sig, + body.start, body.end); int line_nr = 0; constexpr int kNoByteCode = -1; @@ -1015,7 +1007,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, case kExprIf: case kExprBlock: case kExprTry: { - BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc()); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i, + i.pc()); os << " // @" << i.pc_offset(); if (decoder.Complete(imm)) { for (unsigned i = 0; i < imm.out_arity(); i++) { diff --git a/chromium/v8/src/wasm/function-body-decoder.h b/chromium/v8/src/wasm/function-body-decoder.h index 7dbb800399e..13a3ae2d0c1 100644 --- a/chromium/v8/src/wasm/function-body-decoder.h +++ b/chromium/v8/src/wasm/function-body-decoder.h @@ -17,7 +17,6 @@ namespace v8 { namespace internal { class BitVector; // forward declaration -class Counters; namespace compiler { // external declarations from compiler. class NodeOriginTable; @@ -26,9 +25,10 @@ class WasmGraphBuilder; namespace wasm { -typedef compiler::WasmGraphBuilder TFBuilder; struct WasmModule; // forward declaration of module interface. -enum ModuleOrigin : uint8_t; +struct WasmFeatures; + +typedef compiler::WasmGraphBuilder TFBuilder; // A wrapper around the signature and bytes of a function. 
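VerifyWasmCode and BuildTFGraph now take the enabled feature set plus an out-parameter for the features actually detected, replacing the per-opcode FLAG_experimental_wasm_* checks. A minimal sketch of that flow; the field names mirror the macros earlier in this diff, while the real struct lives in src/wasm/wasm-features.h:

    #include <cstdio>

    struct WasmFeatures {
      bool mv = false;      // multi-value
      bool anyref = false;  // reference types
      bool simd = false;    // SIMD
    };

    // The decoder reads {enabled} to validate an opcode and records in
    // {detected} that a gated feature was actually used, the same shape
    // as CHECK_PROTOTYPE_OPCODE above.
    bool CheckSimdOpcode(const WasmFeatures& enabled, WasmFeatures* detected) {
      if (!enabled.simd) return false;  // "Invalid opcode" error path
      detected->simd = true;
      return true;
    }

    int main() {
      WasmFeatures enabled;
      enabled.simd = true;
      WasmFeatures detected;
      bool ok = CheckSimdOpcode(enabled, &detected);
      std::printf("ok=%d detected.simd=%d\n", ok, detected.simd);
      return 0;
    }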
struct FunctionBody { @@ -43,47 +43,30 @@ struct FunctionBody { }; V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator, - const wasm::WasmModule* module, + const WasmFeatures& enabled, + const WasmModule* module, + WasmFeatures* detected, FunctionBody& body); -// Note: If run in the background thread, must follow protocol using -// isolate::async_counters() to guarantee usability of counters argument. -DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator, - const wasm::WasmModule* module, - FunctionBody& body, ModuleOrigin origin, - Counters* counters); - -DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder, +DecodeResult BuildTFGraph(AccountingAllocator* allocator, + const WasmFeatures& enabled, const WasmModule* module, + TFBuilder* builder, WasmFeatures* detected, FunctionBody& body, compiler::NodeOriginTable* node_origins); enum PrintLocals { kPrintLocals, kOmitLocals }; V8_EXPORT_PRIVATE bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, - const wasm::WasmModule* module, PrintLocals print_locals); + const WasmModule* module, PrintLocals print_locals); V8_EXPORT_PRIVATE bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body, - const wasm::WasmModule* module, PrintLocals print_locals, + const WasmModule* module, PrintLocals print_locals, std::ostream& out, std::vector<int>* line_numbers = nullptr); // A simplified form of AST printing, e.g. from a debugger. void PrintRawWasmCode(const byte* start, const byte* end); -inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator, - const WasmModule* module, FunctionSig* sig, - const byte* start, const byte* end) { - FunctionBody body(sig, 0, start, end); - return VerifyWasmCode(allocator, module, body); -} - -inline DecodeResult BuildTFGraph(AccountingAllocator* allocator, - TFBuilder* builder, FunctionSig* sig, - const byte* start, const byte* end) { - FunctionBody body(sig, 0, start, end); - return BuildTFGraph(allocator, builder, body, nullptr); -} - struct BodyLocalDecls { // The size of the encoded declarations. 
uint32_t encoded_size = 0; // size of encoded declarations @@ -93,7 +76,8 @@ struct BodyLocalDecls { explicit BodyLocalDecls(Zone* zone) : type_list(zone) {} }; -V8_EXPORT_PRIVATE bool DecodeLocalDecls(BodyLocalDecls* decls, +V8_EXPORT_PRIVATE bool DecodeLocalDecls(const WasmFeatures& enabled, + BodyLocalDecls* decls, const byte* start, const byte* end); V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc index 2e47f82ca3d..c4209d8c9ca 100644 --- a/chromium/v8/src/wasm/function-compiler.cc +++ b/chromium/v8/src/wasm/function-compiler.cc @@ -15,48 +15,56 @@ namespace internal { namespace wasm { namespace { -const char* GetCompilationModeAsString( - WasmCompilationUnit::CompilationMode mode) { + +const char* GetExecutionTierAsString(ExecutionTier mode) { switch (mode) { - case WasmCompilationUnit::CompilationMode::kLiftoff: + case ExecutionTier::kBaseline: return "liftoff"; - case WasmCompilationUnit::CompilationMode::kTurbofan: + case ExecutionTier::kOptimized: return "turbofan"; + case ExecutionTier::kInterpreter: + return "interpreter"; } UNREACHABLE(); } + +void RecordStats(const WasmCode* code, Counters* counters) { + counters->wasm_generated_code_size()->Increment( + static_cast<int>(code->instructions().size())); + counters->wasm_reloc_size()->Increment( + static_cast<int>(code->reloc_info().size())); +} + } // namespace // static -WasmCompilationUnit::CompilationMode -WasmCompilationUnit::GetDefaultCompilationMode() { - return FLAG_liftoff ? CompilationMode::kLiftoff : CompilationMode::kTurbofan; +ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier() { + return FLAG_liftoff ? ExecutionTier::kBaseline : ExecutionTier::kOptimized; } -WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env, - wasm::NativeModule* native_module, - wasm::FunctionBody body, - wasm::WasmName name, int index, - CompilationMode mode, - Counters* counters, bool lower_simd) +WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine, + ModuleEnv* env, + NativeModule* native_module, + FunctionBody body, WasmName name, + int index, Counters* counters, + ExecutionTier mode) : env_(env), - wasm_engine_(isolate->wasm_engine()), + wasm_engine_(wasm_engine), func_body_(body), func_name_(name), - counters_(counters ? counters : isolate->counters()), + counters_(counters), func_index_(index), native_module_(native_module), - lower_simd_(lower_simd), mode_(mode) { DCHECK_GE(index, env->module->num_imported_functions); DCHECK_LT(index, env->module->functions.size()); // Always disable Liftoff for asm.js, for two reasons: // 1) asm-specific opcodes are not implemented, and // 2) tier-up does not work with lazy compilation. - if (env->module->origin == kAsmJsOrigin) mode = CompilationMode::kTurbofan; + if (env->module->origin == kAsmJsOrigin) mode = ExecutionTier::kOptimized; if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 && (FLAG_wasm_tier_mask_for_testing & (1 << index))) { - mode = CompilationMode::kTurbofan; + mode = ExecutionTier::kOptimized; } SwitchMode(mode); } @@ -65,7 +73,7 @@ WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env, // {TurbofanWasmCompilationUnit} can be opaque in the header file. 
WasmCompilationUnit::~WasmCompilationUnit() {} -void WasmCompilationUnit::ExecuteCompilation() { +void WasmCompilationUnit::ExecuteCompilation(WasmFeatures* detected) { auto size_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin, wasm, function_size_bytes); size_histogram->AddSample( @@ -76,75 +84,80 @@ void WasmCompilationUnit::ExecuteCompilation() { if (FLAG_trace_wasm_compiler) { PrintF("Compiling wasm function %d with %s\n\n", func_index_, - GetCompilationModeAsString(mode_)); + GetExecutionTierAsString(mode_)); } switch (mode_) { - case WasmCompilationUnit::CompilationMode::kLiftoff: - if (liftoff_unit_->ExecuteCompilation()) break; + case ExecutionTier::kBaseline: + if (liftoff_unit_->ExecuteCompilation(detected)) break; // Otherwise, fall back to turbofan. - SwitchMode(CompilationMode::kTurbofan); + SwitchMode(ExecutionTier::kOptimized); V8_FALLTHROUGH; - case WasmCompilationUnit::CompilationMode::kTurbofan: - turbofan_unit_->ExecuteCompilation(); + case ExecutionTier::kOptimized: + turbofan_unit_->ExecuteCompilation(detected); break; + case ExecutionTier::kInterpreter: + UNREACHABLE(); // TODO(titzer): compile interpreter entry stub. } } -wasm::WasmCode* WasmCompilationUnit::FinishCompilation( - wasm::ErrorThrower* thrower) { - wasm::WasmCode* ret; +WasmCode* WasmCompilationUnit::FinishCompilation(ErrorThrower* thrower) { + WasmCode* ret; switch (mode_) { - case CompilationMode::kLiftoff: + case ExecutionTier::kBaseline: ret = liftoff_unit_->FinishCompilation(thrower); break; - case CompilationMode::kTurbofan: + case ExecutionTier::kOptimized: ret = turbofan_unit_->FinishCompilation(thrower); break; - default: - UNREACHABLE(); + case ExecutionTier::kInterpreter: + UNREACHABLE(); // TODO(titzer): finish interpreter entry stub. } if (ret == nullptr) { thrower->RuntimeError("Error finalizing code."); + } else { + RecordStats(ret, counters_); } return ret; } -void WasmCompilationUnit::SwitchMode(CompilationMode new_mode) { +void WasmCompilationUnit::SwitchMode(ExecutionTier new_mode) { // This method is being called in the constructor, where neither // {liftoff_unit_} nor {turbofan_unit_} are set, or to switch mode from // kLiftoff to kTurbofan, in which case {liftoff_unit_} is already set. mode_ = new_mode; switch (new_mode) { - case CompilationMode::kLiftoff: + case ExecutionTier::kBaseline: DCHECK(!turbofan_unit_); DCHECK(!liftoff_unit_); liftoff_unit_.reset(new LiftoffCompilationUnit(this)); return; - case CompilationMode::kTurbofan: + case ExecutionTier::kOptimized: DCHECK(!turbofan_unit_); liftoff_unit_.reset(); turbofan_unit_.reset(new compiler::TurbofanWasmCompilationUnit(this)); return; + case ExecutionTier::kInterpreter: + UNREACHABLE(); // TODO(titzer): allow compiling interpreter entry stub. 
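ExecuteCompilation above keeps the Liftoff-to-TurboFan fallback: if the baseline unit bails out, the compilation unit switches itself to the optimizing tier and retries. The control flow in isolation, with stub units in place of the real compilers:

    #include <cstdio>

    enum class ExecutionTier { kBaseline, kOptimized, kInterpreter };

    struct BaselineUnit {
      bool Execute() { return false; }  // pretend Liftoff bailed out
    };
    struct OptimizedUnit {
      void Execute() { std::puts("compiled with turbofan"); }
    };

    struct CompilationUnit {
      ExecutionTier mode = ExecutionTier::kBaseline;
      void ExecuteCompilation() {
        if (mode == ExecutionTier::kBaseline) {
          if (BaselineUnit{}.Execute()) return;
          mode = ExecutionTier::kOptimized;  // the SwitchMode fallback
        }
        OptimizedUnit{}.Execute();
      }
    };

    int main() {
      CompilationUnit{}.ExecuteCompilation();
      return 0;
    }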
} UNREACHABLE(); } // static -wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction( - wasm::NativeModule* native_module, wasm::ErrorThrower* thrower, - Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function, - CompilationMode mode) { +WasmCode* WasmCompilationUnit::CompileWasmFunction( + Isolate* isolate, NativeModule* native_module, WasmFeatures* detected, + ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function, + ExecutionTier mode) { ModuleWireBytes wire_bytes(native_module->wire_bytes()); - wasm::FunctionBody function_body{ - function->sig, function->code.offset(), - wire_bytes.start() + function->code.offset(), - wire_bytes.start() + function->code.end_offset()}; + FunctionBody function_body{function->sig, function->code.offset(), + wire_bytes.start() + function->code.offset(), + wire_bytes.start() + function->code.end_offset()}; - WasmCompilationUnit unit(isolate, env, native_module, function_body, + WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module, + function_body, wire_bytes.GetNameOrNull(function, env->module), - function->func_index, mode); - unit.ExecuteCompilation(); + function->func_index, isolate->counters(), mode); + unit.ExecuteCompilation(detected); return unit.FinishCompilation(thrower); } diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h index a270d36f788..7e19f4d12fa 100644 --- a/chromium/v8/src/wasm/function-compiler.h +++ b/chromium/v8/src/wasm/function-compiler.h @@ -6,10 +6,15 @@ #define V8_WASM_FUNCTION_COMPILER_H_ #include "src/wasm/function-body-decoder.h" +#include "src/wasm/wasm-limits.h" +#include "src/wasm/wasm-module.h" +#include "src/wasm/wasm-tier.h" namespace v8 { namespace internal { +class Counters; + namespace compiler { class TurbofanWasmCompilationUnit; } // namespace compiler @@ -30,6 +35,8 @@ enum RuntimeExceptionSupport : bool { enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false }; +enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false }; + // The {ModuleEnv} encapsulates the module data that is used during compilation. // ModuleEnvs are shareable across multiple compilations. struct ModuleEnv { @@ -45,40 +52,55 @@ struct ModuleEnv { // be generated differently. const RuntimeExceptionSupport runtime_exception_support; + // The smallest size of any memory that could be used with this module, in + // bytes. + const uint64_t min_memory_size; + + // The largest size of any memory that could be used with this module, in + // bytes. + const uint64_t max_memory_size; + + const LowerSimd lower_simd; + constexpr ModuleEnv(const WasmModule* module, UseTrapHandler use_trap_handler, - RuntimeExceptionSupport runtime_exception_support) + RuntimeExceptionSupport runtime_exception_support, + LowerSimd lower_simd = kNoLowerSimd) : module(module), use_trap_handler(use_trap_handler), - runtime_exception_support(runtime_exception_support) {} + runtime_exception_support(runtime_exception_support), + min_memory_size(module ? module->initial_pages * uint64_t{kWasmPageSize} + : 0), + max_memory_size(module && module->has_maximum_pages + ? 
(module->maximum_pages * uint64_t{kWasmPageSize}) + : kSpecMaxWasmMemoryBytes), + lower_simd(lower_simd) {} }; class WasmCompilationUnit final { public: - enum class CompilationMode : uint8_t { kLiftoff, kTurbofan }; - static CompilationMode GetDefaultCompilationMode(); + static ExecutionTier GetDefaultExecutionTier(); // If constructing from a background thread, pass in a Counters*, and ensure // that the Counters live at least as long as this compilation unit (which // typically means to hold a std::shared_ptr<Counters>). - // If no such pointer is passed, Isolate::counters() will be called. This is - // only allowed to happen on the foreground thread. - WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::NativeModule*, - wasm::FunctionBody, wasm::WasmName, int index, - CompilationMode = GetDefaultCompilationMode(), - Counters* = nullptr, bool lower_simd = false); + // If used exclusively from a foreground thread, Isolate::counters() may be + // used by callers to pass Counters. + WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, NativeModule*, + FunctionBody, WasmName, int index, Counters*, + ExecutionTier = GetDefaultExecutionTier()); ~WasmCompilationUnit(); - void ExecuteCompilation(); - wasm::WasmCode* FinishCompilation(wasm::ErrorThrower* thrower); + void ExecuteCompilation(WasmFeatures* detected); + WasmCode* FinishCompilation(ErrorThrower* thrower); - static wasm::WasmCode* CompileWasmFunction( - wasm::NativeModule* native_module, wasm::ErrorThrower* thrower, - Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function, - CompilationMode = GetDefaultCompilationMode()); + static WasmCode* CompileWasmFunction( + Isolate* isolate, NativeModule* native_module, WasmFeatures* detected, + ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function, + ExecutionTier = GetDefaultExecutionTier()); - wasm::NativeModule* native_module() const { return native_module_; } - CompilationMode mode() const { return mode_; } + NativeModule* native_module() const { return native_module_; } + ExecutionTier mode() const { return mode_; } private: friend class LiftoffCompilationUnit; @@ -86,20 +108,18 @@ class WasmCompilationUnit final { ModuleEnv* env_; WasmEngine* wasm_engine_; - wasm::FunctionBody func_body_; - wasm::WasmName func_name_; + FunctionBody func_body_; + WasmName func_name_; Counters* counters_; int func_index_; - wasm::NativeModule* native_module_; - // TODO(wasm): Put {lower_simd_} inside the {ModuleEnv}. - bool lower_simd_; - CompilationMode mode_; + NativeModule* native_module_; + ExecutionTier mode_; // LiftoffCompilationUnit, set if {mode_ == kLiftoff}. std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_; // TurbofanWasmCompilationUnit, set if {mode_ == kTurbofan}. 
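The new min_memory_size/max_memory_size fields are computed once from the module's page counts. A worked example with 64 KiB pages; the 4 GiB value for kSpecMaxWasmMemoryBytes below is an assumption matching the spec's 65536-page ceiling, not quoted from this diff:

    #include <cstdint>

    constexpr uint64_t kWasmPageSize = 0x10000;  // 64 KiB
    constexpr uint64_t kSpecMaxWasmMemoryBytes =
        uint64_t{65536} * kWasmPageSize;  // assumed spec ceiling: 4 GiB

    constexpr uint64_t MinMemorySize(uint64_t initial_pages) {
      return initial_pages * kWasmPageSize;
    }
    constexpr uint64_t MaxMemorySize(bool has_maximum_pages,
                                     uint64_t maximum_pages) {
      return has_maximum_pages ? maximum_pages * kWasmPageSize
                               : kSpecMaxWasmMemoryBytes;
    }

    static_assert(MinMemorySize(1) == 64 * 1024, "one page");
    static_assert(MaxMemorySize(true, 2) == 128 * 1024, "two pages max");
    static_assert(MaxMemorySize(false, 0) ==
                      uint64_t{4} * 1024 * 1024 * 1024,
                  "no declared maximum: spec ceiling");

    int main() { return 0; }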
std::unique_ptr<compiler::TurbofanWasmCompilationUnit> turbofan_unit_; - void SwitchMode(CompilationMode new_mode); + void SwitchMode(ExecutionTier new_mode); DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit); }; diff --git a/chromium/v8/src/wasm/jump-table-assembler.cc b/chromium/v8/src/wasm/jump-table-assembler.cc index 3ac9d13e89f..029044c005b 100644 --- a/chromium/v8/src/wasm/jump-table-assembler.cc +++ b/chromium/v8/src/wasm/jump-table-assembler.cc @@ -36,6 +36,8 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, pushq(Immediate(func_index)); // max 5 bytes movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes jmp(kScratchRegister); // 3 bytes + + PatchConstPool(); // force patching entries for partial const pool } void JumpTableAssembler::EmitJumpSlot(Address target) { @@ -80,18 +82,9 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, } void JumpTableAssembler::EmitJumpSlot(Address target) { - int offset = - target - reinterpret_cast<Address>(pc_) - Instruction::kPcLoadDelta; - DCHECK_EQ(0, offset % kInstrSize); - // If the offset is within 64 MB, emit a direct jump. Otherwise jump - // indirectly. - if (is_int26(offset)) { - b(offset); // 1 instr - } else { - // {Move32BitImmediate} emits either [movw, movt, mov] or [ldr, constant]. - Move32BitImmediate(pc, Operand(target)); - } - + // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation + // mode used below, we need this to allow concurrent patching of this slot. + Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL)); CheckConstPool(true, false); // force emit of const pool } @@ -111,13 +104,16 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, } void JumpTableAssembler::EmitJumpSlot(Address target) { + // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is + // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make + // sure concurrent patching is still supported. Jump(target, RelocInfo::NONE); } void JumpTableAssembler::NopBytes(int bytes) { DCHECK_LE(0, bytes); - DCHECK_EQ(0, bytes % kInstructionSize); - for (; bytes > 0; bytes -= kInstructionSize) { + DCHECK_EQ(0, bytes % kInstrSize); + for (; bytes > 0; bytes -= kInstrSize) { nop(); } } diff --git a/chromium/v8/src/wasm/jump-table-assembler.h b/chromium/v8/src/wasm/jump-table-assembler.h index 1ef1a82f41b..a83a7d5b210 100644 --- a/chromium/v8/src/wasm/jump-table-assembler.h +++ b/chromium/v8/src/wasm/jump-table-assembler.h @@ -12,38 +12,122 @@ namespace v8 { namespace internal { namespace wasm { +// The jump table is the central dispatch point for all (direct and indirect) +// invocations in WebAssembly. It holds one slot per function in a module, with +// each slot containing a dispatch to the currently published {WasmCode} that +// corresponds to the function. +// +// Note that the table is split into lines of fixed size, with lines laid out +// consecutively within the executable memory of the {NativeModule}. The slots +// in turn are consecutive within a line, but do not cross line boundaries. +// +// +- L1 -------------------+ +- L2 -------------------+ +- L3 ... +// | S1 | S2 | ... | Sn | x | | S1 | S2 | ... | Sn | x | | S1 ... +// +------------------------+ +------------------------+ +---- ... +// +// The above illustrates jump table lines {Li} containing slots {Si} with each +// line containing {n} slots and some padding {x} for alignment purposes. 
class JumpTableAssembler : public TurboAssembler { public: + // Translate an offset into the continuous jump table to a jump table index. + static uint32_t SlotOffsetToIndex(uint32_t slot_offset) { + uint32_t line_index = slot_offset / kJumpTableLineSize; + uint32_t line_offset = slot_offset % kJumpTableLineSize; + DCHECK_EQ(0, line_offset % kJumpTableSlotSize); + return line_index * kJumpTableSlotsPerLine + + line_offset / kJumpTableSlotSize; + } + + // Translate a jump table index to an offset into the continuous jump table. + static uint32_t SlotIndexToOffset(uint32_t slot_index) { + uint32_t line_index = slot_index / kJumpTableSlotsPerLine; + uint32_t line_offset = + (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize; + return line_index * kJumpTableLineSize + line_offset; + } + + // Determine the size of a jump table containing the given number of slots. + static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) { + // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values, + // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize} + return ((slot_count + kJumpTableSlotsPerLine - 1) / + kJumpTableSlotsPerLine) * + kJumpTableLineSize; + } + + static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index, + uint32_t func_index, + Address lazy_compile_target, + WasmCode::FlushICache flush_i_cache) { + Address slot = base + SlotIndexToOffset(slot_index); + JumpTableAssembler jtasm(slot); + jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target); + jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset()); + if (flush_i_cache) { + Assembler::FlushICache(slot, kJumpTableSlotSize); + } + } + + static void PatchJumpTableSlot(Address base, uint32_t slot_index, + Address new_target, + WasmCode::FlushICache flush_i_cache) { + Address slot = base + SlotIndexToOffset(slot_index); + JumpTableAssembler jtasm(slot); + jtasm.EmitJumpSlot(new_target); + jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset()); + if (flush_i_cache) { + Assembler::FlushICache(slot, kJumpTableSlotSize); + } + } + + private: // Instantiate a {JumpTableAssembler} for patching. explicit JumpTableAssembler(Address slot_addr, int size = 256) : TurboAssembler(nullptr, JumpTableAssemblerOptions(), reinterpret_cast<void*>(slot_addr), size, CodeObjectRequired::kNo) {} +// To allow concurrent patching of the jump table entries, we need to ensure +// that the instruction containing the call target does not cross cache-line +// boundaries. The jump table line size has been chosen to satisfy this. 
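A worked example of the line/slot arithmetic and the cache-line constraint, using the x64 constants defined just below (kJumpTableLineSize = 64, kJumpTableSlotSize = 18, hence 3 slots per line and 10 bytes of padding):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kLineSize = 64;
    constexpr uint32_t kSlotSize = 18;
    constexpr uint32_t kSlotsPerLine = kLineSize / kSlotSize;  // == 3

    constexpr uint32_t SlotIndexToOffset(uint32_t slot_index) {
      return (slot_index / kSlotsPerLine) * kLineSize +
             (slot_index % kSlotsPerLine) * kSlotSize;
    }

    constexpr uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
      return (slot_offset / kLineSize) * kSlotsPerLine +
             (slot_offset % kLineSize) / kSlotSize;
    }

    constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
      return ((slot_count + kSlotsPerLine - 1) / kSlotsPerLine) * kLineSize;
    }

    int main() {
      assert(SlotIndexToOffset(2) == 36);  // third slot, still in line 0
      assert(SlotIndexToOffset(3) == 64);  // fourth slot opens line 1
      assert(SlotOffsetToIndex(36) == 2);  // inverse mapping
      // No slot straddles a 64-byte cache line, so each is patchable
      // with a single coherent write.
      assert(SizeForNumberOfSlots(4) == 128);  // 4 slots -> 2 lines
      return 0;
    }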
#if V8_TARGET_ARCH_X64 + static constexpr int kJumpTableLineSize = 64; static constexpr int kJumpTableSlotSize = 18; #elif V8_TARGET_ARCH_IA32 + static constexpr int kJumpTableLineSize = 64; static constexpr int kJumpTableSlotSize = 10; #elif V8_TARGET_ARCH_ARM + static constexpr int kJumpTableLineSize = 5 * kInstrSize; static constexpr int kJumpTableSlotSize = 5 * kInstrSize; #elif V8_TARGET_ARCH_ARM64 - static constexpr int kJumpTableSlotSize = 3 * kInstructionSize; + static constexpr int kJumpTableLineSize = 3 * kInstrSize; + static constexpr int kJumpTableSlotSize = 3 * kInstrSize; #elif V8_TARGET_ARCH_S390X + static constexpr int kJumpTableLineSize = 20; static constexpr int kJumpTableSlotSize = 20; #elif V8_TARGET_ARCH_S390 + static constexpr int kJumpTableLineSize = 14; static constexpr int kJumpTableSlotSize = 14; #elif V8_TARGET_ARCH_PPC64 + static constexpr int kJumpTableLineSize = 48; static constexpr int kJumpTableSlotSize = 48; #elif V8_TARGET_ARCH_PPC + static constexpr int kJumpTableLineSize = 24; static constexpr int kJumpTableSlotSize = 24; #elif V8_TARGET_ARCH_MIPS + static constexpr int kJumpTableLineSize = 6 * kInstrSize; static constexpr int kJumpTableSlotSize = 6 * kInstrSize; #elif V8_TARGET_ARCH_MIPS64 + static constexpr int kJumpTableLineSize = 8 * kInstrSize; static constexpr int kJumpTableSlotSize = 8 * kInstrSize; #else + static constexpr int kJumpTableLineSize = 1; static constexpr int kJumpTableSlotSize = 1; #endif + static constexpr int kJumpTableSlotsPerLine = + kJumpTableLineSize / kJumpTableSlotSize; + // {JumpTableAssembler} is never used during snapshot generation, and its code // must be independent of the code range of any isolate anyway. Just ensure // that no relocation information is recorded, there is no buffer to store it @@ -60,16 +144,6 @@ class JumpTableAssembler : public TurboAssembler { void EmitJumpSlot(Address target); void NopBytes(int bytes); - - static void PatchJumpTableSlot(Address slot, Address new_target, - WasmCode::FlushICache flush_i_cache) { - JumpTableAssembler jsasm(slot); - jsasm.EmitJumpSlot(new_target); - jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset()); - if (flush_i_cache) { - Assembler::FlushICache(slot, kJumpTableSlotSize); - } - } }; } // namespace wasm diff --git a/chromium/v8/src/wasm/memory-tracing.cc b/chromium/v8/src/wasm/memory-tracing.cc index 6fa7219cda9..885a5341987 100644 --- a/chromium/v8/src/wasm/memory-tracing.cc +++ b/chromium/v8/src/wasm/memory-tracing.cc @@ -5,12 +5,13 @@ #include "src/wasm/memory-tracing.h" #include "src/utils.h" +#include "src/v8memory.h" namespace v8 { namespace internal { namespace wasm { -void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info, +void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info, int func_index, int position, uint8_t* mem_start) { EmbeddedVector<char, 64> value; auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep); @@ -33,20 +34,21 @@ void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info, default: SNPrintF(value, "???"); } - char eng_c = '?'; - switch (engine) { - case ExecutionEngine::kTurbofan: - eng_c = 'T'; + const char* eng = "?"; + switch (tier) { + case ExecutionTier::kOptimized: + eng = "turbofan"; break; - case ExecutionEngine::kLiftoff: - eng_c = 'L'; + case ExecutionTier::kBaseline: + eng = "liftoff"; break; - case ExecutionEngine::kInterpreter: - eng_c = 'I'; + case ExecutionTier::kInterpreter: + eng = "interpreter"; break; } - printf("%c %8d+0x%-6x %s @%08x 
%s\n", eng_c, func_index, position, - info->is_store ? "store" : "load ", info->address, value.start()); + printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position, + info->is_store ? " store to" : "load from", info->address, + value.start()); } } // namespace wasm diff --git a/chromium/v8/src/wasm/memory-tracing.h b/chromium/v8/src/wasm/memory-tracing.h index 33170aefbe4..b5105c5327d 100644 --- a/chromium/v8/src/wasm/memory-tracing.h +++ b/chromium/v8/src/wasm/memory-tracing.h @@ -8,13 +8,12 @@ #include <cstdint> #include "src/machine-type.h" +#include "src/wasm/wasm-tier.h" namespace v8 { namespace internal { namespace wasm { -enum class ExecutionEngine { kTurbofan, kLiftoff, kInterpreter }; - // This struct is create in generated code, hence use low-level types. struct MemoryTracingInfo { uint32_t address; @@ -31,7 +30,7 @@ struct MemoryTracingInfo { // Callback for tracing a memory operation for debugging. // Triggered by --wasm-trace-memory. -void TraceMemoryOperation(ExecutionEngine, const MemoryTracingInfo* info, +void TraceMemoryOperation(ExecutionTier, const MemoryTracingInfo* info, int func_index, int position, uint8_t* mem_start); } // namespace wasm diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc index 59c7bedbc1c..b950c590b5c 100644 --- a/chromium/v8/src/wasm/module-compiler.cc +++ b/chromium/v8/src/wasm/module-compiler.cc @@ -52,8 +52,7 @@ namespace wasm { enum class CompilationEvent : uint8_t { kFinishedBaselineCompilation, kFinishedTopTierCompilation, - kFailedCompilation, - kDestroyed + kFailedCompilation }; enum class CompileMode : uint8_t { kRegular, kTiering }; @@ -67,10 +66,14 @@ class CompilationState { CompilationState(internal::Isolate*, const ModuleEnv&); ~CompilationState(); - // Needs to be set before {AddCompilationUnits} is run, which triggers - // background compilation. + // Set the number of compilations unit expected to be executed. Needs to be + // set before {AddCompilationUnits} is run, which triggers background + // compilation. void SetNumberOfFunctionsToCompile(size_t num_functions); - void AddCallback( + + // Set the callback function to be called on compilation events. Needs to be + // set before {AddCompilationUnits} is run. + void SetCallback( std::function<void(CompilationEvent, ErrorThrower*)> callback); // Inserts new functions to compile and kicks off compilation. @@ -85,10 +88,10 @@ class CompilationState { void OnError(ErrorThrower* thrower); void OnFinishedUnit(); void ScheduleUnitForFinishing(std::unique_ptr<WasmCompilationUnit> unit, - WasmCompilationUnit::CompilationMode mode); + ExecutionTier mode); - void CancelAndWait(); - void OnBackgroundTaskStopped(); + void OnBackgroundTaskStopped(const WasmFeatures& detected); + void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected); void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max()); // Only one foreground thread (finisher) is allowed to run at a time. // {SetFinisherIsRunning} returns whether the flag changed its state. 
@@ -108,8 +111,10 @@ class CompilationState { return baseline_compilation_finished_; } + WasmEngine* wasm_engine() const { return wasm_engine_; } CompileMode compile_mode() const { return compile_mode_; } ModuleEnv* module_env() { return &module_env_; } + WasmFeatures* detected_features() { return &detected_features_; } private: void NotifyOnEvent(CompilationEvent event, ErrorThrower* thrower); @@ -145,16 +150,16 @@ class CompilationState { std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_; std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_finish_units_; + // Features detected to be used in this module. Features can be detected + // as a module is being compiled. + WasmFeatures detected_features_ = kNoWasmFeatures; + // End of fields protected by {mutex_}. ////////////////////////////////////////////////////////////////////////////// - // TODO(mstarzinger): We should make sure this allows at most one callback - // to exist for each {CompilationState} because reifying the error object on - // the given {ErrorThrower} can be done at most once. - std::vector<std::function<void(CompilationEvent, ErrorThrower*)>> callbacks_; + // Callback function to be called on compilation events. + std::function<void(CompilationEvent, ErrorThrower*)> callback_; - // When canceling the background_task_manager_, use {CancelAndWait} on - // the CompilationState in order to cleanly clean up. CancelableTaskManager background_task_manager_; CancelableTaskManager foreground_task_manager_; std::shared_ptr<v8::TaskRunner> foreground_task_runner_; @@ -167,15 +172,22 @@ class CompilationState { namespace { +void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) { + if (detected.threads) { + isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes); + } +} + class JSToWasmWrapperCache { public: - Handle<Code> GetOrCompileJSToWasmWrapper( - Isolate* isolate, const wasm::NativeModule* native_module, - uint32_t func_index, wasm::UseTrapHandler use_trap_handler) { - const wasm::WasmModule* module = native_module->module(); - const wasm::WasmFunction* func = &module->functions[func_index]; + Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, + const NativeModule* native_module, + uint32_t func_index, + UseTrapHandler use_trap_handler) { + const WasmModule* module = native_module->module(); + const WasmFunction* func = &module->functions[func_index]; bool is_import = func_index < module->num_imported_functions; - std::pair<bool, wasm::FunctionSig> key(is_import, *func->sig); + std::pair<bool, FunctionSig> key(is_import, *func->sig); Handle<Code>& cached = cache_[key]; if (!cached.is_null()) return cached; @@ -190,7 +202,7 @@ class JSToWasmWrapperCache { private: // We generate different code for calling imports than calling wasm functions // in this module. Both are cached separately. - using CacheKey = std::pair<bool, wasm::FunctionSig>; + using CacheKey = std::pair<bool, FunctionSig>; std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_; }; @@ -224,6 +236,7 @@ class InstanceBuilder { }; Isolate* isolate_; + const WasmFeatures enabled_; const WasmModule* const module_; ErrorThrower* thrower_; Handle<WasmModuleObject> module_object_; @@ -236,7 +249,7 @@ class InstanceBuilder { JSToWasmWrapperCache js_to_wasm_cache_; std::vector<SanitizedImport> sanitized_imports_; - wasm::UseTrapHandler use_trap_handler() const { + UseTrapHandler use_trap_handler() const { return module_object_->native_module()->use_trap_handler() ? 
kUseTrapHandler : kNoTrapHandler; } @@ -327,9 +340,8 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject( return {}; } -wasm::WasmCode* LazyCompileFunction(Isolate* isolate, - NativeModule* native_module, - int func_index) { +WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module, + int func_index) { base::ElapsedTimer compilation_timer; DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index))); @@ -357,12 +369,13 @@ wasm::WasmCode* LazyCompileFunction(Isolate* isolate, module_start + func->code.end_offset()}; ErrorThrower thrower(isolate, "WasmLazyCompile"); - WasmCompilationUnit unit(isolate, module_env, native_module, body, func_name, - func_index); - unit.ExecuteCompilation(); - wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower); + WasmCompilationUnit unit(isolate->wasm_engine(), module_env, native_module, + body, func_name, func_index, isolate->counters()); + unit.ExecuteCompilation( + native_module->compilation_state()->detected_features()); + WasmCode* wasm_code = unit.FinishCompilation(&thrower); - if (wasm::WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate); + if (WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate); // If there is a pending error, something really went wrong. The module was // verified before starting execution with lazy compilation. @@ -378,11 +391,6 @@ wasm::WasmCode* LazyCompileFunction(Isolate* isolate, auto counters = isolate->counters(); counters->wasm_lazily_compiled_functions()->Increment(); - counters->wasm_generated_code_size()->Increment( - static_cast<int>(wasm_code->instructions().size())); - counters->wasm_reloc_size()->Increment( - static_cast<int>(wasm_code->reloc_info().size())); - counters->wasm_lazy_compilation_throughput()->AddSample( compilation_time != 0 ? 
static_cast<int>(func_size / compilation_time) : 0); @@ -399,8 +407,7 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module, NativeModuleModificationScope native_module_modification_scope(native_module); - wasm::WasmCode* result = - LazyCompileFunction(isolate, native_module, func_index); + WasmCode* result = LazyCompileFunction(isolate, native_module, func_index); DCHECK_NOT_NULL(result); DCHECK_EQ(func_index, result->index()); @@ -422,22 +429,6 @@ void RecordStats(const Code* code, Counters* counters) { counters->wasm_reloc_size()->Increment(code->relocation_info()->length()); } -void RecordStats(const wasm::WasmCode* code, Counters* counters) { - counters->wasm_generated_code_size()->Increment( - static_cast<int>(code->instructions().size())); - counters->wasm_reloc_size()->Increment( - static_cast<int>(code->reloc_info().size())); -} - -void RecordStats(const wasm::NativeModule* native_module, Counters* counters) { - for (uint32_t i = native_module->num_imported_functions(), - e = native_module->num_functions(); - i < e; ++i) { - const wasm::WasmCode* code = native_module->code(i); - if (code != nullptr) RecordStats(code, counters); - } -} - bool in_bounds(uint32_t offset, size_t size, size_t upper) { return offset + size <= upper && offset + size >= offset; } @@ -472,17 +463,15 @@ class CompilationUnitBuilder { Vector<const uint8_t> bytes, WasmName name) { switch (compilation_state_->compile_mode()) { case CompileMode::kTiering: - tiering_units_.emplace_back( - CreateUnit(function, buffer_offset, bytes, name, - WasmCompilationUnit::CompilationMode::kTurbofan)); - baseline_units_.emplace_back( - CreateUnit(function, buffer_offset, bytes, name, - WasmCompilationUnit::CompilationMode::kLiftoff)); + tiering_units_.emplace_back(CreateUnit( + function, buffer_offset, bytes, name, ExecutionTier::kOptimized)); + baseline_units_.emplace_back(CreateUnit( + function, buffer_offset, bytes, name, ExecutionTier::kBaseline)); return; case CompileMode::kRegular: baseline_units_.emplace_back( CreateUnit(function, buffer_offset, bytes, name, - WasmCompilationUnit::GetDefaultCompilationMode())); + WasmCompilationUnit::GetDefaultExecutionTier())); return; } UNREACHABLE(); @@ -501,17 +490,17 @@ class CompilationUnitBuilder { } private: - std::unique_ptr<WasmCompilationUnit> CreateUnit( - const WasmFunction* function, uint32_t buffer_offset, - Vector<const uint8_t> bytes, WasmName name, - WasmCompilationUnit::CompilationMode mode) { + std::unique_ptr<WasmCompilationUnit> CreateUnit(const WasmFunction* function, + uint32_t buffer_offset, + Vector<const uint8_t> bytes, + WasmName name, + ExecutionTier mode) { return base::make_unique<WasmCompilationUnit>( - compilation_state_->isolate(), compilation_state_->module_env(), + compilation_state_->wasm_engine(), compilation_state_->module_env(), native_module_, - wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(), - bytes.end()}, - name, function->func_index, mode, - compilation_state_->isolate()->async_counters().get()); + FunctionBody{function->sig, buffer_offset, bytes.begin(), bytes.end()}, + name, function->func_index, + compilation_state_->isolate()->async_counters().get(), mode); } NativeModule* native_module_; @@ -524,11 +513,9 @@ class CompilationUnitBuilder { // foreground and background threads). The no_finisher_callback is called // within the result_mutex_ lock when no finishing task is running, i.e. when // the finisher_is_running_ flag is not set. 
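// A sketch of how this is driven, mirroring {CompileInParallel} below (the
// surrounding setup is elided): each thread accumulates detected features
// into a local {WasmFeatures} value, because under {DisallowHeapAccess} it
// must not touch isolate state, and the union is published from the
// foreground afterwards:
//
//   WasmFeatures detected = kNoWasmFeatures;
//   while (FetchAndExecuteCompilationUnit(compilation_state, &detected) &&
//          !compilation_state->baseline_compilation_finished()) {
//     // ... finish queued baseline units on the main thread ...
//   }
//   compilation_state->PublishDetectedFeatures(isolate, detected);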
-bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) { - DisallowHeapAllocation no_allocation; - DisallowHandleAllocation no_handles; - DisallowHandleDereference no_deref; - DisallowCodeDependencyChange no_dependency_change; +bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state, + WasmFeatures* detected) { + DisallowHeapAccess no_heap_access; std::unique_ptr<WasmCompilationUnit> unit = compilation_state->GetNextCompilationUnit(); @@ -539,8 +526,8 @@ bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) { // to Turbofan if it cannot be compiled using Liftoff. This can be removed // later as soon as Liftoff can compile any function. Then, we can directly // access {unit->mode()} within {ScheduleUnitForFinishing()}. - WasmCompilationUnit::CompilationMode mode = unit->mode(); - unit->ExecuteCompilation(); + ExecutionTier mode = unit->mode(); + unit->ExecuteCompilation(detected); compilation_state->ScheduleUnitForFinishing(std::move(unit), mode); return true; @@ -573,7 +560,7 @@ void FinishCompilationUnits(CompilationState* compilation_state, std::unique_ptr<WasmCompilationUnit> unit = compilation_state->GetNextExecutedUnit(); if (unit == nullptr) break; - wasm::WasmCode* result = unit->FinishCompilation(thrower); + WasmCode* result = unit->FinishCompilation(thrower); if (thrower->error()) { compilation_state->Abort(); @@ -641,8 +628,10 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module, // result is enqueued in {baseline_finish_units_}. // The foreground task bypasses waiting on memory threshold, because // its results will immediately be converted to code (below). - while (FetchAndExecuteCompilationUnit(compilation_state) && - !compilation_state->baseline_compilation_finished()) { + WasmFeatures detected_features; + while ( + FetchAndExecuteCompilationUnit(compilation_state, &detected_features) && + !compilation_state->baseline_compilation_finished()) { // 2.b) If {baseline_finish_units_} contains a compilation unit, the main // thread dequeues it and finishes the compilation unit. Compilation // units are finished concurrently to the background threads to save @@ -663,6 +652,9 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module, if (compilation_state->baseline_compilation_finished()) break; } + // Publish features from the foreground and background tasks. + compilation_state->PublishDetectedFeatures(isolate, detected_features); + // 4) If tiering-compilation is enabled, we need to set the finisher // to false, such that the background threads will spawn a foreground // thread to finish the top-tier compilation units. @@ -679,13 +671,14 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module, ModuleWireBytes wire_bytes(native_module->wire_bytes()); const WasmModule* module = module_env->module; + WasmFeatures detected = kNoWasmFeatures; for (uint32_t i = 0; i < module->functions.size(); ++i) { const WasmFunction& func = module->functions[i]; if (func.imported) continue; // Imports are compiled at instantiation time. // Compile the function. 
- wasm::WasmCode* code = WasmCompilationUnit::CompileWasmFunction( - native_module, thrower, isolate, module_env, &func); + WasmCode* code = WasmCompilationUnit::CompileWasmFunction( + isolate, native_module, &detected, thrower, module_env, &func); if (code == nullptr) { TruncatedUserString<> name(wire_bytes.GetName(&func, module)); thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(), @@ -693,6 +686,7 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module, break; } } + UpdateFeatureUseCounts(isolate, detected); } void ValidateSequentially(Isolate* isolate, NativeModule* native_module, @@ -709,9 +703,18 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module, const byte* base = wire_bytes.start(); FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(), base + func.code.end_offset()}; - DecodeResult result = VerifyWasmCodeWithStats( - isolate->allocator(), module, body, module->origin, - isolate->async_counters().get()); + DecodeResult result; + { + auto time_counter = + SELECT_WASM_COUNTER(isolate->async_counters(), module->origin, + wasm_decode, function_time); + + TimedHistogramScope wasm_decode_function_time_scope(time_counter); + WasmFeatures detected; + result = VerifyWasmCode(isolate->allocator(), + native_module->enabled_features(), module, + &detected, body); + } if (result.failed()) { TruncatedUserString<> name(wire_bytes.GetName(&func, module)); thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i, @@ -755,8 +758,6 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower, CompileSequentially(isolate, native_module, env, thrower); } if (thrower->error()) return; - - RecordStats(native_module, isolate->counters()); } } @@ -801,7 +802,7 @@ class FinishCompileTask : public CancelableTask { } ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile"); - wasm::WasmCode* result = unit->FinishCompilation(&thrower); + WasmCode* result = unit->FinishCompilation(&thrower); if (thrower.error()) { DCHECK_NULL(result); @@ -818,11 +819,7 @@ class FinishCompileTask : public CancelableTask { DCHECK_EQ(CompileMode::kTiering, compilation_state_->compile_mode()); DCHECK(!result->is_liftoff()); - if (wasm::WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate); - - // Update the counters to include the top-tier code. - RecordStats(result, - compilation_state_->isolate()->async_counters().get()); + if (WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate); } // Update the compilation state, and possibly notify @@ -855,26 +852,34 @@ class BackgroundCompileTask : public CancelableTask { // The number of currently running background tasks is reduced in // {OnBackgroundTaskStopped}. 
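// Two details of the loop below are worth noting: {failed()} is re-checked
// before every fetch, so a compile error observed by any task stops all of
// them promptly; and the locally detected features are merged into the
// shared state only once, in {OnBackgroundTaskStopped} (which takes the
// state's mutex), instead of locking once per compiled unit.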
while (!compilation_state_->failed()) { - if (!FetchAndExecuteCompilationUnit(compilation_state_)) { + if (!FetchAndExecuteCompilationUnit(compilation_state_, + &detected_features_)) { break; } } - compilation_state_->OnBackgroundTaskStopped(); + compilation_state_->OnBackgroundTaskStopped(detected_features_); } private: CompilationState* compilation_state_; + WasmFeatures detected_features_ = kNoWasmFeatures; }; } // namespace MaybeHandle<WasmModuleObject> CompileToModuleObject( - Isolate* isolate, ErrorThrower* thrower, + Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower, std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes) { const WasmModule* wasm_module = module.get(); TimedHistogramScope wasm_compile_module_time_scope(SELECT_WASM_COUNTER( isolate->counters(), wasm_module->origin, wasm_compile, module_time)); + + // Embedder usage count for declared shared memories. + if (wasm_module->has_shared_memory) { + isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory); + } + // TODO(6792): No longer needed once WebAssembly code is off heap. Use // base::Optional to be able to close the scope before notifying the debugger. base::Optional<CodeSpaceMemoryModificationScope> modification_scope( @@ -902,7 +907,6 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject( // TODO(clemensh): For the same module (same bytes / same hash), we should // only have one WasmModuleObject. Otherwise, we might only set // breakpoints on a (potentially empty) subset of the instances. - ModuleEnv env = CreateDefaultModuleEnv(wasm_module); // Create the compiled module object and populate with compiled functions @@ -910,8 +914,8 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject( // serializable. Instantiation may occur off a deserialized version of this // object. Handle<WasmModuleObject> module_object = WasmModuleObject::New( - isolate, std::move(module), env, std::move(wire_bytes_copy), script, - asm_js_offset_table); + isolate, enabled, std::move(module), env, std::move(wire_bytes_copy), + script, asm_js_offset_table); CompileNativeModule(isolate, thrower, module_object, wasm_module, &env); if (thrower->error()) return {}; @@ -938,6 +942,7 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower, MaybeHandle<JSReceiver> ffi, MaybeHandle<JSArrayBuffer> memory) : isolate_(isolate), + enabled_(module_object->native_module()->enabled_features()), module_(module_object->module()), thrower_(thrower), module_object_(module_object), @@ -987,13 +992,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { Handle<JSArrayBuffer> memory = memory_.ToHandleChecked(); memory->set_is_neuterable(false); - DCHECK_IMPLIES(use_trap_handler(), - module_->origin == kAsmJsOrigin || - memory->is_wasm_memory() || - memory->backing_store() == nullptr || - // TODO(836800) Remove once is_wasm_memory transfers over - // post-message. 
- (FLAG_experimental_wasm_threads && memory->is_shared())); + DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin || + memory->is_wasm_memory() || + memory->backing_store() == nullptr); } else if (initial_pages > 0 || use_trap_handler()) { // We need to unconditionally create a guard region if using trap handlers, // even when the size is zero to prevent null-dereference issues @@ -1037,11 +1038,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { //-------------------------------------------------------------------------- // Create the WebAssembly.Instance object. //-------------------------------------------------------------------------- - wasm::NativeModule* native_module = module_object_->native_module(); + NativeModule* native_module = module_object_->native_module(); TRACE("New module instantiation for %p\n", native_module); Handle<WasmInstanceObject> instance = WasmInstanceObject::New(isolate_, module_object_); - wasm::NativeModuleModificationScope native_modification_scope(native_module); + NativeModuleModificationScope native_modification_scope(native_module); //-------------------------------------------------------------------------- // Set up the globals for the new instance. @@ -1075,7 +1076,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { // Set up the array of references to imported globals' array buffers. //-------------------------------------------------------------------------- if (module_->num_imported_mutable_globals > 0) { - DCHECK(FLAG_experimental_wasm_mut_global); // TODO(binji): This allocates one slot for each mutable global, which is // more than required if multiple globals are imported from the same // module. @@ -1127,9 +1127,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { if (!memory_.is_null()) { // Double-check the {memory} array buffer matches the instance. Handle<JSArrayBuffer> memory = memory_.ToHandleChecked(); - uint32_t mem_size = 0; - CHECK(memory->byte_length()->ToUint32(&mem_size)); - CHECK_EQ(instance->memory_size(), mem_size); + CHECK_EQ(instance->memory_size(), memory->byte_length()->Number()); CHECK_EQ(instance->memory_start(), memory->backing_store()); } } @@ -1319,7 +1317,8 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) { return expr.val.i32_const; case WasmInitExpr::kGlobalIndex: { uint32_t offset = module_->globals[expr.val.global_index].offset; - return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset)); + return ReadLittleEndianValue<uint32_t>( + reinterpret_cast<Address>(raw_buffer_ptr(globals_, offset))); } default: UNREACHABLE(); @@ -1348,17 +1347,20 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) { num, ValueTypes::TypeName(global.type)); switch (global.type) { case kWasmI32: - *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num); + WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), + static_cast<int32_t>(num)); break; case kWasmI64: // TODO(titzer): initialization of imported i64 globals. 
UNREACHABLE(); break; case kWasmF32: - *GetRawGlobalPtr<float>(global) = static_cast<float>(num); + WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), + static_cast<float>(num)); break; case kWasmF64: - *GetRawGlobalPtr<double>(global) = static_cast<double>(num); + WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), + static_cast<double>(num)); break; default: UNREACHABLE(); @@ -1372,25 +1374,25 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, switch (global.type) { case kWasmI32: { int32_t num = value->GetI32(); - *GetRawGlobalPtr<int32_t>(global) = num; + WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num); TRACE("%d", num); break; } case kWasmI64: { int64_t num = value->GetI64(); - *GetRawGlobalPtr<int64_t>(global) = num; + WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num); TRACE("%" PRId64, num); break; } case kWasmF32: { float num = value->GetF32(); - *GetRawGlobalPtr<float>(global) = num; + WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num); TRACE("%f", num); break; } case kWasmF64: { double num = value->GetF64(); - *GetRawGlobalPtr<double>(global) = num; + WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num); TRACE("%lf", num); break; } @@ -1519,7 +1521,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) { RecordStats(*wrapper_code, isolate_->counters()); WasmCode* wasm_code = native_module->AddCodeCopy( - wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index); + wrapper_code, WasmCode::kWasmToJsWrapper, func_index); ImportedFunctionEntry entry(instance, func_index); entry.set_wasm_to_js(*js_receiver, wasm_code); } @@ -1662,8 +1664,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) { // The mutable-global proposal allows importing i64 values, but only if // they are passed as a WebAssembly.Global object. 
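// (In other words, with the feature enabled an i64 global must arrive
// wrapped in a WebAssembly.Global; a plain JS number is rejected, since a
// double cannot represent every 64-bit integer value losslessly.)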
- if (global.type == kWasmI64 && !(FLAG_experimental_wasm_mut_global && - value->IsWasmGlobalObject())) { + if (global.type == kWasmI64 && + !(enabled_.mut_global && value->IsWasmGlobalObject())) { ReportLinkError("global import cannot have type i64", index, module_name, import_name); return -1; @@ -1684,7 +1686,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) { } } } - if (FLAG_experimental_wasm_mut_global) { + if (enabled_.mut_global) { if (value->IsWasmGlobalObject()) { auto global_object = Handle<WasmGlobalObject>::cast(value); if (global_object->type() != global.type) { @@ -1759,22 +1761,25 @@ T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) { void InstanceBuilder::InitGlobals() { for (auto global : module_->globals) { if (global.mutability && global.imported) { - DCHECK(FLAG_experimental_wasm_mut_global); continue; } switch (global.init.kind) { case WasmInitExpr::kI32Const: - *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const; + WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), + global.init.val.i32_const); break; case WasmInitExpr::kI64Const: - *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const; + WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), + global.init.val.i64_const); break; case WasmInitExpr::kF32Const: - *GetRawGlobalPtr<float>(global) = global.init.val.f32_const; + WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), + global.init.val.f32_const); break; case WasmInitExpr::kF64Const: - *GetRawGlobalPtr<double>(global) = global.init.val.f64_const; + WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), + global.init.val.f64_const); break; case WasmInitExpr::kGlobalIndex: { // Initialize with another global. @@ -1805,8 +1810,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) { thrower_->RangeError("Out of memory: wasm memory too large"); return Handle<JSArrayBuffer>::null(); } - const bool is_shared_memory = - module_->has_shared_memory && i::FLAG_experimental_wasm_threads; + const bool is_shared_memory = module_->has_shared_memory && enabled_.threads; i::SharedFlag shared_flag = is_shared_memory ? 
i::SharedFlag::kShared : i::SharedFlag::kNotShared; Handle<JSArrayBuffer> mem_buffer; @@ -1948,7 +1952,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) { } case kExternalGlobal: { const WasmGlobal& global = module_->globals[exp.index]; - if (FLAG_experimental_wasm_mut_global) { + if (enabled_.mut_global) { Handle<JSArrayBuffer> buffer; uint32_t offset; @@ -1985,13 +1989,16 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) { double num = 0; switch (global.type) { case kWasmI32: - num = *GetRawGlobalPtr<int32_t>(global); + num = ReadLittleEndianValue<int32_t>( + GetRawGlobalPtr<int32_t>(global)); break; case kWasmF32: - num = *GetRawGlobalPtr<float>(global); + num = + ReadLittleEndianValue<float>(GetRawGlobalPtr<float>(global)); break; case kWasmF64: - num = *GetRawGlobalPtr<double>(global); + num = ReadLittleEndianValue<double>( + GetRawGlobalPtr<double>(global)); break; case kWasmI64: thrower_->LinkError( @@ -2121,10 +2128,11 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) { } AsyncCompileJob::AsyncCompileJob( - Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length, - Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver) + Isolate* isolate, const WasmFeatures& enabled, + std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context, + std::shared_ptr<CompilationResultResolver> resolver) : isolate_(isolate), + enabled_features_(enabled), async_counters_(isolate->async_counters()), bytes_copy_(std::move(bytes_copy)), wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length), @@ -2144,17 +2152,9 @@ void AsyncCompileJob::Start() { } void AsyncCompileJob::Abort() { - background_task_manager_.CancelAndWait(); - if (native_module_) native_module_->compilation_state()->Abort(); - if (num_pending_foreground_tasks_ == 0) { - // No task is pending, we can just remove the AsyncCompileJob. - isolate_->wasm_engine()->RemoveCompileJob(this); - } else { - // There is still a compilation task in the task queue. We enter the - // AbortCompilation state and wait for this compilation task to abort the - // AsyncCompileJob. - NextStep<AbortCompilation>(); - } + // Removing this job will trigger the destructor, which will cancel all + // compilation. + isolate_->wasm_engine()->RemoveCompileJob(this); } class AsyncStreamingProcessor final : public StreamingProcessor { @@ -2202,12 +2202,20 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() { AsyncCompileJob::~AsyncCompileJob() { background_task_manager_.CancelAndWait(); + if (native_module_) native_module_->compilation_state()->Abort(); + // Tell the streaming decoder that the AsyncCompileJob is not available + // anymore. + // TODO(ahaas): Is this notification really necessary? Check + // https://crbug.com/888170. + if (stream_) stream_->NotifyCompilationEnded(); + CancelPendingForegroundTask(); for (auto d : deferred_handles_) delete d; } +// This function assumes that it is executed in a HandleScope, and that a +// context is set on the isolate. void AsyncCompileJob::FinishCompile() { - RecordStats(native_module_, counters()); - + DCHECK_NOT_NULL(isolate_->context()); // Finish the wasm script now and make it public to the debugger. Handle<Script> script(module_object_->script(), isolate_); isolate_->debug()->OnAfterCompile(script); @@ -2215,12 +2223,16 @@ void AsyncCompileJob::FinishCompile() { // Log the code within the generated module for profiling. 
native_module_->LogWasmCodes(isolate_); + // We can only update the feature counts once the entire compile is done. + auto compilation_state = native_module_->compilation_state(); + compilation_state->PublishDetectedFeatures( + isolate_, *compilation_state->detected_features()); + // TODO(wasm): compiling wrappers should be made async as well. DoSync<CompileWrappers>(); } void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) { - if (stream_) stream_->NotifyError(); // {job} keeps the {this} pointer alive. std::shared_ptr<AsyncCompileJob> job = isolate_->wasm_engine()->RemoveCompileJob(this); @@ -2235,16 +2247,11 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) { // task) and schedule the next step(s), if any. class AsyncCompileJob::CompileStep { public: - explicit CompileStep(int num_background_tasks = 0) - : num_background_tasks_(num_background_tasks) {} - virtual ~CompileStep() {} void Run(bool on_foreground) { if (on_foreground) { HandleScope scope(job_->isolate_); - --job_->num_pending_foreground_tasks_; - DCHECK_EQ(0, job_->num_pending_foreground_tasks_); SaveContext saved_context(job_->isolate_); job_->isolate_->set_context(*job_->native_context_); RunInForeground(); @@ -2256,10 +2263,7 @@ class AsyncCompileJob::CompileStep { virtual void RunInForeground() { UNREACHABLE(); } virtual void RunInBackground() { UNREACHABLE(); } - int NumberOfBackgroundTasks() { return num_background_tasks_; } - AsyncCompileJob* job_ = nullptr; - const int num_background_tasks_; }; class AsyncCompileJob::CompileTask : public CancelableTask { @@ -2274,18 +2278,55 @@ class AsyncCompileJob::CompileTask : public CancelableTask { job_(job), on_foreground_(on_foreground) {} - void RunInternal() override { job_->step_->Run(on_foreground_); } + ~CompileTask() { + if (job_ != nullptr && on_foreground_) ResetPendingForegroundTask(); + } + + void RunInternal() final { + if (!job_) return; + if (on_foreground_) ResetPendingForegroundTask(); + job_->step_->Run(on_foreground_); + // After execution, reset {job_} such that we don't try to reset the pending + // foreground task when the task is deleted. + job_ = nullptr; + } + + void Cancel() { + DCHECK_NOT_NULL(job_); + job_ = nullptr; + } private: + // {job_} will be cleared to cancel a pending task. AsyncCompileJob* job_; bool on_foreground_; + + void ResetPendingForegroundTask() const { + DCHECK_EQ(this, job_->pending_foreground_task_); + job_->pending_foreground_task_ = nullptr; + } }; void AsyncCompileJob::StartForegroundTask() { - ++num_pending_foreground_tasks_; - DCHECK_EQ(1, num_pending_foreground_tasks_); + DCHECK_NULL(pending_foreground_task_); + + auto new_task = base::make_unique<CompileTask>(this, true); + pending_foreground_task_ = new_task.get(); + foreground_task_runner_->PostTask(std::move(new_task)); +} + +void AsyncCompileJob::ExecuteForegroundTaskImmediately() { + DCHECK_NULL(pending_foreground_task_); - foreground_task_runner_->PostTask(base::make_unique<CompileTask>(this, true)); + auto new_task = base::make_unique<CompileTask>(this, true); + pending_foreground_task_ = new_task.get(); + new_task->Run(); +} + +void AsyncCompileJob::CancelPendingForegroundTask() { + if (!pending_foreground_task_) return; + pending_foreground_task_->Cancel(); + pending_foreground_task_ = nullptr; } template <typename Step, typename... Args> @@ -2309,10 +2350,7 @@ void AsyncCompileJob::StartBackgroundTask() { template <typename Step, typename... Args> void AsyncCompileJob::DoAsync(Args&&... 
args) { NextStep<Step>(std::forward<Args>(args)...); - int end = step_->NumberOfBackgroundTasks(); - for (int i = 0; i < end; ++i) { - StartBackgroundTask(); - } + StartBackgroundTask(); } template <typename Step, typename... Args> @@ -2326,8 +2364,6 @@ void AsyncCompileJob::NextStep(Args&&... args) { //========================================================================== class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep { public: - DecodeModule() : CompileStep(1) {} - void RunInBackground() override { ModuleResult result; { @@ -2335,17 +2371,18 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep { DisallowHeapAllocation no_allocation; // Decode the module bytes. TRACE_COMPILE("(1) Decoding module...\n"); - result = AsyncDecodeWasmModule(job_->isolate_, job_->wire_bytes_.start(), - job_->wire_bytes_.end(), false, - kWasmOrigin, job_->async_counters()); + result = + DecodeWasmModule(job_->enabled_features_, job_->wire_bytes_.start(), + job_->wire_bytes_.end(), false, kWasmOrigin, + job_->async_counters().get(), + job_->isolate()->wasm_engine()->allocator()); } if (result.failed()) { // Decoding failure; reject the promise and clean up. job_->DoSync<DecodeFail>(std::move(result)); } else { // Decode passed. - job_->module_ = std::move(result.val); - job_->DoSync<PrepareAndStartCompile>(true); + job_->DoSync<PrepareAndStartCompile>(std::move(result.val), true); } } }; @@ -2373,10 +2410,12 @@ class AsyncCompileJob::DecodeFail : public CompileStep { //========================================================================== class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { public: - explicit PrepareAndStartCompile(bool start_compilation) - : start_compilation_(start_compilation) {} + PrepareAndStartCompile(std::shared_ptr<const WasmModule> module, + bool start_compilation) + : module_(module), start_compilation_(start_compilation) {} private: + std::shared_ptr<const WasmModule> module_; bool start_compilation_; void RunInForeground() override { @@ -2386,12 +2425,18 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { // is done. job_->background_task_manager_.CancelAndWait(); + // Embedder usage count for declared shared memories. + if (module_->has_shared_memory) { + job_->isolate_->CountUsage( + v8::Isolate::UseCounterFeature::kWasmSharedMemory); + } + // Create heap objects for script and module bytes to be stored in the // module object. Asm.js is not compiled asynchronously. Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_); Handle<ByteArray> asm_js_offset_table; - const WasmModule* module = job_->module_.get(); + const WasmModule* module = module_.get(); ModuleEnv env = CreateDefaultModuleEnv(module); // TODO(wasm): Improve efficiency of storing module wire bytes. Only store // relevant sections, not function bodies @@ -2403,7 +2448,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { // breakpoints on a (potentially empty) subset of the instances. // Create the module object. 
job_->module_object_ = WasmModuleObject::New( - job_->isolate_, job_->module_, env, + job_->isolate_, job_->enabled_features_, module_, env, {std::move(job_->bytes_copy_), job_->wire_bytes_.length()}, script, asm_js_offset_table); job_->native_module_ = job_->module_object_->native_module(); @@ -2433,7 +2478,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { // capture the {job} pointer by copy, as it otherwise is dependent // on the current step we are in. AsyncCompileJob* job = job_; - compilation_state->AddCallback( + compilation_state->SetCallback( [job](CompilationEvent event, ErrorThrower* thrower) { // Callback is called from a foreground thread. switch (event) { @@ -2445,17 +2490,14 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { } return; case CompilationEvent::kFinishedTopTierCompilation: - // It is only safe to remove the AsyncCompileJob if no - // foreground task is currently pending, and no finisher is - // outstanding (streaming compilation). - if (job->num_pending_foreground_tasks_ == 0 && - job->outstanding_finishers_.load() == 0) { - job->isolate_->wasm_engine()->RemoveCompileJob(job); - } else { - // If a foreground task was pending or a finsher was pending, - // we will rely on FinishModule to remove the job. + // If a foreground task or a finisher is pending, we rely on + // FinishModule to remove the job. + if (job->pending_foreground_task_ || + job->outstanding_finishers_.load() > 0) { job->tiering_completed_ = true; + return; } + job->isolate_->wasm_engine()->RemoveCompileJob(job); return; case CompilationEvent::kFailedCompilation: { // Tier-up compilation should not fail if baseline compilation @@ -2473,9 +2515,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { job->DoSync<CompileFailed>(error); return; } - case CompilationEvent::kDestroyed: - // Nothing to do. 
- return; } UNREACHABLE(); }); @@ -2535,8 +2574,8 @@ class AsyncCompileJob::FinishModule : public CompileStep { TRACE_COMPILE("(6) Finish module...\n"); job_->AsyncCompileSucceeded(job_->module_object_); - size_t num_functions = - job_->module_->functions.size() - job_->module_->num_imported_functions; + size_t num_functions = job_->native_module_->num_functions() - + job_->native_module_->num_imported_functions(); if (job_->native_module_->compilation_state()->compile_mode() == CompileMode::kRegular || num_functions == 0) { @@ -2556,15 +2595,10 @@ class AsyncCompileJob::FinishModule : public CompileStep { } }; -class AsyncCompileJob::AbortCompilation : public CompileStep { - void RunInForeground() override { - TRACE_COMPILE("Abort asynchronous compilation ...\n"); - job_->isolate_->wasm_engine()->RemoveCompileJob(job_); - } -}; - AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job) - : job_(job), compilation_unit_builder_(nullptr) {} + : decoder_(job->enabled_features_), + job_(job), + compilation_unit_builder_(nullptr) {} void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) { // Make sure all background tasks stopped executing before we change the state @@ -2581,7 +2615,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) { if (job_->native_module_) { job_->native_module_->compilation_state()->Abort(); - if (job_->num_pending_foreground_tasks_ == 0) { + if (job_->pending_foreground_task_ == nullptr) { job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result)); } else { job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result)); @@ -2600,8 +2634,8 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) { bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes, uint32_t offset) { TRACE_STREAMING("Process module header...\n"); - decoder_.StartDecoding(job_->isolate()); - job_->module_ = decoder_.shared_module(); + decoder_.StartDecoding(job_->async_counters().get(), + job_->isolate()->wasm_engine()->allocator()); decoder_.DecodeModuleHeader(bytes, offset); if (!decoder_.ok()) { FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false)); @@ -2652,14 +2686,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count, FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false)); return false; } - job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(false); + job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>( + decoder_.shared_module(), false); // Execute the PrepareAndStartCompile step immediately and not in a separate - // task. The step expects to be run on a separate foreground thread though, so - // we to increment {num_pending_foreground_tasks_} to look like one. - ++job_->num_pending_foreground_tasks_; - DCHECK_EQ(1, job_->num_pending_foreground_tasks_); - constexpr bool on_foreground = true; - job_->step_->Run(on_foreground); + // task. + job_->ExecuteForegroundTaskImmediately(); job_->native_module_->compilation_state()->SetNumberOfFunctionsToCompile( functions_count); @@ -2709,15 +2740,17 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) { } ModuleResult result = decoder_.FinishDecoding(false); DCHECK(result.ok()); - DCHECK_EQ(job_->module_, result.val); if (job_->DecrementAndCheckFinisherCount()) { if (job_->native_module_ == nullptr) { // We are processing a WebAssembly module without code section. We need to // prepare compilation first before we can finish it. 
// {PrepareAndStartCompile} will call {FinishCompile} by itself if there // is no code section. - job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(true); + job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(result.val, true); } else { + HandleScope scope(job_->isolate_); + SaveContext saved_context(job_->isolate_); + job_->isolate_->set_context(*job_->native_context_); job_->FinishCompile(); } } @@ -2763,17 +2796,11 @@ CompilationState::CompilationState(internal::Isolate* isolate, v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_); v8::Platform* platform = V8::GetCurrentPlatform(); foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate); - - // Register task manager for clean shutdown in case of an engine shutdown. - wasm_engine_->Register(&background_task_manager_); - wasm_engine_->Register(&foreground_task_manager_); } CompilationState::~CompilationState() { - CancelAndWait(); + background_task_manager_.CancelAndWait(); foreground_task_manager_.CancelAndWait(); - wasm_engine_->Unregister(&foreground_task_manager_); - NotifyOnEvent(CompilationEvent::kDestroyed, nullptr); } void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) { @@ -2786,9 +2813,10 @@ void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) { } } -void CompilationState::AddCallback( +void CompilationState::SetCallback( std::function<void(CompilationEvent, ErrorThrower*)> callback) { - callbacks_.push_back(callback); + DCHECK_NULL(callback_); + callback_ = callback; } void CompilationState::AddCompilationUnits( @@ -2799,8 +2827,7 @@ void CompilationState::AddCompilationUnits( if (compile_mode_ == CompileMode::kTiering) { DCHECK_EQ(baseline_units.size(), tiering_units.size()); - DCHECK_EQ(tiering_units.back()->mode(), - WasmCompilationUnit::CompilationMode::kTurbofan); + DCHECK_EQ(tiering_units.back()->mode(), ExecutionTier::kOptimized); tiering_compilation_units_.insert( tiering_compilation_units_.end(), std::make_move_iterator(tiering_units.begin()), @@ -2860,7 +2887,7 @@ void CompilationState::OnFinishedUnit() { --outstanding_units_; if (outstanding_units_ == 0) { - CancelAndWait(); + background_task_manager_.CancelAndWait(); baseline_compilation_finished_ = true; DCHECK(compile_mode_ == CompileMode::kRegular || @@ -2887,11 +2914,10 @@ void CompilationState::OnFinishedUnit() { } void CompilationState::ScheduleUnitForFinishing( - std::unique_ptr<WasmCompilationUnit> unit, - WasmCompilationUnit::CompilationMode mode) { + std::unique_ptr<WasmCompilationUnit> unit, ExecutionTier mode) { base::LockGuard<base::Mutex> guard(&mutex_); if (compile_mode_ == CompileMode::kTiering && - mode == WasmCompilationUnit::CompilationMode::kTurbofan) { + mode == ExecutionTier::kOptimized) { tiering_finish_units_.push_back(std::move(unit)); } else { baseline_finish_units_.push_back(std::move(unit)); @@ -2904,15 +2930,21 @@ void CompilationState::ScheduleUnitForFinishing( } } -void CompilationState::CancelAndWait() { - background_task_manager_.CancelAndWait(); - wasm_engine_->Unregister(&background_task_manager_); -} - -void CompilationState::OnBackgroundTaskStopped() { +void CompilationState::OnBackgroundTaskStopped(const WasmFeatures& detected) { base::LockGuard<base::Mutex> guard(&mutex_); DCHECK_LE(1, num_background_tasks_); --num_background_tasks_; + UnionFeaturesInto(&detected_features_, detected); +} + +void CompilationState::PublishDetectedFeatures(Isolate* isolate, + const WasmFeatures& detected) { + // Notifying the isolate of the feature counts must take 
place under + // the mutex, because even if we have finished baseline compilation, + // tiering compilations may still occur in the background. + base::LockGuard<base::Mutex> guard(&mutex_); + UnionFeaturesInto(&detected_features_, detected); + UpdateFeatureUseCounts(isolate, detected_features_); } void CompilationState::RestartBackgroundTasks(size_t max) { @@ -2962,14 +2994,12 @@ void CompilationState::Abort() { base::LockGuard<base::Mutex> guard(&mutex_); failed_ = true; } - CancelAndWait(); + background_task_manager_.CancelAndWait(); } void CompilationState::NotifyOnEvent(CompilationEvent event, ErrorThrower* thrower) { - for (auto& callback_function : callbacks_) { - callback_function(event, thrower); - } + if (callback_) callback_(event, thrower); } void CompileJsToWasmWrappers(Isolate* isolate, @@ -2978,7 +3008,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, int wrapper_index = 0; Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate); NativeModule* native_module = module_object->native_module(); - wasm::UseTrapHandler use_trap_handler = + UseTrapHandler use_trap_handler = native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler; const WasmModule* module = native_module->module(); for (auto exp : module->export_table) { diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h index eb9f271543c..57bbd883e21 100644 --- a/chromium/v8/src/wasm/module-compiler.h +++ b/chromium/v8/src/wasm/module-compiler.h @@ -11,6 +11,7 @@ #include "src/cancelable-task.h" #include "src/globals.h" +#include "src/wasm/wasm-features.h" #include "src/wasm/wasm-module.h" namespace v8 { @@ -48,7 +49,7 @@ std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState( ModuleEnv* GetModuleEnv(CompilationState* compilation_state); MaybeHandle<WasmModuleObject> CompileToModuleObject( - Isolate* isolate, ErrorThrower* thrower, + Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower, std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes); @@ -77,9 +78,10 @@ Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index); // TODO(wasm): factor out common parts of this with the synchronous pipeline. class AsyncCompileJob { public: - explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, - size_t length, Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver); + AsyncCompileJob(Isolate* isolate, const WasmFeatures& enabled_features, + std::unique_ptr<byte[]> bytes_copy, size_t length, + Handle<Context> context, + std::shared_ptr<CompilationResultResolver> resolver); ~AsyncCompileJob(); void Start(); @@ -87,6 +89,7 @@ class AsyncCompileJob { std::shared_ptr<StreamingDecoder> CreateStreamingDecoder(); void Abort(); + void CancelPendingForegroundTask(); Isolate* isolate() const { return isolate_; } @@ -95,14 +98,12 @@ class AsyncCompileJob { class CompileStep; // States of the AsyncCompileJob. 
- class DecodeModule; - class DecodeFail; - class PrepareAndStartCompile; - class CompileFailed; - class CompileWrappers; - class FinishModule; - class AbortCompilation; - class UpdateToTopTierCompiledCode; + class DecodeModule; // Step 1 (async) + class DecodeFail; // Step 1b (sync) + class PrepareAndStartCompile; // Step 2 (sync) + class CompileFailed; // Step 4b (sync) + class CompileWrappers; // Step 5 (sync) + class FinishModule; // Step 6 (sync) const std::shared_ptr<Counters>& async_counters() const { return async_counters_; @@ -116,6 +117,7 @@ class AsyncCompileJob { void AsyncCompileSucceeded(Handle<WasmModuleObject> result); void StartForegroundTask(); + void ExecuteForegroundTaskImmediately(); void StartBackgroundTask(); @@ -137,16 +139,16 @@ class AsyncCompileJob { friend class AsyncStreamingProcessor; Isolate* isolate_; + const WasmFeatures enabled_features_; const std::shared_ptr<Counters> async_counters_; - // Copy of the module wire bytes, moved into the {native_module_} on it's + // Copy of the module wire bytes, moved into the {native_module_} on its // creation. std::unique_ptr<byte[]> bytes_copy_; // Reference to the wire bytes (hold in {bytes_copy_} or as part of // {native_module_}). ModuleWireBytes wire_bytes_; Handle<Context> native_context_; - std::unique_ptr<CompilationResultResolver> resolver_; - std::shared_ptr<const WasmModule> module_; + std::shared_ptr<CompilationResultResolver> resolver_; std::vector<DeferredHandles*> deferred_handles_; Handle<WasmModuleObject> module_object_; @@ -169,8 +171,8 @@ class AsyncCompileJob { return outstanding_finishers_.fetch_sub(1) == 1; } - // Counts the number of pending foreground tasks. - int32_t num_pending_foreground_tasks_ = 0; + // A reference to a pending foreground task, or {nullptr} if none is pending. + CompileTask* pending_foreground_task_ = nullptr; // The AsyncCompileJob owns the StreamingDecoder because the StreamingDecoder // contains data which is needed by the AsyncCompileJob for streaming diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc index bae8e4baf83..db9cf450495 100644 --- a/chromium/v8/src/wasm/module-decoder.cc +++ b/chromium/v8/src/wasm/module-decoder.cc @@ -15,6 +15,7 @@ #include "src/v8.h" #include "src/wasm/decoder.h" #include "src/wasm/function-body-decoder-impl.h" +#include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" namespace v8 { @@ -82,8 +83,7 @@ const char* SectionName(SectionCode code) { case kNameSectionCode: return kNameString; case kExceptionSectionCode: - if (FLAG_experimental_wasm_eh) return kExceptionString; - return kUnknownString; + return kExceptionString; default: return kUnknownString; } @@ -246,13 +246,15 @@ class WasmSectionIterator { // The main logic for decoding the bytes of a module. class ModuleDecoderImpl : public Decoder { public: - explicit ModuleDecoderImpl(ModuleOrigin origin) + explicit ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin) : Decoder(nullptr, nullptr), + enabled_features_(enabled), origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {} - ModuleDecoderImpl(const byte* module_start, const byte* module_end, - ModuleOrigin origin) + ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start, + const byte* module_end, ModuleOrigin origin) : Decoder(module_start, module_end), + enabled_features_(enabled), origin_(FLAG_assume_asmjs_origin ? 
kAsmJsOrigin : origin) { if (end_ < start_) { error(start_, "end is less than start"); @@ -288,11 +290,11 @@ class ModuleDecoderImpl : public Decoder { } } - void StartDecoding(Isolate* isolate) { + void StartDecoding(Counters* counters, AccountingAllocator* allocator) { CHECK_NULL(module_); - SetCounters(isolate->counters()); - module_.reset(new WasmModule(base::make_unique<Zone>( - isolate->wasm_engine()->allocator(), "signatures"))); + SetCounters(counters); + module_.reset( + new WasmModule(base::make_unique<Zone>(allocator, "signatures"))); module_->initial_pages = 0; module_->maximum_pages = 0; module_->mem_export = false; @@ -400,7 +402,7 @@ class ModuleDecoderImpl : public Decoder { DecodeNameSection(); break; case kExceptionSectionCode: - if (FLAG_experimental_wasm_eh) { + if (enabled_features_.eh) { DecodeExceptionSection(); } else { errorf(pc(), "unexpected section: %s", SectionName(section_code)); @@ -450,8 +452,8 @@ class ModuleDecoderImpl : public Decoder { }); WasmImport* import = &module_->import_table.back(); const byte* pos = pc_; - import->module_name = consume_string(true, "module name"); - import->field_name = consume_string(true, "field name"); + import->module_name = consume_string(*this, true, "module name"); + import->field_name = consume_string(*this, true, "field name"); import->kind = static_cast<ImportExportKindCode>(consume_u8("import kind")); switch (import->kind) { @@ -478,7 +480,7 @@ class ModuleDecoderImpl : public Decoder { WasmTable* table = &module_->tables.back(); table->imported = true; ValueType type = consume_reference_type(); - if (!FLAG_experimental_wasm_anyref) { + if (!enabled_features_.anyref) { if (type != kWasmAnyFunc) { error(pc_ - 1, "invalid table type"); break; @@ -511,7 +513,7 @@ class ModuleDecoderImpl : public Decoder { global->type = consume_value_type(); global->mutability = consume_mutability(); if (global->mutability) { - if (FLAG_experimental_wasm_mut_global) { + if (enabled_features_.mut_global) { module_->num_imported_mutable_globals++; } else { error("mutable globals cannot be imported"); @@ -555,7 +557,7 @@ class ModuleDecoderImpl : public Decoder { void DecodeTableSection() { // TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the // implementation of AnyRef landed. - uint32_t max_count = FLAG_experimental_wasm_anyref ? 10 : kV8MaxWasmTables; + uint32_t max_count = enabled_features_.anyref ? 
10 : kV8MaxWasmTables; uint32_t table_count = consume_count("table count", max_count); for (uint32_t i = 0; ok() && i < table_count; i++) { @@ -614,7 +616,7 @@ class ModuleDecoderImpl : public Decoder { }); WasmExport* exp = &module_->export_table.back(); - exp->name = consume_string(true, "field name"); + exp->name = consume_string(*this, true, "field name"); const byte* pos = pc(); exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind")); @@ -646,7 +648,7 @@ class ModuleDecoderImpl : public Decoder { WasmGlobal* global = nullptr; exp->index = consume_global_index(module_.get(), &global); if (global) { - if (!FLAG_experimental_wasm_mut_global && global->mutability) { + if (!enabled_features_.mut_global && global->mutability) { error("mutable globals cannot be exported"); } global->exported = true; @@ -709,7 +711,7 @@ class ModuleDecoderImpl : public Decoder { for (uint32_t i = 0; ok() && i < element_count; ++i) { const byte* pos = pc(); uint32_t table_index = consume_u32v("table index"); - if (!FLAG_experimental_wasm_anyref && table_index != 0) { + if (!enabled_features_.anyref && table_index != 0) { errorf(pos, "illegal table index %u != 0", table_index); } if (table_index >= module_->tables.size()) { @@ -815,7 +817,7 @@ class ModuleDecoderImpl : public Decoder { // Decode module name, ignore the rest. // Function and local names will be decoded when needed. if (name_type == NameSectionKindCode::kModule) { - WireBytesRef name = wasm::consume_string(inner, false, "module name"); + WireBytesRef name = consume_string(inner, false, "module name"); if (inner.ok() && validate_utf8(&inner, name)) module_->name = name; } else { inner.consume_bytes(name_payload_len, "name subsection payload"); @@ -849,8 +851,9 @@ class ModuleDecoderImpl : public Decoder { } // Decodes an entire module. - ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) { - StartDecoding(isolate); + ModuleResult DecodeModule(Counters* counters, AccountingAllocator* allocator, + bool verify_functions = true) { + StartDecoding(counters, allocator); uint32_t offset = 0; Vector<const byte> orig_bytes(start(), end() - start()); DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset); @@ -928,6 +931,7 @@ class ModuleDecoderImpl : public Decoder { } private: + const WasmFeatures enabled_features_; std::shared_ptr<WasmModule> module_; Counters* counters_ = nullptr; // The type section is the first section in a module. 
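The pattern running through this decoder is that every {FLAG_experimental_wasm_*} check becomes a read of the {enabled_features_} member captured at construction, making the enabled feature set a per-decoder input rather than process-global mutable state. A reduced sketch of the resulting shape (the class name and type-byte constants here are illustrative; the {WasmFeatures} fields mirror those used in the patch):

// Sketch: feature gating via an immutable per-decoder snapshot.
struct WasmFeatures {
  bool anyref = false;
  bool threads = false;
  bool mut_global = false;
};

class FeatureGatedDecoder {
 public:
  explicit FeatureGatedDecoder(const WasmFeatures& enabled)
      : enabled_features_(enabled) {}

  bool IsValidTableType(uint8_t type_byte) const {
    // Because the snapshot is captured up front, two decoders can run
    // concurrently with different feature sets and never observe a flag
    // flipping mid-decode.
    constexpr uint8_t kLocalAnyFunc = 0x70;  // illustrative encoding
    constexpr uint8_t kLocalAnyRef = 0x6F;   // illustrative encoding
    if (type_byte == kLocalAnyFunc) return true;
    return type_byte == kLocalAnyRef && enabled_features_.anyref;
  }

 private:
  const WasmFeatures enabled_features_;  // immutable for this decode
};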
@@ -946,7 +950,7 @@ class ModuleDecoderImpl : public Decoder { } bool AddTable(WasmModule* module) { - if (FLAG_experimental_wasm_anyref) return true; + if (enabled_features_.anyref) return true; if (module->tables.size() > 0) { error("At most one table is supported"); return false; @@ -1019,7 +1023,7 @@ class ModuleDecoderImpl : public Decoder { for (WasmGlobal& global : module->globals) { byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type)); if (global.mutability && global.imported) { - DCHECK(FLAG_experimental_wasm_mut_global); + DCHECK(enabled_features_.mut_global); global.index = num_imported_mutable_globals++; } else { offset = (offset + size - 1) & ~(size - 1); // align @@ -1044,8 +1048,18 @@ class ModuleDecoderImpl : public Decoder { function->sig, function->code.offset(), start_ + GetBufferRelativeOffset(function->code.offset()), start_ + GetBufferRelativeOffset(function->code.end_offset())}; - DecodeResult result = VerifyWasmCodeWithStats(allocator, module, body, - origin_, GetCounters()); + + DecodeResult result; + { + auto time_counter = SELECT_WASM_COUNTER(GetCounters(), origin_, + wasm_decode, function_time); + + TimedHistogramScope wasm_decode_function_time_scope(time_counter); + WasmFeatures unused_detected_features; + result = VerifyWasmCode(allocator, enabled_features_, module, + &unused_detected_features, body); + } + if (result.failed()) { // Wrap the error message from the function decoder. std::ostringstream wrapped; @@ -1059,10 +1073,6 @@ class ModuleDecoderImpl : public Decoder { } } - WireBytesRef consume_string(bool validate_utf8, const char* name) { - return wasm::consume_string(*this, validate_utf8, name); - } - uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) { const byte* pos = pc_; uint32_t sig_index = consume_u32v("signature index"); @@ -1125,7 +1135,7 @@ class ModuleDecoderImpl : public Decoder { uint8_t flags = consume_u8("resizable limits flags"); const byte* pos = pc(); *has_shared_memory = false; - if (FLAG_experimental_wasm_threads) { + if (enabled_features_.threads) { if (flags & 0xFC) { errorf(pos - 1, "invalid memory limits flags"); } else if (flags == 3) { @@ -1243,7 +1253,7 @@ class ModuleDecoderImpl : public Decoder { break; } case kExprRefNull: { - if (FLAG_experimental_wasm_anyref) { + if (enabled_features_.anyref) { expr.kind = WasmInitExpr::kAnyRefConst; len = 0; break; @@ -1292,13 +1302,13 @@ class ModuleDecoderImpl : public Decoder { if (origin_ == kWasmOrigin) { switch (t) { case kLocalS128: - if (FLAG_experimental_wasm_simd) return kWasmS128; + if (enabled_features_.simd) return kWasmS128; break; case kLocalAnyFunc: - if (FLAG_experimental_wasm_anyref) return kWasmAnyFunc; + if (enabled_features_.anyref) return kWasmAnyFunc; break; case kLocalAnyRef: - if (FLAG_experimental_wasm_anyref) return kWasmAnyRef; + if (enabled_features_.anyref) return kWasmAnyRef; break; default: break; @@ -1317,7 +1327,7 @@ class ModuleDecoderImpl : public Decoder { case kLocalAnyFunc: return kWasmAnyFunc; case kLocalAnyRef: - if (!FLAG_experimental_wasm_anyref) { + if (!enabled_features_.anyref) { error(pc_ - 1, "Invalid type. Set --experimental-wasm-anyref to use 'AnyRef'"); } @@ -1356,7 +1366,7 @@ class ModuleDecoderImpl : public Decoder { uint32_t return_count = 0; if (has_return_values) { // parse return types - const size_t max_return_count = FLAG_experimental_wasm_mv + const size_t max_return_count = enabled_features_.mv ? 
kV8MaxWasmFunctionMultiReturns : kV8MaxWasmFunctionReturns; return_count = consume_count("return count", max_return_count); @@ -1379,9 +1389,11 @@ class ModuleDecoderImpl : public Decoder { } }; -ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start, - const byte* module_end, bool verify_functions, - ModuleOrigin origin, Counters* counters) { +ModuleResult DecodeWasmModule(const WasmFeatures& enabled, + const byte* module_start, const byte* module_end, + bool verify_functions, ModuleOrigin origin, + Counters* counters, + AccountingAllocator* allocator) { auto counter = SELECT_WASM_COUNTER(counters, origin, wasm_decode, module_time); TimedHistogramScope wasm_decode_module_time_scope(counter); @@ -1395,8 +1407,9 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start, size_counter->AddSample(static_cast<int>(size)); // Signatures are stored in zone memory, which have the same lifetime // as the {module}. - ModuleDecoderImpl decoder(module_start, module_end, origin); - ModuleResult result = decoder.DecodeModule(isolate, verify_functions); + ModuleDecoderImpl decoder(enabled, module_start, module_end, origin); + ModuleResult result = + decoder.DecodeModule(counters, allocator, verify_functions); // TODO(bradnelson): Improve histogram handling of size_t. // TODO(titzer): this isn't accurate, since it doesn't count the data // allocated on the C++ heap. @@ -1410,17 +1423,21 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start, return result; } -ModuleDecoder::ModuleDecoder() = default; +ModuleDecoder::ModuleDecoder(const WasmFeatures& enabled) + : enabled_features_(enabled) {} + ModuleDecoder::~ModuleDecoder() = default; const std::shared_ptr<WasmModule>& ModuleDecoder::shared_module() const { return impl_->shared_module(); } -void ModuleDecoder::StartDecoding(Isolate* isolate, ModuleOrigin origin) { +void ModuleDecoder::StartDecoding(Counters* counters, + AccountingAllocator* allocator, + ModuleOrigin origin) { DCHECK_NULL(impl_); - impl_.reset(new ModuleDecoderImpl(origin)); - impl_->StartDecoding(isolate); + impl_.reset(new ModuleDecoderImpl(enabled_features_, origin)); + impl_->StartDecoding(counters, allocator); } void ModuleDecoder::DecodeModuleHeader(Vector<const uint8_t> bytes, @@ -1450,7 +1467,7 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) { SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder, const byte* end) { - WireBytesRef string = wasm::consume_string(decoder, true, "section name"); + WireBytesRef string = consume_string(decoder, true, "section name"); if (decoder.failed() || decoder.pc() > end) { return kUnknownSectionCode; } @@ -1471,75 +1488,39 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder, bool ModuleDecoder::ok() { return impl_->ok(); } -ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start, - const byte* module_end, bool verify_functions, - ModuleOrigin origin) { - return DecodeWasmModule(isolate, module_start, module_end, verify_functions, - origin, isolate->counters()); -} - -ModuleResult AsyncDecodeWasmModule( - Isolate* isolate, const byte* module_start, const byte* module_end, - bool verify_functions, ModuleOrigin origin, - const std::shared_ptr<Counters> async_counters) { - return DecodeWasmModule(isolate, module_start, module_end, verify_functions, - origin, async_counters.get()); -} - -FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start, +FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& 
enabled, + Zone* zone, const byte* start, const byte* end) { - ModuleDecoderImpl decoder(start, end, kWasmOrigin); + ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin); return decoder.DecodeFunctionSignature(zone, start); } -WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) { +WasmInitExpr DecodeWasmInitExprForTesting(const WasmFeatures& enabled, + const byte* start, const byte* end) { AccountingAllocator allocator; - ModuleDecoderImpl decoder(start, end, kWasmOrigin); + ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin); return decoder.DecodeInitExpr(start); } -namespace { - -FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, - const ModuleWireBytes& wire_bytes, - const WasmModule* module, - const byte* function_start, - const byte* function_end, - Counters* counters) { +FunctionResult DecodeWasmFunctionForTesting( + const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes, + const WasmModule* module, const byte* function_start, + const byte* function_end, Counters* counters) { size_t size = function_end - function_start; if (function_start > function_end) return FunctionResult::Error("start > end"); - auto size_histogram = - SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes); + auto size_histogram = SELECT_WASM_COUNTER(counters, module->origin, wasm, + function_size_bytes); // TODO(bradnelson): Improve histogram handling of ptrdiff_t. size_histogram->AddSample(static_cast<int>(size)); if (size > kV8MaxWasmFunctionSize) return FunctionResult::Error("size > maximum function size: %zu", size); - ModuleDecoderImpl decoder(function_start, function_end, kWasmOrigin); + ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin); decoder.SetCounters(counters); return decoder.DecodeSingleFunction(zone, wire_bytes, module, base::make_unique<WasmFunction>()); } -} // namespace - -FunctionResult SyncDecodeWasmFunction(Isolate* isolate, Zone* zone, - const ModuleWireBytes& wire_bytes, - const WasmModule* module, - const byte* function_start, - const byte* function_end) { - return DecodeWasmFunction(isolate, zone, wire_bytes, module, function_start, - function_end, isolate->counters()); -} - -FunctionResult AsyncDecodeWasmFunction( - Isolate* isolate, Zone* zone, const ModuleWireBytes& wire_bytes, - const WasmModule* module, const byte* function_start, - const byte* function_end, std::shared_ptr<Counters> async_counters) { - return DecodeWasmFunction(isolate, zone, wire_bytes, module, function_start, - function_end, async_counters.get()); -} - AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start, const byte* tables_end) { AsmJsOffsets table; @@ -1670,7 +1651,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end, for (; decoder.ok() && functions_count > 0; --functions_count) { uint32_t function_index = decoder.consume_u32v("function index"); - WireBytesRef name = wasm::consume_string(decoder, false, "function name"); + WireBytesRef name = consume_string(decoder, false, "function name"); // Be lenient with errors in the name section: Ignore non-UTF8 names. 
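DecodeWasmFunctionForTesting above validates the byte range before doing any work: an inverted range is rejected first, which makes the subsequent size computation well-defined, and oversized bodies are rejected against a hard limit. A compact sketch of that ordering (the limit value mirrors V8's kV8MaxWasmFunctionSize but should be treated as illustrative here):

#include <cstddef>

// Order matters: end - start is only meaningful once start <= end holds.
constexpr size_t kMaxFunctionSize = 7654321;  // illustrative limit

const char* CheckFunctionBounds(const unsigned char* start,
                                const unsigned char* end) {
  if (start > end) return "start > end";
  size_t size = static_cast<size_t>(end - start);
  if (size > kMaxFunctionSize) return "size > maximum function size";
  return nullptr;  // bounds are acceptable
}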
You // can even assign to the same function multiple times (last valid one @@ -1713,7 +1694,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end, uint32_t num_names = decoder.consume_u32v("namings count"); for (uint32_t k = 0; k < num_names; ++k) { uint32_t local_index = decoder.consume_u32v("local index"); - WireBytesRef name = wasm::consume_string(decoder, true, "local name"); + WireBytesRef name = consume_string(decoder, true, "local name"); if (!decoder.ok()) break; if (local_index > kMaxInt) continue; func_names.max_local_index = diff --git a/chromium/v8/src/wasm/module-decoder.h b/chromium/v8/src/wasm/module-decoder.h index dc6d4c4ae05..f190a12844b 100644 --- a/chromium/v8/src/wasm/module-decoder.h +++ b/chromium/v8/src/wasm/module-decoder.h @@ -8,11 +8,15 @@ #include "src/globals.h" #include "src/wasm/function-body-decoder.h" #include "src/wasm/wasm-constants.h" +#include "src/wasm/wasm-features.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-result.h" namespace v8 { namespace internal { + +class Counters; + namespace wasm { struct ModuleEnv; @@ -55,36 +59,26 @@ struct LocalNames { }; // Decodes the bytes of a wasm module between {module_start} and {module_end}. -V8_EXPORT_PRIVATE ModuleResult SyncDecodeWasmModule(Isolate* isolate, - const byte* module_start, - const byte* module_end, - bool verify_functions, - ModuleOrigin origin); - -V8_EXPORT_PRIVATE ModuleResult AsyncDecodeWasmModule( - Isolate* isolate, const byte* module_start, const byte* module_end, - bool verify_functions, ModuleOrigin origin, - const std::shared_ptr<Counters> async_counters); +V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule( + const WasmFeatures& enabled, const byte* module_start, + const byte* module_end, bool verify_functions, ModuleOrigin origin, + Counters* counters, AccountingAllocator* allocator); // Exposed for testing. Decodes a single function signature, allocating it // in the given zone. Returns {nullptr} upon failure. -V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, - const byte* start, - const byte* end); +V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting( + const WasmFeatures& enabled, Zone* zone, const byte* start, + const byte* end); // Decodes the bytes of a wasm function between // {function_start} and {function_end}. 
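The name-decoding loops above are deliberately lenient: a malformed or non-UTF-8 entry is skipped instead of failing the whole section, and a later valid entry for the same index overwrites an earlier one. Sketched with standard containers in place of V8's internal tables:

#include <cstdint>
#include <string>
#include <unordered_map>

// Last valid entry wins; invalid entries are ignored rather than reported.
void AddFunctionName(std::unordered_map<uint32_t, std::string>* names,
                     uint32_t function_index, std::string name,
                     bool valid_utf8) {
  if (!valid_utf8) return;                     // lenient: skip, don't error
  (*names)[function_index] = std::move(name);  // overwrite earlier entries
}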
-V8_EXPORT_PRIVATE FunctionResult SyncDecodeWasmFunction( - Isolate* isolate, Zone* zone, const ModuleWireBytes& wire_bytes, +V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunctionForTesting( + const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes, const WasmModule* module, const byte* function_start, - const byte* function_end); - -V8_EXPORT_PRIVATE FunctionResult AsyncDecodeWasmFunction( - Isolate* isolate, Zone* zone, ModuleEnv* env, const byte* function_start, - const byte* function_end, const std::shared_ptr<Counters> async_counters); + const byte* function_end, Counters* counters); -V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, - const byte* end); +V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting( + const WasmFeatures& enabled, const byte* start, const byte* end); struct CustomSectionOffset { WireBytesRef section; @@ -120,10 +114,10 @@ class ModuleDecoderImpl; class ModuleDecoder { public: - ModuleDecoder(); + explicit ModuleDecoder(const WasmFeatures& enabled); ~ModuleDecoder(); - void StartDecoding(Isolate* isolate, + void StartDecoding(Counters* counters, AccountingAllocator* allocator, ModuleOrigin origin = ModuleOrigin::kWasmOrigin); void DecodeModuleHeader(Vector<const uint8_t> bytes, uint32_t offset); @@ -154,6 +148,7 @@ class ModuleDecoder { static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end); private: + const WasmFeatures enabled_features_; std::unique_ptr<ModuleDecoderImpl> impl_; }; diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc index 07b425aad05..15ced2316ba 100644 --- a/chromium/v8/src/wasm/streaming-decoder.cc +++ b/chromium/v8/src/wasm/streaming-decoder.cc @@ -83,7 +83,10 @@ void StreamingDecoder::Finish() { void StreamingDecoder::Abort() { TRACE_STREAMING("Abort\n"); - if (ok()) processor_->OnAbort(); + if (ok()) { + ok_ = false; + processor_->OnAbort(); + } } // An abstract class to share code among the states which decode VarInts. This diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h index 7b986bc28b0..e14c32daf39 100644 --- a/chromium/v8/src/wasm/streaming-decoder.h +++ b/chromium/v8/src/wasm/streaming-decoder.h @@ -65,8 +65,13 @@ class V8_EXPORT_PRIVATE StreamingDecoder { void Abort(); - // Notify the StreamingDecoder that there has been an compilation error. - void NotifyError() { ok_ = false; } + // Notify the StreamingDecoder that compilation ended and the + // StreamingProcessor should not be called anymore. + void NotifyCompilationEnded() { + // We set {ok_} to false to turn all future calls to the StreamingDecoder + // into no-ops. 
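Both streaming-decoder changes above, Abort() and the renamed NotifyCompilationEnded() (whose body continues just below), funnel through the same one-shot ok_ latch: clear it first, then notify, so every later call into the decoder degrades to a no-op. Reduced to its core:

#include <cstdio>

class StreamingSketch {
 public:
  void Abort() {
    if (!ok_) return;  // already aborted or failed: nothing to do
    ok_ = false;       // turn all future calls into no-ops first
    OnAbort();         // then notify exactly once
  }

 private:
  void OnAbort() { std::puts("aborted"); }
  bool ok_ = true;
};

int main() {
  StreamingSketch s;
  s.Abort();  // prints "aborted"
  s.Abort();  // no-op: the latch is already cleared
}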
+ ok_ = false; + } private: // TODO(ahaas): Put the whole private state of the StreamingDecoder into the diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc index d6771601a20..c2c425a44ef 100644 --- a/chromium/v8/src/wasm/wasm-code-manager.cc +++ b/chromium/v8/src/wasm/wasm-code-manager.cc @@ -117,7 +117,7 @@ void WasmCode::set_trap_handler_index(size_t value) { void WasmCode::RegisterTrapHandlerData() { DCHECK(!HasTrapHandlerIndex()); - if (kind() != wasm::WasmCode::kFunction) return; + if (kind() != WasmCode::kFunction) return; Address base = instruction_start(); @@ -199,7 +199,6 @@ void WasmCode::Validate() const { } case RelocInfo::JS_TO_WASM_CALL: case RelocInfo::EXTERNAL_REFERENCE: - case RelocInfo::OFF_HEAP_TARGET: case RelocInfo::COMMENT: case RelocInfo::CONST_POOL: case RelocInfo::VENEER_POOL: @@ -236,6 +235,9 @@ void WasmCode::Disassemble(const char* name, std::ostream& os, if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) { instruction_size = safepoint_table_offset_; } + if (handler_table_offset_ && handler_table_offset_ < instruction_size) { + instruction_size = handler_table_offset_; + } DCHECK_LT(0, instruction_size); os << "Instructions (size = " << instruction_size << ")\n"; Disassembler::Decode(nullptr, &os, instructions().start(), @@ -243,6 +245,23 @@ void WasmCode::Disassemble(const char* name, std::ostream& os, CodeReference(this), current_pc); os << "\n"; + if (handler_table_offset_ > 0) { + HandlerTable table(instruction_start(), handler_table_offset_); + os << "Exception Handler Table (size = " << table.NumberOfReturnEntries() + << "):\n"; + table.HandlerTableReturnPrint(os); + os << "\n"; + } + + if (!protected_instructions_.is_empty()) { + os << "Protected instructions:\n pc offset land pad\n"; + for (auto& data : protected_instructions()) { + os << std::setw(10) << std::hex << data.instr_offset << std::setw(10) + << std::hex << data.landing_offset << "\n"; + } + os << "\n"; + } + if (!source_positions().is_empty()) { os << "Source positions:\n pc offset position\n"; for (SourcePositionTableIterator it(source_positions()); !it.done(); @@ -289,12 +308,13 @@ WasmCode::~WasmCode() { } } -NativeModule::NativeModule(Isolate* isolate, bool can_request_more, - VirtualMemory* code_space, +NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled, + bool can_request_more, VirtualMemory* code_space, WasmCodeManager* code_manager, std::shared_ptr<const WasmModule> module, const ModuleEnv& env) - : module_(std::move(module)), + : enabled_features_(enabled), + module_(std::move(module)), compilation_state_(NewCompilationState(isolate, env)), free_code_space_({code_space->address(), code_space->end()}), wasm_code_manager_(code_manager), @@ -329,11 +349,11 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) { } void NativeModule::LogWasmCodes(Isolate* isolate) { - if (!wasm::WasmCode::ShouldBeLogged(isolate)) return; + if (!WasmCode::ShouldBeLogged(isolate)) return; // TODO(titzer): we skip the logging of the import wrappers // here, but they should be included somehow. 
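The Disassemble() additions earlier in the hunk above print the protected-instruction table: each entry pairs the pc offset of an instruction that may fault with the landing-pad offset the trap handler resumes at. A standalone sketch of the same formatted dump:

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <vector>

struct ProtectedInstruction {
  uint32_t instr_offset;    // pc offset of the potentially faulting access
  uint32_t landing_offset;  // where the trap handler transfers control
};

void DumpProtectedInstructions(const std::vector<ProtectedInstruction>& list) {
  std::cout << "Protected instructions:\n pc offset land pad\n";
  for (const auto& data : list) {
    std::cout << std::setw(10) << std::hex << data.instr_offset
              << std::setw(10) << std::hex << data.landing_offset << "\n";
  }
}

int main() { DumpProtectedInstructions({{0x14, 0x80}, {0x2c, 0x80}}); }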
- for (wasm::WasmCode* code : code_table()) { + for (WasmCode* code : code_table()) { if (code != nullptr) code->LogCode(isolate); } } @@ -398,7 +418,9 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) { WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry); ret->index_ = Just(index); + base::LockGuard<base::Mutex> lock(&allocation_mutex_); PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache); + set_code(index, ret); return ret; } @@ -408,16 +430,11 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) { WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub); // Fill the jump table with jumps to the lazy compile stub. Address lazy_compile_target = lazy_builtin->instruction_start(); - JumpTableAssembler jtasm( - jump_table_->instruction_start(), - static_cast<int>(jump_table_->instructions().size()) + 256); for (uint32_t i = 0; i < num_wasm_functions; ++i) { - // Check that the offset in the jump table increases as expected. - DCHECK_EQ(i * JumpTableAssembler::kJumpTableSlotSize, jtasm.pc_offset()); - jtasm.EmitLazyCompileJumpSlot(i + module_->num_imported_functions, - lazy_compile_target); - jtasm.NopBytes((i + 1) * JumpTableAssembler::kJumpTableSlotSize - - jtasm.pc_offset()); + JumpTableAssembler::EmitLazyCompileJumpSlot( + jump_table_->instruction_start(), i, + i + module_->num_imported_functions, lazy_compile_target, + WasmCode::kNoFlushICache); } Assembler::FlushICache(jump_table_->instructions().start(), jump_table_->instructions().size()); @@ -437,9 +454,13 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) { WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind) { - OwnedVector<byte> reloc_info = - OwnedVector<byte>::New(code->relocation_size()); - memcpy(reloc_info.start(), code->relocation_start(), code->relocation_size()); + // For off-heap builtins, we create a copy of the off-heap instruction stream + // instead of the on-heap code object containing the trampoline. Ensure that + // we do not apply the on-heap reloc info to the off-heap instructions. + const size_t relocation_size = + code->is_off_heap_trampoline() ? 0 : code->relocation_size(); + OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size); + memcpy(reloc_info.start(), code->relocation_start(), relocation_size); Handle<ByteArray> source_pos_table(code->SourcePositionTable(), code->GetIsolate()); OwnedVector<byte> source_pos = @@ -567,9 +588,13 @@ WasmCode* NativeModule::AddDeserializedCode( } void NativeModule::PublishCode(WasmCode* code) { - // TODO(clemensh): Remove the need for locking here. Probably requires - // word-aligning the jump table slots. base::LockGuard<base::Mutex> lock(&allocation_mutex_); + // Skip publishing code if there is an active redirection to the interpreter + // for the given function index, in order to preserve the redirection. 
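AddInterpreterEntry() above now holds the allocation mutex while it patches the jump-table slot and updates the code table, so the redirection to the interpreter is installed atomically with respect to concurrent publishers. Stripped to the locking structure:

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

class NativeModuleSketch {
 public:
  explicit NativeModuleSketch(size_t num_functions)
      : code_table_(num_functions, nullptr) {}

  void RedirectToInterpreter(uint32_t index, void* interpreter_entry) {
    std::lock_guard<std::mutex> lock(allocation_mutex_);
    PatchJumpTableSlot(index, interpreter_entry);  // jump table and ...
    code_table_[index] = interpreter_entry;        // ... code table together
  }

 private:
  void PatchJumpTableSlot(uint32_t index, void* target) {
    // The real code rewrites the machine-code slot here and flushes the
    // instruction cache.
    (void)index;
    (void)target;
  }

  std::mutex allocation_mutex_;
  std::vector<void*> code_table_;
};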
+ if (has_code(code->index()) && + this->code(code->index())->kind() == WasmCode::kInterpreterEntry) { + return; + } if (!code->protected_instructions_.is_empty()) { code->RegisterTrapHandlerData(); } @@ -579,11 +604,19 @@ void NativeModule::PublishCode(WasmCode* code) { WasmCode::kFlushICache); } +std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const { + base::LockGuard<base::Mutex> lock(&allocation_mutex_); + std::vector<WasmCode*> result; + result.reserve(code_table().size()); + for (WasmCode* code : code_table()) result.push_back(code); + return result; +} + WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) { // Only call this if we really need a jump table. DCHECK_LT(0, num_wasm_functions); OwnedVector<byte> instructions = OwnedVector<byte>::New( - num_wasm_functions * JumpTableAssembler::kJumpTableSlotSize); + JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions)); memset(instructions.start(), 0, instructions.size()); return AddOwnedCode(Nothing<uint32_t>(), // index instructions.as_vector(), // instructions @@ -602,9 +635,8 @@ void NativeModule::PatchJumpTable(uint32_t func_index, Address target, WasmCode::FlushICache flush_icache) { DCHECK_LE(module_->num_imported_functions, func_index); uint32_t slot_idx = func_index - module_->num_imported_functions; - Address jump_table_slot = jump_table_->instruction_start() + - slot_idx * JumpTableAssembler::kJumpTableSlotSize; - JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, target, flush_icache); + JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(), + slot_idx, target, flush_icache); } Address NativeModule::AllocateForCode(size_t size) { @@ -622,6 +654,8 @@ Address NativeModule::AllocateForCode(size_t size) { wasm_code_manager_->TryAllocate(size, &new_mem, reinterpret_cast<void*>(hint)); if (!new_mem.IsReserved()) return kNullAddress; + base::LockGuard<base::Mutex> lock( + &wasm_code_manager_->native_modules_mutex_); wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this); free_code_space_.Merge({new_mem.address(), new_mem.end()}); @@ -655,7 +689,7 @@ Address NativeModule::AllocateForCode(size_t size) { if (!wasm_code_manager_->Commit(start, commit_size)) { return kNullAddress; } - committed_code_space_ += commit_size; + committed_code_space_.fetch_add(commit_size); commit_end = start; } #else @@ -664,7 +698,7 @@ Address NativeModule::AllocateForCode(size_t size) { if (!wasm_code_manager_->Commit(commit_start, commit_size)) { return kNullAddress; } - committed_code_space_ += commit_size; + committed_code_space_.fetch_add(commit_size); #endif } DCHECK(IsAligned(mem.start, kCodeAlignment)); @@ -693,18 +727,17 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const { // Return the jump table slot for that function index. 
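SnapshotCodeTable(), added above, copies the whole table while holding the same mutex, giving callers such as the serializer a consistent view even while other threads keep publishing code. In miniature:

#include <mutex>
#include <vector>

class CodeTableHolder {
 public:
  // Copy the table under the lock; the caller then iterates the copy freely
  // while publishers keep mutating the live table.
  std::vector<void*> SnapshotCodeTable() const {
    std::lock_guard<std::mutex> lock(allocation_mutex_);
    return code_table_;
  }

 private:
  mutable std::mutex allocation_mutex_;
  std::vector<void*> code_table_;
};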
DCHECK_NOT_NULL(jump_table_); uint32_t slot_idx = func_index - module_->num_imported_functions; - DCHECK_LT(slot_idx, jump_table_->instructions().size() / - JumpTableAssembler::kJumpTableSlotSize); - return jump_table_->instruction_start() + - slot_idx * JumpTableAssembler::kJumpTableSlotSize; + uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx); + DCHECK_LT(slot_offset, jump_table_->instructions().size()); + return jump_table_->instruction_start() + slot_offset; } uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot( Address slot_address) const { DCHECK(is_jump_table_slot(slot_address)); - uint32_t offset = + uint32_t slot_offset = static_cast<uint32_t>(slot_address - jump_table_->instruction_start()); - uint32_t slot_idx = offset / JumpTableAssembler::kJumpTableSlotSize; + uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset); DCHECK_LT(slot_idx, module_->num_declared_functions); return module_->num_imported_functions + slot_idx; } @@ -789,6 +822,31 @@ void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) { reinterpret_cast<void*>(ret->end()), ret->size()); } +void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const { + base::LockGuard<base::Mutex> lock(&native_modules_mutex_); + for (NativeModule* native_module : native_modules_) { + int code_size = + static_cast<int>(native_module->committed_code_space_.load() / MB); + isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size); + } +} + +namespace { + +void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type, + v8::GCCallbackFlags flags, void* data) { + Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate); + isolate->wasm_engine()->code_manager()->SampleModuleSizes(isolate); +} + +} // namespace + +// static +void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) { + isolate->heap()->AddGCEpilogueCallback(ModuleSamplingCallback, + v8::kGCTypeMarkSweepCompact, nullptr); +} + // static size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) { constexpr size_t kCodeSizeMultiplier = 4; @@ -802,8 +860,7 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) { (sizeof(WasmCode*) * num_wasm_functions /* code table size */) + (sizeof(WasmCode) * num_wasm_functions /* code object size */) + (kImportSize * module->num_imported_functions /* import size */) + - (JumpTableAssembler::kJumpTableSlotSize * - num_wasm_functions /* jump table size */); + (JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions)); for (auto& function : module->functions) { estimate += kCodeSizeMultiplier * function.code.length(); @@ -812,19 +869,22 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) { return estimate; } -std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule( - Isolate* isolate, size_t memory_estimate, bool can_request_more, - std::shared_ptr<const WasmModule> module, const ModuleEnv& env) { +bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() { + base::LockGuard<base::Mutex> lock(&native_modules_mutex_); // TODO(titzer): we force a critical memory pressure notification // when the code space is almost exhausted, but only upon the next module // creation. This is only for one isolate, and it should really do this for // all isolates, at the point of commit. 
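The jump-table hunks above funnel every index/offset conversion through JumpTableAssembler::SlotIndexToOffset and SlotOffsetToIndex instead of multiplying by a raw kJumpTableSlotSize. Centralizing the mapping means an architecture can later adopt a non-uniform slot layout without touching the callers. A sketch with an illustrative slot size:

#include <cassert>
#include <cstdint>

constexpr uint32_t kSlotSize = 16;  // illustrative, not any port's real value

constexpr uint32_t SlotIndexToOffset(uint32_t slot_idx) {
  return slot_idx * kSlotSize;
}
constexpr uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
  return slot_offset / kSlotSize;
}

int main() {
  for (uint32_t i = 0; i < 100; ++i) {
    assert(SlotOffsetToIndex(SlotIndexToOffset(i)) == i);  // round-trips
  }
}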
constexpr size_t kCriticalThreshold = 32 * 1024 * 1024; - bool force_critical_notification = - (active_ > 1) && - (remaining_uncommitted_code_space_.load() < kCriticalThreshold); + return native_modules_.size() > 1 && + remaining_uncommitted_code_space_.load() < kCriticalThreshold; +} - if (force_critical_notification) { +std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule( + Isolate* isolate, const WasmFeatures& enabled, size_t memory_estimate, + bool can_request_more, std::shared_ptr<const WasmModule> module, + const ModuleEnv& env) { + if (ShouldForceCriticalMemoryPressureNotification()) { (reinterpret_cast<v8::Isolate*>(isolate)) ->MemoryPressureNotification(MemoryPressureLevel::kCritical); } @@ -852,21 +912,22 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule( size_t size = mem.size(); Address end = mem.end(); std::unique_ptr<NativeModule> ret(new NativeModule( - isolate, can_request_more, &mem, this, std::move(module), env)); + isolate, enabled, can_request_more, &mem, this, std::move(module), env)); TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start, size); + base::LockGuard<base::Mutex> lock(&native_modules_mutex_); AssignRanges(start, end, ret.get()); - ++active_; + native_modules_.emplace(ret.get()); return ret; } bool NativeModule::SetExecutable(bool executable) { if (is_executable_ == executable) return true; TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable); - PageAllocator::Permission permission = - executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite; if (FLAG_wasm_write_protect_code_memory) { + PageAllocator::Permission permission = + executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite; #if V8_OS_WIN // On windows, we need to switch permissions per separate virtual memory // reservation. This is really just a problem when the NativeModule is @@ -905,8 +966,9 @@ bool NativeModule::SetExecutable(bool executable) { } void WasmCodeManager::FreeNativeModule(NativeModule* native_module) { - DCHECK_GE(active_, 1); - --active_; + base::LockGuard<base::Mutex> lock(&native_modules_mutex_); + DCHECK_EQ(1, native_modules_.count(native_module)); + native_modules_.erase(native_module); TRACE_HEAP("Freeing NativeModule %p\n", this); for (auto& vmem : native_module->owned_code_space_) { lookup_map_.erase(vmem.address()); @@ -915,13 +977,8 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) { } native_module->owned_code_space_.clear(); - size_t code_size = native_module->committed_code_space_; + size_t code_size = native_module->committed_code_space_.load(); DCHECK(IsAligned(code_size, AllocatePageSize())); - - if (module_code_size_mb_) { - module_code_size_mb_->AddSample(static_cast<int>(code_size / MB)); - } - remaining_uncommitted_code_space_.fetch_add(code_size); } @@ -939,6 +996,7 @@ WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const { } NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const { + base::LockGuard<base::Mutex> lock(&native_modules_mutex_); if (lookup_map_.empty()) return nullptr; auto iter = lookup_map_.upper_bound(pc); @@ -971,17 +1029,21 @@ size_t WasmCodeManager::remaining_uncommitted_code_space() const { return remaining_uncommitted_code_space_.load(); } +// TODO(v8:7424): Code protection scopes are not yet supported with shared code +// enabled and need to be revisited to work with --wasm-shared-code as well. 
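ShouldForceCriticalMemoryPressureNotification(), factored out above, takes the modules mutex and only escalates when more than one module is live and the remaining code space falls under a fixed threshold. The predicate in isolation, with stand-in types:

#include <atomic>
#include <cstddef>
#include <mutex>
#include <unordered_set>

class CodeManagerSketch {
 public:
  bool ShouldForceCriticalMemoryPressureNotification() {
    std::lock_guard<std::mutex> lock(native_modules_mutex_);
    constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;  // as above
    return native_modules_.size() > 1 &&
           remaining_uncommitted_code_space_.load() < kCriticalThreshold;
  }

 private:
  std::mutex native_modules_mutex_;
  std::unordered_set<void*> native_modules_;
  std::atomic<size_t> remaining_uncommitted_code_space_{0};
};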
NativeModuleModificationScope::NativeModuleModificationScope( NativeModule* native_module) : native_module_(native_module) { - if (native_module_ && (native_module_->modification_scope_depth_++) == 0) { + if (FLAG_wasm_write_protect_code_memory && native_module_ && + (native_module_->modification_scope_depth_++) == 0) { bool success = native_module_->SetExecutable(false); CHECK(success); } } NativeModuleModificationScope::~NativeModuleModificationScope() { - if (native_module_ && (native_module_->modification_scope_depth_--) == 1) { + if (FLAG_wasm_write_protect_code_memory && native_module_ && + (native_module_->modification_scope_depth_--) == 1) { bool success = native_module_->SetExecutable(true); CHECK(success); } diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h index 02c417a20ed..ffcc05fbcd1 100644 --- a/chromium/v8/src/wasm/wasm-code-manager.h +++ b/chromium/v8/src/wasm/wasm-code-manager.h @@ -9,19 +9,21 @@ #include <list> #include <map> #include <unordered_map> +#include <unordered_set> #include "src/base/macros.h" +#include "src/builtins/builtins-definitions.h" #include "src/handles.h" #include "src/trap-handler/trap-handler.h" #include "src/vector.h" #include "src/wasm/module-compiler.h" +#include "src/wasm/wasm-features.h" namespace v8 { namespace internal { struct CodeDesc; class Code; -class Histogram; namespace wasm { @@ -30,18 +32,6 @@ class WasmCodeManager; class WasmMemoryTracker; struct WasmModule; -// Convenience macro listing all wasm runtime stubs. Note that the first few -// elements of the list coincide with {compiler::TrapId}, order matters. -#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \ - FOREACH_WASM_TRAPREASON(VTRAP) \ - V(WasmAllocateHeapNumber) \ - V(WasmArgumentsAdaptor) \ - V(WasmCallJavaScript) \ - V(WasmGrowMemory) \ - V(WasmStackGuard) \ - V(WasmToNumber) \ - V(DoubleToI) - struct AddressRange { Address start; Address end; @@ -270,6 +260,10 @@ class V8_EXPORT_PRIVATE NativeModule final { // threads executing the old code. void PublishCode(WasmCode* code); + // Creates a snapshot of the current state of the code table. This is useful + // to get a consistent view of the table (e.g. used by the serializer). + std::vector<WasmCode*> SnapshotCodeTable() const; + WasmCode* code(uint32_t index) const { DCHECK_LT(index, num_functions()); DCHECK_LE(module_->num_imported_functions, index); @@ -289,12 +283,15 @@ class V8_EXPORT_PRIVATE NativeModule final { return jump_table_ ? jump_table_->instruction_start() : kNullAddress; } + ptrdiff_t jump_table_offset(uint32_t func_index) const { + DCHECK_GE(func_index, num_imported_functions()); + return GetCallTargetForFunction(func_index) - jump_table_start(); + } + bool is_jump_table_slot(Address address) const { return jump_table_->contains(address); } - uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const; - // Transition this module from code relying on trap handlers (i.e. without // explicit memory bounds checks) to code that does not require trap handlers // (i.e. code with explicit bounds checks). @@ -307,6 +304,10 @@ class V8_EXPORT_PRIVATE NativeModule final { // slot within {jump_table_}). Address GetCallTargetForFunction(uint32_t func_index) const; + // Reverse lookup from a given call target (i.e. a jump table slot as the + // above {GetCallTargetForFunction} returns) to a function index. 
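The NativeModuleModificationScope hunks above gate the permission flips on --wasm-write-protect-code-memory and keep the existing depth counter, so nested scopes flip page permissions only on the outermost entry and exit. A reduced RAII sketch with a stand-in flag:

// Stand-in for FLAG_wasm_write_protect_code_memory.
bool flag_write_protect_code_memory = true;

struct CodeSpaceOwner {
  int modification_scope_depth = 0;
  void SetExecutable(bool executable) {
    // The real code asks the page allocator (mprotect/VirtualProtect).
    (void)executable;
  }
};

class ModificationScope {
 public:
  explicit ModificationScope(CodeSpaceOwner* m) : m_(m) {
    if (flag_write_protect_code_memory && m_ &&
        m_->modification_scope_depth++ == 0) {
      m_->SetExecutable(false);  // outermost scope: make code writable
    }
  }
  ~ModificationScope() {
    if (flag_write_protect_code_memory && m_ &&
        --m_->modification_scope_depth == 0) {
      m_->SetExecutable(true);  // outermost scope: back to executable
    }
  }

 private:
  CodeSpaceOwner* m_;
};

Skipping the flips entirely when write protection is off avoids two page-permission syscalls per scope on the common default configuration.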
+ uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const; + bool SetExecutable(bool executable); // For cctests, where we build both WasmModule and the runtime objects @@ -323,9 +324,6 @@ class V8_EXPORT_PRIVATE NativeModule final { uint32_t num_imported_functions() const { return module_->num_imported_functions; } - Vector<WasmCode*> code_table() const { - return {code_table_.get(), module_->num_declared_functions}; - } bool use_trap_handler() const { return use_trap_handler_; } void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; } bool lazy_compile_frozen() const { return lazy_compile_frozen_; } @@ -334,18 +332,22 @@ class V8_EXPORT_PRIVATE NativeModule final { wire_bytes_ = std::move(wire_bytes); } const WasmModule* module() const { return module_.get(); } + WasmCodeManager* code_manager() const { return wasm_code_manager_; } WasmCode* Lookup(Address) const; ~NativeModule(); + const WasmFeatures& enabled_features() const { return enabled_features_; } + private: friend class WasmCode; friend class WasmCodeManager; friend class NativeModuleModificationScope; - NativeModule(Isolate* isolate, bool can_request_more, - VirtualMemory* code_space, WasmCodeManager* code_manager, + NativeModule(Isolate* isolate, const WasmFeatures& enabled_features, + bool can_request_more, VirtualMemory* code_space, + WasmCodeManager* code_manager, std::shared_ptr<const WasmModule> module, const ModuleEnv& env); WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind); @@ -369,6 +371,9 @@ class V8_EXPORT_PRIVATE NativeModule final { void PatchJumpTable(uint32_t func_index, Address target, WasmCode::FlushICache); + Vector<WasmCode*> code_table() const { + return {code_table_.get(), module_->num_declared_functions}; + } void set_code(uint32_t index, WasmCode* code) { DCHECK_LT(index, num_functions()); DCHECK_LE(module_->num_imported_functions, index); @@ -376,6 +381,11 @@ class V8_EXPORT_PRIVATE NativeModule final { code_table_[index - module_->num_imported_functions] = code; } + // Features enabled for this module. We keep a copy of the features that + // were enabled at the time of the creation of this native module, + // to be consistent across asynchronous compilations later. + const WasmFeatures enabled_features_; + // TODO(clemensh): Make this a unique_ptr (requires refactoring // AsyncCompileJob). std::shared_ptr<const WasmModule> module_; @@ -406,7 +416,7 @@ class V8_EXPORT_PRIVATE NativeModule final { std::list<VirtualMemory> owned_code_space_; WasmCodeManager* wasm_code_manager_; - size_t committed_code_space_ = 0; + std::atomic<size_t> committed_code_space_{0}; int modification_scope_depth_ = 0; bool can_request_more_memory_; bool use_trap_handler_ = false; @@ -427,7 +437,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { // code. The native module may later request more memory. // TODO(titzer): isolate is only required here for CompilationState. 
std::unique_ptr<NativeModule> NewNativeModule( - Isolate* isolate, size_t memory_estimate, bool can_request_more, + Isolate* isolate, const WasmFeatures& enabled_features, + size_t memory_estimate, bool can_request_more, std::shared_ptr<const WasmModule> module, const ModuleEnv& env); NativeModule* LookupNativeModule(Address pc) const; @@ -435,9 +446,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { WasmCode* GetCodeFromStartAddress(Address pc) const; size_t remaining_uncommitted_code_space() const; - void SetModuleCodeSizeHistogram(Histogram* histogram) { - module_code_size_mb_ = histogram; - } + // Add a sample of all module sizes. + void SampleModuleSizes(Isolate* isolate) const; + + // TODO(v8:7424): For now we sample module sizes in a GC callback. This will + // bias samples towards apps with high memory pressure. We should switch to + // using sampling based on regular intervals independent of the GC. + static void InstallSamplingGCCallback(Isolate* isolate); + static size_t EstimateNativeModuleSize(const WasmModule* module); private: @@ -452,17 +468,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { void FreeNativeModule(NativeModule*); void Free(VirtualMemory* mem); void AssignRanges(Address start, Address end, NativeModule*); + bool ShouldForceCriticalMemoryPressureNotification(); WasmMemoryTracker* const memory_tracker_; + mutable base::Mutex native_modules_mutex_; std::map<Address, std::pair<Address, NativeModule*>> lookup_map_; - // Count of NativeModules not yet collected. Helps determine if it's - // worth requesting a GC on memory pressure. - size_t active_ = 0; + std::unordered_set<NativeModule*> native_modules_; std::atomic<size_t> remaining_uncommitted_code_space_; - // Histogram to update with the maximum used code space for each NativeModule. - Histogram* module_code_size_mb_ = nullptr; - DISALLOW_COPY_AND_ASSIGN(WasmCodeManager); }; diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h index 0233ced6ac1..70794fc7ab8 100644 --- a/chromium/v8/src/wasm/wasm-constants.h +++ b/chromium/v8/src/wasm/wasm-constants.h @@ -5,6 +5,9 @@ #ifndef V8_WASM_WASM_CONSTANTS_H_ #define V8_WASM_WASM_CONSTANTS_H_ +#include <cstddef> +#include <cstdint> + namespace v8 { namespace internal { namespace wasm { @@ -70,7 +73,8 @@ enum SectionCode : int8_t { // Binary encoding of name section kinds. enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 }; -constexpr uint32_t kWasmPageSize = 0x10000; +constexpr size_t kWasmPageSize = 0x10000; +constexpr uint32_t kWasmPageSizeLog2 = 16; constexpr int kInvalidExceptionTag = -1; // TODO(wasm): Wrap WasmCodePosition in a struct. diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc index b1f57fa8f88..0d8b1f18aaf 100644 --- a/chromium/v8/src/wasm/wasm-debug.cc +++ b/chromium/v8/src/wasm/wasm-debug.cc @@ -72,8 +72,7 @@ MaybeHandle<String> GetLocalName(Isolate* isolate, if (!debug_info->has_locals_names()) { Handle<WasmModuleObject> module_object( debug_info->wasm_instance()->module_object(), isolate); - Handle<FixedArray> locals_names = - wasm::DecodeLocalNames(isolate, module_object); + Handle<FixedArray> locals_names = DecodeLocalNames(isolate, module_object); debug_info->set_locals_names(*locals_names); } @@ -290,7 +289,7 @@ class InterpreterHandle { Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_); // Check that this is indeed the instance which is connected to this // interpreter. 
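In the wasm-constants.h hunk above, kWasmPageSize widens to size_t and gains a companion kWasmPageSizeLog2. The two constants must stay in sync; a static_assert is a cheap way to pin the relationship, and the log2 form turns size-to-page conversions into shifts:

#include <cstddef>
#include <cstdint>

constexpr size_t kWasmPageSize = 0x10000;
constexpr uint32_t kWasmPageSizeLog2 = 16;
static_assert(kWasmPageSize == size_t{1} << kWasmPageSizeLog2,
              "page size and its log2 must agree");

// With the log2 at hand, byte counts convert to page counts by shifting.
constexpr size_t BytesToPages(size_t bytes) {
  return bytes >> kWasmPageSizeLog2;
}
static_assert(BytesToPages(3 * kWasmPageSize) == 3, "three full pages");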
- DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast( + DCHECK_EQ(this, Managed<InterpreterHandle>::cast( instance_obj->debug_info()->interpreter_handle()) ->raw()); return instance_obj; @@ -299,8 +298,6 @@ class InterpreterHandle { void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) { // Enter the debugger. DebugScope debug_scope(isolate_->debug()); - // Postpone interrupt during breakpoint processing. - PostponeInterruptsScope postpone(isolate_); // Check whether we hit a breakpoint. if (isolate_->debug()->break_points_active()) { @@ -406,7 +403,7 @@ class InterpreterHandle { return interpreter()->GetThread(0)->NumInterpretedCalls(); } - Handle<JSObject> GetGlobalScopeObject(wasm::InterpretedFrame* frame, + Handle<JSObject> GetGlobalScopeObject(InterpretedFrame* frame, Handle<WasmDebugInfo> debug_info) { Isolate* isolate = isolate_; Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate); @@ -430,7 +427,7 @@ class InterpreterHandle { return global_scope_object; } - Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame, + Handle<JSObject> GetLocalScopeObject(InterpretedFrame* frame, Handle<WasmDebugInfo> debug_info) { Isolate* isolate = isolate_; @@ -531,7 +528,12 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle( Isolate* isolate, Handle<WasmDebugInfo> debug_info) { Handle<Object> handle(debug_info->interpreter_handle(), isolate); if (handle->IsUndefined(isolate)) { - size_t interpreter_size = 0; // TODO(titzer): estimate size properly. + // Use the maximum stack size to estimate the maximum size of the + // interpreter. The interpreter keeps its own stack internally, and the size + // of the stack should dominate the overall size of the interpreter. We + // multiply by '2' to account for the growing strategy for the backing store + // of the stack. + size_t interpreter_size = FLAG_stack_size * KB * 2; handle = Managed<wasm::InterpreterHandle>::Allocate( isolate, interpreter_size, isolate, *debug_info); debug_info->set_interpreter_handle(*handle); @@ -582,7 +584,11 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting( Handle<WasmInstanceObject> instance_obj) { Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj); Isolate* isolate = instance_obj->GetIsolate(); - size_t interpreter_size = 0; // TODO(titzer): estimate size properly. + // Use the maximum stack size to estimate the maximum size of the interpreter. + // The interpreter keeps its own stack internally, and the size of the stack + // should dominate the overall size of the interpreter. We multiply by '2' to + // account for the growing strategy for the backing store of the stack. 
+ size_t interpreter_size = FLAG_stack_size * KB * 2; auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate( isolate, interpreter_size, isolate, *debug_info); debug_info->set_interpreter_handle(*interp_handle); @@ -637,13 +643,16 @@ void WasmDebugInfo::PrepareStep(StepAction step_action) { GetInterpreterHandle(this)->PrepareStep(step_action); } -bool WasmDebugInfo::RunInterpreter(Address frame_pointer, int func_index, +// static +bool WasmDebugInfo::RunInterpreter(Isolate* isolate, + Handle<WasmDebugInfo> debug_info, + Address frame_pointer, int func_index, Address arg_buffer) { DCHECK_LE(0, func_index); - Handle<WasmInstanceObject> instance(wasm_instance(), - wasm_instance()->GetIsolate()); - return GetInterpreterHandle(this)->Execute( - instance, frame_pointer, static_cast<uint32_t>(func_index), arg_buffer); + auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info); + Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate); + return handle->Execute(instance, frame_pointer, + static_cast<uint32_t>(func_index), arg_buffer); } std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack( @@ -719,6 +728,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry( WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED)); function_data->set_wrapper_code(*new_entry_code); function_data->set_instance(debug_info->wasm_instance()); + function_data->set_jump_table_offset(-1); function_data->set_function_index(-1); Handle<String> name = isolate->factory()->InternalizeOneByteString( STATIC_CHAR_VECTOR("c-wasm-entry")); diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc index 717c5defd79..4f772d9bdd3 100644 --- a/chromium/v8/src/wasm/wasm-engine.cc +++ b/chromium/v8/src/wasm/wasm-engine.cc @@ -8,10 +8,11 @@ #include "src/compilation-statistics.h" #include "src/objects-inl.h" #include "src/objects/js-promise.h" +#include "src/wasm/function-compiler.h" #include "src/wasm/module-compiler.h" #include "src/wasm/module-decoder.h" #include "src/wasm/streaming-decoder.h" -#include "src/wasm/wasm-objects.h" +#include "src/wasm/wasm-objects-inl.h" namespace v8 { namespace internal { @@ -20,13 +21,18 @@ namespace wasm { WasmEngine::WasmEngine() : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {} -WasmEngine::~WasmEngine() = default; +WasmEngine::~WasmEngine() { + // All AsyncCompileJobs have been canceled. + DCHECK(jobs_.empty()); +} -bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) { +bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled, + const ModuleWireBytes& bytes) { // TODO(titzer): remove dependency on the isolate. 
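Both wasm-debug.cc hunks above replace the zero-byte placeholder with the same estimate: the interpreter keeps its own stack, which dominates its footprint, so the budget derives from the configured stack size, doubled for the backing store's growth strategy. The arithmetic, with V8's usual 984 KB default for --stack-size used as a stand-in:

#include <cstddef>
#include <cstdio>

constexpr size_t KB = 1024;
size_t flag_stack_size = 984;  // stand-in for --stack-size (in KB)

// Stack bytes, times two for headroom while the backing store grows.
size_t EstimateInterpreterSize() { return flag_stack_size * KB * 2; }

int main() {
  // With the stand-in default: 984 * 1024 * 2 = 2015232 bytes (~1.9 MB).
  std::printf("interpreter budget: %zu bytes\n", EstimateInterpreterSize());
}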
if (bytes.start() == nullptr || bytes.length() == 0) return false; - ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(), - bytes.end(), true, kWasmOrigin); + ModuleResult result = + DecodeWasmModule(enabled, bytes.start(), bytes.end(), true, kWasmOrigin, + isolate->counters(), allocator()); return result.ok(); } @@ -34,20 +40,24 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs( Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes, Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes) { - ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(), - bytes.end(), false, kAsmJsOrigin); + ModuleResult result = + DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false, + kAsmJsOrigin, isolate->counters(), allocator()); CHECK(!result.failed()); // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated // in {CompileToModuleObject}. - return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes, - asm_js_script, asm_js_offset_table_bytes); + return CompileToModuleObject(isolate, kAsmjsWasmFeatures, thrower, + std::move(result.val), bytes, asm_js_script, + asm_js_offset_table_bytes); } MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile( - Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) { - ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(), - bytes.end(), false, kWasmOrigin); + Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower, + const ModuleWireBytes& bytes) { + ModuleResult result = + DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin, + isolate->counters(), allocator()); if (result.failed()) { thrower->CompileFailed("Wasm decoding failed", result); return {}; @@ -55,8 +65,8 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile( // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated // in {CompileToModuleObject}. - return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes, - Handle<Script>(), Vector<const byte>()); + return CompileToModuleObject(isolate, enabled, thrower, std::move(result.val), + bytes, Handle<Script>(), Vector<const byte>()); } MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate( @@ -70,7 +80,7 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate( void WasmEngine::AsyncInstantiate( Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver, Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) { - ErrorThrower thrower(isolate, nullptr); + ErrorThrower thrower(isolate, "WebAssembly Instantiation"); // Instantiate a TryCatch so that caught exceptions won't propagate out. // They will still be set as pending exceptions on the isolate. // TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke @@ -87,24 +97,24 @@ void WasmEngine::AsyncInstantiate( return; } - // We either have a pending exception (if the start function threw), or an - // exception in the ErrorThrower. - DCHECK_EQ(1, isolate->has_pending_exception() + thrower.error()); - if (thrower.error()) { - resolver->OnInstantiationFailed(thrower.Reify()); - } else { - // The start function has thrown an exception. We have to move the - // exception to the promise chain. + if (isolate->has_pending_exception()) { + // The JS code executed during instantiation has thrown an exception. + // We have to move the exception to the promise chain. 
Handle<Object> exception(isolate->pending_exception(), isolate); isolate->clear_pending_exception(); DCHECK(*isolate->external_caught_exception_address()); *isolate->external_caught_exception_address() = false; resolver->OnInstantiationFailed(exception); + thrower.Reset(); + } else { + DCHECK(thrower.error()); + resolver->OnInstantiationFailed(thrower.Reify()); } } void WasmEngine::AsyncCompile( - Isolate* isolate, std::unique_ptr<CompilationResultResolver> resolver, + Isolate* isolate, const WasmFeatures& enabled, + std::shared_ptr<CompilationResultResolver> resolver, const ModuleWireBytes& bytes, bool is_shared) { if (!FLAG_wasm_async_compilation) { // Asynchronous compilation disabled; fall back on synchronous compilation. @@ -114,12 +124,11 @@ void WasmEngine::AsyncCompile( // Make a copy of the wire bytes to avoid concurrent modification. std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]); memcpy(copy.get(), bytes.start(), bytes.length()); - i::wasm::ModuleWireBytes bytes_copy(copy.get(), - copy.get() + bytes.length()); - module_object = SyncCompile(isolate, &thrower, bytes_copy); + ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length()); + module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy); } else { // The wire bytes are not shared, OK to use them directly. - module_object = SyncCompile(isolate, &thrower, bytes); + module_object = SyncCompile(isolate, enabled, &thrower, bytes); } if (thrower.error()) { resolver->OnCompilationFailed(thrower.Reify()); @@ -132,8 +141,9 @@ void WasmEngine::AsyncCompile( if (FLAG_wasm_test_streaming) { std::shared_ptr<StreamingDecoder> streaming_decoder = - isolate->wasm_engine()->StartStreamingCompilation( - isolate, handle(isolate->context(), isolate), std::move(resolver)); + StartStreamingCompilation(isolate, enabled, + handle(isolate->context(), isolate), + std::move(resolver)); streaming_decoder->OnBytesReceived(bytes.module_bytes()); streaming_decoder->Finish(); return; @@ -144,20 +154,53 @@ void WasmEngine::AsyncCompile( memcpy(copy.get(), bytes.start(), bytes.length()); AsyncCompileJob* job = CreateAsyncCompileJob( - isolate, std::move(copy), bytes.length(), + isolate, enabled, std::move(copy), bytes.length(), handle(isolate->context(), isolate), std::move(resolver)); job->Start(); } std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation( - Isolate* isolate, Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver) { + Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context, + std::shared_ptr<CompilationResultResolver> resolver) { AsyncCompileJob* job = - CreateAsyncCompileJob(isolate, std::unique_ptr<byte[]>(nullptr), 0, - context, std::move(resolver)); + CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr), + 0, context, std::move(resolver)); return job->CreateStreamingDecoder(); } +bool WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module, + uint32_t function_index, ExecutionTier tier) { + ErrorThrower thrower(isolate, "Manually requested tier up"); + // Note we assume that "one-off" compilations can discard detected features. 
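Earlier in the hunk above, AsyncInstantiate's failure path now distinguishes two cases: a pending isolate exception means JS code run during instantiation (for example the start function) threw, and that exception travels to the promise; otherwise the failure originated inside the engine and the thrower is reified. Sketched with simplified stand-ins:

#include <cstdio>

struct IsolateSketch {
  const char* pending_exception = nullptr;
  bool has_pending_exception() const { return pending_exception != nullptr; }
  void clear_pending_exception() { pending_exception = nullptr; }
};

struct ThrowerSketch {
  const char* error_message = nullptr;
  bool error() const { return error_message != nullptr; }
  void Reset() { error_message = nullptr; }
  const char* Reify() { return error_message; }
};

void ReportInstantiationFailure(IsolateSketch* isolate,
                                ThrowerSketch* thrower) {
  if (isolate->has_pending_exception()) {
    const char* exception = isolate->pending_exception;  // JS threw
    isolate->clear_pending_exception();
    std::printf("reject promise with JS exception: %s\n", exception);
    thrower->Reset();  // drop any secondary engine-side error
  } else {
    // The real code DCHECKs thrower.error() before reifying it.
    std::printf("reject promise with engine error: %s\n", thrower->Reify());
  }
}

int main() {
  IsolateSketch isolate;
  ThrowerSketch thrower;
  thrower.error_message = "instantiation failed";
  ReportInstantiationFailure(&isolate, &thrower);  // engine-error path
}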
+ WasmFeatures detected = kNoWasmFeatures; + WasmCode* ret = WasmCompilationUnit::CompileWasmFunction( + isolate, native_module, &detected, &thrower, + GetModuleEnv(native_module->compilation_state()), + &native_module->module()->functions[function_index], tier); + return ret != nullptr; +} + +std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule( + Handle<WasmModuleObject> module_object) { + return module_object->managed_native_module()->get(); +} + +Handle<WasmModuleObject> WasmEngine::ImportNativeModule( + Isolate* isolate, std::shared_ptr<NativeModule> shared_module) { + CHECK_EQ(code_manager(), shared_module->code_manager()); + Vector<const byte> wire_bytes = shared_module->wire_bytes(); + Handle<Script> script = CreateWasmScript(isolate, wire_bytes); + Handle<WasmModuleObject> module_object = + WasmModuleObject::New(isolate, shared_module, script); + + // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. + // This requires unlocking the code space here. This should eventually be + // moved into the allocator. + CodeSpaceMemoryModificationScope modification_scope(isolate->heap()); + CompileJsToWasmWrappers(isolate, module_object); + return module_object; +} + CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() { base::LockGuard<base::Mutex> guard(&mutex_); if (compilation_stats_ == nullptr) { @@ -181,27 +224,22 @@ CodeTracer* WasmEngine::GetCodeTracer() { return code_tracer_.get(); } -void WasmEngine::Register(CancelableTaskManager* task_manager) { - task_managers_.emplace_back(task_manager); -} - -void WasmEngine::Unregister(CancelableTaskManager* task_manager) { - task_managers_.remove(task_manager); -} - AsyncCompileJob* WasmEngine::CreateAsyncCompileJob( - Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length, - Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver) { - AsyncCompileJob* job = new AsyncCompileJob( - isolate, std::move(bytes_copy), length, context, std::move(resolver)); + Isolate* isolate, const WasmFeatures& enabled, + std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context, + std::shared_ptr<CompilationResultResolver> resolver) { + AsyncCompileJob* job = + new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length, + context, std::move(resolver)); // Pass ownership to the unique_ptr in {jobs_}. + base::LockGuard<base::Mutex> guard(&mutex_); jobs_[job] = std::unique_ptr<AsyncCompileJob>(job); return job; } std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob( AsyncCompileJob* job) { + base::LockGuard<base::Mutex> guard(&mutex_); auto item = jobs_.find(job); DCHECK(item != jobs_.end()); std::unique_ptr<AsyncCompileJob> result = std::move(item->second); @@ -209,26 +247,56 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob( return result; } -void WasmEngine::AbortCompileJobsOnIsolate(Isolate* isolate) { - // Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}. 
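The ExportNativeModule/ImportNativeModule pair above shares compiled code across isolates: export hands out the shared_ptr that keeps the NativeModule alive, and import wraps it in a fresh per-isolate module object, with a CHECK that both sides use the same engine. The ownership flow in miniature, with illustrative stand-in types:

#include <cassert>
#include <memory>
#include <utility>

struct NativeModuleSketch { int engine_id; };
struct ModuleObjectSketch { std::shared_ptr<NativeModuleSketch> native; };

std::shared_ptr<NativeModuleSketch> ExportModule(
    const ModuleObjectSketch& obj) {
  return obj.native;  // shared ownership crosses the isolate boundary
}

ModuleObjectSketch ImportModule(int engine_id,
                                std::shared_ptr<NativeModuleSketch> shared) {
  assert(engine_id == shared->engine_id);   // both sides: one engine
  return ModuleObjectSketch{std::move(shared)};  // new wrapper, same code
}

int main() {
  ModuleObjectSketch a{std::make_shared<NativeModuleSketch>(
      NativeModuleSketch{1})};
  ModuleObjectSketch b = ImportModule(1, ExportModule(a));
  assert(a.native == b.native);  // one NativeModule, two wrappers
}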
- std::vector<AsyncCompileJob*> isolate_jobs; - +bool WasmEngine::HasRunningCompileJob(Isolate* isolate) { + base::LockGuard<base::Mutex> guard(&mutex_); for (auto& entry : jobs_) { - if (entry.first->isolate() != isolate) continue; - isolate_jobs.push_back(entry.first); + if (entry.first->isolate() == isolate) return true; } + return false; +} - for (auto* job : isolate_jobs) job->Abort(); +void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) { + base::LockGuard<base::Mutex> guard(&mutex_); + for (auto it = jobs_.begin(); it != jobs_.end();) { + if (it->first->isolate() == isolate) { + it = jobs_.erase(it); + } else { + ++it; + } + } } -void WasmEngine::TearDown() { - // Cancel all registered task managers. - for (auto task_manager : task_managers_) { - task_manager->CancelAndWait(); +namespace { + +struct WasmEnginePointerConstructTrait final { + static void Construct(void* raw_ptr) { + auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr); + *engine_ptr = std::shared_ptr<WasmEngine>(); } +}; + +// Holds the global shared pointer to the single {WasmEngine} that is intended +// to be shared among Isolates within the same process. The {LazyStaticInstance} +// here is required because {std::shared_ptr} has a non-trivial initializer. +base::LazyStaticInstance<std::shared_ptr<WasmEngine>, + WasmEnginePointerConstructTrait>::type + global_wasm_engine; + +} // namespace + +void WasmEngine::InitializeOncePerProcess() { + if (!FLAG_wasm_shared_engine) return; + global_wasm_engine.Pointer()->reset(new WasmEngine()); +} + +void WasmEngine::GlobalTearDown() { + if (!FLAG_wasm_shared_engine) return; + global_wasm_engine.Pointer()->reset(); +} - // Cancel all AsyncCompileJobs. - jobs_.clear(); +std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() { + if (FLAG_wasm_shared_engine) return global_wasm_engine.Get(); + return std::shared_ptr<WasmEngine>(new WasmEngine()); } } // namespace wasm diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h index 0a3667752f6..66c12404b70 100644 --- a/chromium/v8/src/wasm/wasm-engine.h +++ b/chromium/v8/src/wasm/wasm-engine.h @@ -9,6 +9,7 @@ #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-memory.h" +#include "src/wasm/wasm-tier.h" #include "src/zone/accounting-allocator.h" namespace v8 { @@ -22,6 +23,7 @@ class WasmInstanceObject; namespace wasm { class ErrorThrower; +struct WasmFeatures; struct ModuleWireBytes; class V8_EXPORT_PRIVATE CompilationResultResolver { @@ -47,7 +49,8 @@ class V8_EXPORT_PRIVATE WasmEngine { // Synchronously validates the given bytes that represent an encoded WASM // module. - bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes); + bool SyncValidate(Isolate* isolate, const WasmFeatures& enabled, + const ModuleWireBytes& bytes); // Synchronously compiles the given bytes that represent a translated // asm.js module. @@ -59,6 +62,7 @@ class V8_EXPORT_PRIVATE WasmEngine { // Synchronously compiles the given bytes that represent an encoded WASM // module. MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate, + const WasmFeatures& enabled, ErrorThrower* thrower, const ModuleWireBytes& bytes); @@ -74,8 +78,8 @@ class V8_EXPORT_PRIVATE WasmEngine { // encoded WASM module. // The {is_shared} flag indicates if the bytes backing the module could // be shared across threads, i.e. could be concurrently modified. 
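The tail of the hunk above introduces a process-wide engine behind --wasm-shared-engine, held in a lazily initialized global shared_ptr. A compressed sketch, where a plain global stands in for the LazyStaticInstance that V8 uses to avoid a non-trivial static initializer:

#include <memory>

struct EngineSketch {};

bool flag_wasm_shared_engine = true;  // stand-in for --wasm-shared-engine
std::shared_ptr<EngineSketch> global_engine;  // set once per process

void InitializeOncePerProcess() {
  if (flag_wasm_shared_engine) global_engine = std::make_shared<EngineSketch>();
}

void GlobalTearDown() {
  if (flag_wasm_shared_engine) global_engine.reset();
}

std::shared_ptr<EngineSketch> GetEngine() {
  if (flag_wasm_shared_engine) return global_engine;  // shared process-wide
  return std::make_shared<EngineSketch>();  // otherwise one per caller
}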
- void AsyncCompile(Isolate* isolate, - std::unique_ptr<CompilationResultResolver> resolver, + void AsyncCompile(Isolate* isolate, const WasmFeatures& enabled, + std::shared_ptr<CompilationResultResolver> resolver, const ModuleWireBytes& bytes, bool is_shared); // Begin an asynchronous instantiation of the given WASM module. @@ -85,8 +89,24 @@ MaybeHandle<JSReceiver> imports); std::shared_ptr<StreamingDecoder> StartStreamingCompilation( - Isolate* isolate, Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver); + Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context, + std::shared_ptr<CompilationResultResolver> resolver); + + // Compiles the function with the given index at a specific compilation tier + // and returns true on success, false (and pending exception) otherwise. This + // is mostly used for testing to force a function into a specific tier. + bool CompileFunction(Isolate* isolate, NativeModule* native_module, + uint32_t function_index, ExecutionTier tier); + + // Exports the sharable parts of the given module object so that they can be + // transferred to a different Context/Isolate using the same engine. + std::shared_ptr<NativeModule> ExportNativeModule( + Handle<WasmModuleObject> module_object); + + // Imports the shared part of a module from a different Context/Isolate using + // the same engine, recreating a full module object in the given Isolate. + Handle<WasmModuleObject> ImportNativeModule( + Isolate* isolate, std::shared_ptr<NativeModule> shared_module); WasmCodeManager* code_manager() { return &code_manager_; } @@ -103,41 +123,37 @@ class V8_EXPORT_PRIVATE WasmEngine { // Used to redirect tracing output from {stdout} to a file. CodeTracer* GetCodeTracer(); - // We register and unregister CancelableTaskManagers that run engine-dependent - // tasks. These tasks need to be shutdown if the engine is shut down. - void Register(CancelableTaskManager* task_manager); - void Unregister(CancelableTaskManager* task_manager); - // Remove {job} from the list of active compile jobs. std::unique_ptr<AsyncCompileJob> RemoveCompileJob(AsyncCompileJob* job); - // Returns true if at lease one AsyncCompileJob is currently running. - bool HasRunningCompileJob() const { return !jobs_.empty(); } + // Returns true if at least one AsyncCompileJob that belongs to the given + // Isolate is currently running. + bool HasRunningCompileJob(Isolate* isolate); - // Cancel all AsyncCompileJobs that belong to the given Isolate. Their - // deletion is delayed until all tasks accessing the AsyncCompileJob finish - // their execution. This is used to clean-up the isolate to be reused. - void AbortCompileJobsOnIsolate(Isolate*); + // Deletes all AsyncCompileJobs that belong to the given Isolate. All + // compilation is aborted; no more callbacks will be triggered. This is used + // for tearing down an isolate, or to clean it up to be reused. + void DeleteCompileJobsOnIsolate(Isolate* isolate); - void TearDown(); + // Call on process start and exit. + static void InitializeOncePerProcess(); + static void GlobalTearDown(); + + // Constructs a WasmEngine instance. Depending on whether we are sharing + // engines this might be a pointer to a new instance or to a shared one.
+ static std::shared_ptr<WasmEngine> GetWasmEngine(); private: AsyncCompileJob* CreateAsyncCompileJob( - Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length, + Isolate* isolate, const WasmFeatures& enabled, + std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context, - std::unique_ptr<CompilationResultResolver> resolver); + std::shared_ptr<CompilationResultResolver> resolver); - // We use an AsyncCompileJob as the key for itself so that we can delete the - // job from the map when it is finished. - std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_; WasmMemoryTracker memory_tracker_; WasmCodeManager code_manager_; AccountingAllocator allocator_; - // Contains all CancelableTaskManagers that run tasks that are dependent - // on the isolate. - std::list<CancelableTaskManager*> task_managers_; - // This mutex protects all information which is mutated concurrently or // fields that are initialized lazily on the first access. base::Mutex mutex_; @@ -145,6 +161,10 @@ class V8_EXPORT_PRIVATE WasmEngine { ////////////////////////////////////////////////////////////////////////////// // Protected by {mutex_}: + // We use an AsyncCompileJob as the key for itself so that we can delete the + // job from the map when it is finished. + std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_; + std::unique_ptr<CompilationStatistics> compilation_stats_; std::unique_ptr<CodeTracer> code_tracer_; diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc index 0f63c35bec4..0317bb7bf51 100644 --- a/chromium/v8/src/wasm/wasm-external-refs.cc +++ b/chromium/v8/src/wasm/wasm-external-refs.cc @@ -10,8 +10,8 @@ #include "include/v8config.h" #include "src/base/bits.h" -#include "src/trap-handler/trap-handler.h" #include "src/utils.h" +#include "src/v8memory.h" #include "src/wasm/wasm-external-refs.h" namespace v8 { @@ -247,10 +247,6 @@ void float64_pow_wrapper(Address data) { WriteUnalignedValue<double>(data, Pow(x, y)); } -void set_thread_in_wasm_flag() { trap_handler::SetThreadInWasm(); } - -void clear_thread_in_wasm_flag() { trap_handler::ClearThreadInWasm(); } - static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr; void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) { diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h index 438235179ba..fc116b7fd8f 100644 --- a/chromium/v8/src/wasm/wasm-external-refs.h +++ b/chromium/v8/src/wasm/wasm-external-refs.h @@ -67,9 +67,6 @@ uint32_t word32_ror_wrapper(Address data); void float64_pow_wrapper(Address data); -void set_thread_in_wasm_flag(); -void clear_thread_in_wasm_flag(); - typedef void (*WasmTrapCallbackForTesting)(); void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback); diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h new file mode 100644 index 00000000000..ec8aa8ba0c7 --- /dev/null +++ b/chromium/v8/src/wasm/wasm-feature-flags.h @@ -0,0 +1,26 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_WASM_WASM_FEATURE_FLAGS_H_ +#define V8_WASM_WASM_FEATURE_FLAGS_H_ + +// The SEPARATOR argument allows generating proper comma-separated lists. 
+#define FOREACH_WASM_FEATURE_FLAG(V, SEPARATOR) \ + V(mv, "multi-value support", false) \ + SEPARATOR \ + V(eh, "exception handling opcodes", false) \ + SEPARATOR \ + V(se, "sign extension opcodes", true) \ + SEPARATOR \ + V(sat_f2i_conversions, "saturating float conversion opcodes", false) \ + SEPARATOR \ + V(threads, "thread opcodes", false) \ + SEPARATOR \ + V(simd, "SIMD opcodes", false) \ + SEPARATOR \ + V(anyref, "anyref opcodes", false) \ + SEPARATOR \ + V(mut_global, "import/export mutable global support", true) + +#endif // V8_WASM_WASM_FEATURE_FLAGS_H_ diff --git a/chromium/v8/src/wasm/wasm-features.cc b/chromium/v8/src/wasm/wasm-features.cc new file mode 100644 index 00000000000..6271fd05069 --- /dev/null +++ b/chromium/v8/src/wasm/wasm-features.cc @@ -0,0 +1,40 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/wasm/wasm-features.h" +#include "src/flags.h" +#include "src/handles-inl.h" +#include "src/isolate.h" + +namespace v8 { +namespace internal { +namespace wasm { + +#define COMMA , +#define SPACE +#define DO_UNION(feat, desc, val) dst->feat |= src.feat; +#define FLAG_REF(feat, desc, val) FLAG_experimental_wasm_##feat + +void UnionFeaturesInto(WasmFeatures* dst, const WasmFeatures& src) { + FOREACH_WASM_FEATURE(DO_UNION, SPACE); +} + +WasmFeatures WasmFeaturesFromFlags() { + return WasmFeatures{FOREACH_WASM_FEATURE(FLAG_REF, COMMA)}; +} + +WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate) { + WasmFeatures features = WasmFeaturesFromFlags(); + features.threads |= + isolate->AreWasmThreadsEnabled(handle(isolate->context(), isolate)); + return features; +} + +#undef DO_UNION +#undef FLAG_REF +#undef SPACE +#undef COMMA +} // namespace wasm +} // namespace internal +} // namespace v8 diff --git a/chromium/v8/src/wasm/wasm-features.h b/chromium/v8/src/wasm/wasm-features.h new file mode 100644 index 00000000000..2c6ab0f85a5 --- /dev/null +++ b/chromium/v8/src/wasm/wasm-features.h @@ -0,0 +1,67 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_WASM_WASM_FEATURES_H_ +#define V8_WASM_WASM_FEATURES_H_ + +// The feature flags are declared in their own header. +#include "src/base/macros.h" +#include "src/wasm/wasm-feature-flags.h" + +// All features, including features that do not have flags. +#define FOREACH_WASM_FEATURE FOREACH_WASM_FEATURE_FLAG + +namespace v8 { +namespace internal { +class Isolate; +namespace wasm { + +#define COMMA , +#define SPACE +#define DECL_FIELD(feat, desc, val) bool feat = false; +#define JUST_TRUE(feat, desc, val) true +#define JUST_FALSE(feat, desc, val) false +#define DECL_PARAM(feat, desc, val) bool p##feat +#define DO_INIT(feat, desc, val) feat(p##feat) + +// Enabled or detected features. 
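// (For orientation, a sketch of how the X-macro machinery above expands,
// assuming the flag list from wasm-feature-flags.h: with DECL_FIELD as
// defined above, FOREACH_WASM_FEATURE(DECL_FIELD, SPACE) expands to
//   bool mv = false; bool eh = false; bool se = false; /* ... */
// and FOREACH_WASM_FEATURE(JUST_TRUE, COMMA) expands to "true, true, ...",
// which is what lets {kAllWasmFeatures} below be brace-initialized through
// the constexpr constructor.)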
+struct WasmFeatures { + FOREACH_WASM_FEATURE(DECL_FIELD, SPACE) + + constexpr WasmFeatures() = default; + + explicit constexpr WasmFeatures(FOREACH_WASM_FEATURE(DECL_PARAM, COMMA)) + : FOREACH_WASM_FEATURE(DO_INIT, COMMA) {} +}; + +static constexpr WasmFeatures kAllWasmFeatures{ + FOREACH_WASM_FEATURE(JUST_TRUE, COMMA)}; + +static constexpr WasmFeatures kNoWasmFeatures{ + FOREACH_WASM_FEATURE(JUST_FALSE, COMMA)}; + +#undef JUST_TRUE +#undef JUST_FALSE +#undef DECL_FIELD +#undef DECL_PARAM +#undef DO_INIT +#undef COMMA +#undef SPACE + +static constexpr WasmFeatures kAsmjsWasmFeatures = kNoWasmFeatures; + +V8_EXPORT_PRIVATE WasmFeatures WasmFeaturesFromFlags(); + +// Enables features based on both commandline flags and the isolate. +// Precondition: A valid context must be set in {isolate->context()}. +V8_EXPORT_PRIVATE WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate); + +V8_EXPORT_PRIVATE void UnionFeaturesInto(WasmFeatures* dst, + const WasmFeatures& src); + +} // namespace wasm +} // namespace internal +} // namespace v8 + +#endif // V8_WASM_WASM_FEATURES_H_ diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc index 581277cbab7..0c7fb25b67a 100644 --- a/chromium/v8/src/wasm/wasm-interpreter.cc +++ b/chromium/v8/src/wasm/wasm-interpreter.cc @@ -37,6 +37,12 @@ namespace wasm { if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \ } while (false) +#if V8_TARGET_BIG_ENDIAN +#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1) +#else +#define LANE(i, type) (i) +#endif + #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF) #define WASM_CTYPES(V) \ @@ -786,7 +792,8 @@ class SideTable : public ZoneObject { case kExprBlock: case kExprLoop: { bool is_loop = opcode == kExprLoop; - BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc()); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i, + i.pc()); if (imm.type == kWasmVar) { imm.sig = module->signatures[imm.sig_index]; } @@ -801,7 +808,8 @@ class SideTable : public ZoneObject { break; } case kExprIf: { - BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc()); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i, + i.pc()); if (imm.type == kWasmVar) { imm.sig = module->signatures[imm.sig_index]; } @@ -931,8 +939,7 @@ class CodeMap { bool call_indirect_through_module_ = false; public: - CodeMap(Isolate* isolate, const WasmModule* module, - const uint8_t* module_start, Zone* zone) + CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone) : zone_(zone), module_(module), interpreter_code_(zone) { if (module == nullptr) return; interpreter_code_.reserve(module->functions.size()); @@ -1256,6 +1263,7 @@ class ThreadImpl { const WasmModule* module() const { return codemap_->module(); } void DoTrap(TrapReason trap, pc_t pc) { + TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap)); state_ = WasmInterpreter::TRAPPED; trap_reason_ = trap; CommitPc(pc); @@ -1419,8 +1427,8 @@ class ThreadImpl { len = 1 + imm.length; if (FLAG_wasm_trace_memory) { - wasm::MemoryTracingInfo info(imm.offset + index, false, rep); - TraceMemoryOperation(ExecutionEngine::kInterpreter, &info, + MemoryTracingInfo info(imm.offset + index, false, rep); + TraceMemoryOperation(ExecutionTier::kInterpreter, &info, code->function->func_index, static_cast<int>(pc), instance_object_->memory_start()); } @@ -1445,8 +1453,8 @@ class ThreadImpl { len = 1 + imm.length; if (FLAG_wasm_trace_memory) { - wasm::MemoryTracingInfo info(imm.offset + index, true, rep); - 
TraceMemoryOperation(ExecutionEngine::kInterpreter, &info, + MemoryTracingInfo info(imm.offset + index, true, rep); + TraceMemoryOperation(ExecutionTier::kInterpreter, &info, code->function->func_index, static_cast<int>(pc), instance_object_->memory_start()); } @@ -1664,7 +1672,6 @@ class ThreadImpl { byte* GetGlobalPtr(const WasmGlobal* global) { if (global->mutability && global->imported) { - DCHECK(FLAG_experimental_wasm_mut_global); return reinterpret_cast<byte*>( instance_object_->imported_mutable_globals()[global->index]); } else { @@ -1695,7 +1702,8 @@ class ThreadImpl { ++len; \ WasmValue val = Pop(); \ Simd128 s = val.to_s128(); \ - Push(WasmValue(s.to_##name().val[imm.lane])); \ + auto ss = s.to_##name(); \ + Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \ return true; \ } EXTRACT_LANE_CASE(I32x4, i32x4) @@ -1711,9 +1719,9 @@ class ThreadImpl { stype s2 = v2.to_s128().to_##name(); \ stype res; \ for (size_t i = 0; i < count; ++i) { \ - auto a = s1.val[i]; \ - auto b = s2.val[i]; \ - res.val[i] = expr; \ + auto a = s1.val[LANE(i, s1)]; \ + auto b = s2.val[LANE(i, s1)]; \ + res.val[LANE(i, s1)] = expr; \ } \ Push(WasmValue(Simd128(res))); \ return true; \ @@ -1856,7 +1864,7 @@ class ThreadImpl { WasmValue new_val = Pop(); \ WasmValue simd_val = Pop(); \ stype s = simd_val.to_s128().to_##name(); \ - s.val[imm.lane] = new_val.to<ctype>(); \ + s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \ Push(WasmValue(Simd128(s))); \ return true; \ } @@ -1905,8 +1913,8 @@ class ThreadImpl { src_type s = v.to_s128().to_##name(); \ dst_type res; \ for (size_t i = 0; i < count; ++i) { \ - ctype a = s.val[start_index + i]; \ - res.val[i] = expr; \ + ctype a = s.val[LANE(start_index + i, s)]; \ + res.val[LANE(i, res)] = expr; \ } \ Push(WasmValue(Simd128(res))); \ return true; \ @@ -1940,23 +1948,25 @@ class ThreadImpl { CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t, a) #undef CONVERT_CASE -#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \ - is_unsigned) \ - case kExpr##op: { \ - WasmValue v2 = Pop(); \ - WasmValue v1 = Pop(); \ - src_type s1 = v1.to_s128().to_##name(); \ - src_type s2 = v2.to_s128().to_##name(); \ - dst_type res; \ - int64_t min = std::numeric_limits<ctype>::min(); \ - int64_t max = std::numeric_limits<ctype>::max(); \ - for (size_t i = 0; i < count; ++i) { \ - int32_t v = i < count / 2 ? s1.val[i] : s2.val[i - count / 2]; \ - int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \ - res.val[i] = static_cast<dst_ctype>(std::max(min, std::min(max, a))); \ - } \ - Push(WasmValue(Simd128(res))); \ - return true; \ +#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \ + is_unsigned) \ + case kExpr##op: { \ + WasmValue v2 = Pop(); \ + WasmValue v1 = Pop(); \ + src_type s1 = v1.to_s128().to_##name(); \ + src_type s2 = v2.to_s128().to_##name(); \ + dst_type res; \ + int64_t min = std::numeric_limits<ctype>::min(); \ + int64_t max = std::numeric_limits<ctype>::max(); \ + for (size_t i = 0; i < count; ++i) { \ + int32_t v = i < count / 2 ? s1.val[LANE(i, s1)] \ + : s2.val[LANE(i - count / 2, s2)]; \ + int64_t a = is_unsigned ? 
static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \ + res.val[LANE(i, res)] = \ + static_cast<dst_ctype>(std::max(min, std::min(max, a))); \ + } \ + Push(WasmValue(Simd128(res))); \ + return true; \ } PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t, false) @@ -1978,19 +1988,21 @@ class ThreadImpl { Push(WasmValue(Simd128(res))); return true; } -#define ADD_HORIZ_CASE(op, name, stype, count) \ - case kExpr##op: { \ - WasmValue v2 = Pop(); \ - WasmValue v1 = Pop(); \ - stype s1 = v1.to_s128().to_##name(); \ - stype s2 = v2.to_s128().to_##name(); \ - stype res; \ - for (size_t i = 0; i < count / 2; ++i) { \ - res.val[i] = s1.val[i * 2] + s1.val[i * 2 + 1]; \ - res.val[i + count / 2] = s2.val[i * 2] + s2.val[i * 2 + 1]; \ - } \ - Push(WasmValue(Simd128(res))); \ - return true; \ +#define ADD_HORIZ_CASE(op, name, stype, count) \ + case kExpr##op: { \ + WasmValue v2 = Pop(); \ + WasmValue v1 = Pop(); \ + stype s1 = v1.to_s128().to_##name(); \ + stype s2 = v2.to_s128().to_##name(); \ + stype res; \ + for (size_t i = 0; i < count / 2; ++i) { \ + res.val[LANE(i, s1)] = \ + s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \ + res.val[LANE(i + count / 2, s1)] = \ + s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \ + } \ + Push(WasmValue(Simd128(res))); \ + return true; \ } ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4) ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4) @@ -2005,8 +2017,9 @@ class ThreadImpl { int16 res; for (size_t i = 0; i < kSimd128Size; ++i) { int lane = imm.shuffle[i]; - res.val[i] = - lane < kSimd128Size ? v1.val[lane] : v2.val[lane - kSimd128Size]; + res.val[LANE(i, v1)] = lane < kSimd128Size + ? v1.val[LANE(lane, v1)] + : v2.val[LANE(lane - kSimd128Size, v1)]; } Push(WasmValue(Simd128(res))); return true; @@ -2122,9 +2135,9 @@ class ThreadImpl { #ifdef DEBUG // Compute the stack effect of this opcode, and verify later that the // stack was modified accordingly. - std::pair<uint32_t, uint32_t> stack_effect = wasm::StackEffect( - codemap_->module(), frames_.back().code->function->sig, - code->orig_start + pc, code->orig_end); + std::pair<uint32_t, uint32_t> stack_effect = + StackEffect(codemap_->module(), frames_.back().code->function->sig, + code->orig_start + pc, code->orig_end); sp_t expected_new_stack_height = StackHeight() - stack_effect.first + stack_effect.second; #endif @@ -2133,17 +2146,20 @@ class ThreadImpl { case kExprNop: break; case kExprBlock: { - BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, + &decoder, code->at(pc)); len = 1 + imm.length; break; } case kExprLoop: { - BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, + &decoder, code->at(pc)); len = 1 + imm.length; break; } case kExprIf: { - BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc)); + BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, + &decoder, code->at(pc)); WasmValue cond = Pop(); bool is_true = cond.to<uint32_t>() != 0; if (is_true) { @@ -2302,6 +2318,7 @@ class ThreadImpl { uint32_t entry_index = Pop().to<uint32_t>(); // Assume only one table for now. DCHECK_LE(module()->tables.size(), 1u); + CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC. 
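// (A plausible reading of the TODO above: the interpreter keeps {pc} in a
// local variable for speed, so any path that can leave interpreted code,
// such as a trap or an indirect call, must first write it back via CommitPc;
// otherwise stack inspection would observe a stale position.)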
ExternalCallResult result = CallIndirectFunction(0, entry_index, imm.sig_index); switch (result.type) { @@ -2331,9 +2348,10 @@ class ThreadImpl { byte* ptr = GetGlobalPtr(global); WasmValue val; switch (global->type) { -#define CASE_TYPE(wasm, ctype) \ - case kWasm##wasm: \ - val = WasmValue(*reinterpret_cast<ctype*>(ptr)); \ +#define CASE_TYPE(wasm, ctype) \ + case kWasm##wasm: \ + val = WasmValue( \ + ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \ break; WASM_CTYPES(CASE_TYPE) #undef CASE_TYPE @@ -2351,9 +2369,10 @@ class ThreadImpl { byte* ptr = GetGlobalPtr(global); WasmValue val = Pop(); switch (global->type) { -#define CASE_TYPE(wasm, ctype) \ - case kWasm##wasm: \ - *reinterpret_cast<ctype*>(ptr) = val.to<ctype>(); \ +#define CASE_TYPE(wasm, ctype) \ + case kWasm##wasm: \ + WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \ + val.to<ctype>()); \ break; WASM_CTYPES(CASE_TYPE) #undef CASE_TYPE @@ -2687,8 +2706,8 @@ class ThreadImpl { ExternalCallResult CallExternalWasmFunction( Isolate* isolate, Handle<WasmInstanceObject> instance, - const wasm::WasmCode* code, FunctionSig* sig) { - if (code->kind() == wasm::WasmCode::kWasmToJsWrapper && + const WasmCode* code, FunctionSig* sig) { + if (code->kind() == WasmCode::kWasmToJsWrapper && !IsJSCompatibleSignature(sig)) { isolate->Throw(*isolate->factory()->NewTypeError( MessageTemplate::kWasmTrapTypeError)); @@ -2881,7 +2900,7 @@ class ThreadImpl { HandleScope scope(isolate); FunctionSig* signature = module()->signatures[sig_index]; - if (code->kind() == wasm::WasmCode::kFunction) { + if (code->kind() == WasmCode::kFunction) { if (!instance_object_.is_identical_to(instance)) { // Cross instance call. return CallExternalWasmFunction(isolate, instance, code, signature); @@ -2890,8 +2909,8 @@ class ThreadImpl { } // Call to external function. 
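// (That is, the callee is not regular wasm code of this instance; both
// interpreter entries and wasm-to-JS wrappers leave the interpreter, so they
// share the CallExternalWasmFunction path below.)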
- if (code->kind() == wasm::WasmCode::kInterpreterEntry || - code->kind() == wasm::WasmCode::kWasmToJsWrapper) { + if (code->kind() == WasmCode::kInterpreterEntry || + code->kind() == WasmCode::kWasmToJsWrapper) { return CallExternalWasmFunction(isolate, instance, code, signature); } return {ExternalCallResult::INVALID_FUNC}; @@ -3060,12 +3079,11 @@ class WasmInterpreterInternals : public ZoneObject { CodeMap codemap_; ZoneVector<ThreadImpl> threads_; - WasmInterpreterInternals(Isolate* isolate, Zone* zone, - const WasmModule* module, + WasmInterpreterInternals(Zone* zone, const WasmModule* module, const ModuleWireBytes& wire_bytes, Handle<WasmInstanceObject> instance_object) : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone), - codemap_(isolate, module, module_bytes_.data(), zone), + codemap_(module, module_bytes_.data(), zone), threads_(zone) { threads_.emplace_back(zone, &codemap_, instance_object); } @@ -3097,8 +3115,7 @@ WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module, Handle<WasmInstanceObject> instance_object) : zone_(isolate->allocator(), ZONE_NAME), internals_(new (&zone_) WasmInterpreterInternals( - isolate, &zone_, module, wire_bytes, - MakeWeak(isolate, instance_object))) {} + &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {} WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); } @@ -3205,6 +3222,7 @@ void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) { } #undef TRACE +#undef LANE #undef FOREACH_INTERNAL_OPCODE #undef WASM_CTYPES #undef FOREACH_SIMPLE_BINOP diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc index 7ff27dc769b..1a20b88f10e 100644 --- a/chromium/v8/src/wasm/wasm-js.cc +++ b/chromium/v8/src/wasm/wasm-js.cc @@ -4,8 +4,8 @@ #include "src/wasm/wasm-js.h" +#include "src/api-inl.h" #include "src/api-natives.h" -#include "src/api.h" #include "src/assert-scope.h" #include "src/ast/ast.h" #include "src/execution.h" @@ -29,11 +29,39 @@ namespace v8 { class WasmStreaming::WasmStreamingImpl { public: - void OnBytesReceived(const uint8_t* bytes, size_t size) {} + WasmStreamingImpl( + Isolate* isolate, + std::shared_ptr<internal::wasm::CompilationResultResolver> resolver) + : isolate_(isolate), resolver_(std::move(resolver)) { + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_); + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); + streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation( + i_isolate, enabled_features, handle(i_isolate->context(), i_isolate), + resolver_); + } + + void OnBytesReceived(const uint8_t* bytes, size_t size) { + streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size)); + } + void Finish() { streaming_decoder_->Finish(); } + + void Abort(MaybeLocal<Value> exception) { + i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate_)); + streaming_decoder_->Abort(); - void Finish() {} + // If no exception value is provided, we do not reject the promise. This can + // happen when streaming compilation gets aborted when no script execution + // is allowed anymore, e.g. when a browser tab gets refreshed. 
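// (So Abort with an empty MaybeLocal tears the decoder down silently, while
// Abort with a value additionally rejects the pending promise through the
// resolver, as the dispatch below shows.)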
+ if (exception.IsEmpty()) return; - void Abort(MaybeLocal<Value> exception) {} + resolver_->OnCompilationFailed( + Utils::OpenHandle(*exception.ToLocalChecked())); + } + + private: + Isolate* isolate_ = nullptr; + std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_; + std::shared_ptr<internal::wasm::CompilationResultResolver> resolver_; }; WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl) @@ -176,30 +204,6 @@ i::MaybeHandle<i::JSReceiver> GetValueAsImports(Local<Value> arg, return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj)); } -void WebAssemblyCompileStreaming( - const v8::FunctionCallbackInfo<v8::Value>& args) { - v8::Isolate* isolate = args.GetIsolate(); - i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); - - if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) { - // Manually create a promise and reject it. - Local<Context> context = isolate->GetCurrentContext(); - ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context)); - v8::ReturnValue<v8::Value> return_value = args.GetReturnValue(); - return_value.Set(resolver->GetPromise()); - ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()"); - thrower.CompileError("Wasm code generation disallowed by embedder"); - auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify())); - CHECK_IMPLIES(!maybe.FromMaybe(false), - i_isolate->has_scheduled_exception()); - return; - } - - MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks); - DCHECK_NOT_NULL(i_isolate->wasm_compile_streaming_callback()); - i_isolate->wasm_compile_streaming_callback()(args); -} - namespace { // This class resolves the result of WebAssembly.compile. It just places the // compilation result in the supplied {promise}. 
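// For orientation: a resolver settles a JS promise through
// OnCompilationSucceeded(module) or OnCompilationFailed(error). Some paths
// may deliver a late second callback, which is why the {finished_} guards
// added below make every call after the first a no-op.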
@@ -213,6 +217,8 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver { } void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override { + if (finished_) return; + finished_ = true; i::MaybeHandle<i::Object> promise_result = i::JSPromise::Resolve(promise_, result); CHECK_EQ(promise_result.is_null(), @@ -220,6 +226,8 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver { } void OnCompilationFailed(i::Handle<i::Object> error_reason) override { + if (finished_) return; + finished_ = true; i::MaybeHandle<i::Object> promise_result = i::JSPromise::Reject(promise_, error_reason); CHECK_EQ(promise_result.is_null(), @@ -227,6 +235,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver { } private: + bool finished_ = false; i::Handle<i::JSPromise> promise_; }; @@ -350,6 +359,8 @@ class AsyncInstantiateCompileResultResolver } void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override { + if (finished_) return; + finished_ = true; isolate_->wasm_engine()->AsyncInstantiate( isolate_, base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_, @@ -358,12 +369,15 @@ class AsyncInstantiateCompileResultResolver } void OnCompilationFailed(i::Handle<i::Object> error_reason) override { + if (finished_) return; + finished_ = true; i::MaybeHandle<i::Object> promise_result = i::JSPromise::Reject(promise_, error_reason); CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception()); } private: + bool finished_ = false; i::Isolate* isolate_; i::Handle<i::JSPromise> promise_; i::MaybeHandle<i::JSReceiver> maybe_imports_; @@ -390,7 +404,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) { v8::ReturnValue<v8::Value> return_value = args.GetReturnValue(); return_value.Set(promise); - std::unique_ptr<i::wasm::CompilationResultResolver> resolver( + std::shared_ptr<i::wasm::CompilationResultResolver> resolver( new AsyncCompilationResolver(i_isolate, Utils::OpenHandle(*promise))); bool is_shared = false; @@ -400,8 +414,64 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) { return; } // Asynchronous compilation handles copying wire bytes if necessary. - i_isolate->wasm_engine()->AsyncCompile(i_isolate, std::move(resolver), bytes, - is_shared); + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); + i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features, + std::move(resolver), bytes, is_shared); +} + +// WebAssembly.compileStreaming(Promise<Response>) -> Promise +void WebAssemblyCompileStreaming( + const v8::FunctionCallbackInfo<v8::Value>& args) { + v8::Isolate* isolate = args.GetIsolate(); + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks); + HandleScope scope(isolate); + ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()"); + Local<Context> context = isolate->GetCurrentContext(); + + // Create and assign the return value of this function. + ASSIGN(Promise::Resolver, result_resolver, Promise::Resolver::New(context)); + Local<Promise> promise = result_resolver->GetPromise(); + v8::ReturnValue<v8::Value> return_value = args.GetReturnValue(); + return_value.Set(promise); + + // Prepare the CompilationResultResolver for the compilation. 
+ auto resolver = std::make_shared<AsyncCompilationResolver>( + i_isolate, Utils::OpenHandle(*promise)); + + if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) { + thrower.CompileError("Wasm code generation disallowed by embedder"); + resolver->OnCompilationFailed(thrower.Reify()); + return; + } + + // Allocate the streaming decoder in a Managed so we can pass it to the + // embedder. + i::Handle<i::Managed<WasmStreaming>> data = + i::Managed<WasmStreaming>::Allocate( + i_isolate, 0, + base::make_unique<WasmStreaming::WasmStreamingImpl>(isolate, + resolver)); + + DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback()); + ASSIGN( + v8::Function, compile_callback, + v8::Function::New(context, i_isolate->wasm_streaming_callback(), + Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1)); + + // The parameter may be of type {Response} or of type {Promise<Response>}. + // Treat either case of parameter as Promise.resolve(parameter) + // as per https://www.w3.org/2001/tag/doc/promises-guide#resolve-arguments + + // Ending with: + // return Promise.resolve(parameter).then(compile_callback); + ASSIGN(Promise::Resolver, input_resolver, Promise::Resolver::New(context)); + if (!input_resolver->Resolve(context, args[0]).IsJust()) return; + + // We do not have any use of the result here. The {compile_callback} will + // start streaming compilation, which will eventually resolve the promise we + // set as result value. + USE(input_resolver->GetPromise()->Then(context, compile_callback)); } // WebAssembly.validate(bytes) -> bool @@ -422,6 +492,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) { return; } + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); bool validated = false; if (is_shared) { // Make a copy of the wire bytes to avoid concurrent modification. @@ -429,10 +500,12 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) { memcpy(copy.get(), bytes.start(), bytes.length()); i::wasm::ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length()); - validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes_copy); + validated = i_isolate->wasm_engine()->SyncValidate( + i_isolate, enabled_features, bytes_copy); } else { // The wire bytes are not shared, OK to use them directly. - validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes); + validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, + enabled_features, bytes); } return_value.Set(Boolean::New(isolate, validated)); @@ -462,6 +535,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) { if (thrower.error()) { return; } + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); i::MaybeHandle<i::Object> module_obj; if (is_shared) { // Make a copy of the wire bytes to avoid concurrent modification. @@ -469,12 +543,12 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) { memcpy(copy.get(), bytes.start(), bytes.length()); i::wasm::ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length()); - module_obj = - i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes_copy); + module_obj = i_isolate->wasm_engine()->SyncCompile( + i_isolate, enabled_features, &thrower, bytes_copy); } else { // The wire bytes are not shared, OK to use them directly. 
- module_obj = - i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes); + module_obj = i_isolate->wasm_engine()->SyncCompile( + i_isolate, enabled_features, &thrower, bytes); } if (module_obj.is_null()) return; @@ -563,40 +637,6 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate, return Utils::ToLocal(instance_object.ToHandleChecked()); } -void WebAssemblyInstantiateCallback( - const v8::FunctionCallbackInfo<v8::Value>& args) { - DCHECK_GE(args.Length(), 1); - Isolate* isolate = args.GetIsolate(); - MicrotasksScope does_not_run_microtasks(isolate, - MicrotasksScope::kDoNotRunMicrotasks); - - HandleScope scope(args.GetIsolate()); - - Local<Context> context = isolate->GetCurrentContext(); - Local<Value> module = args[0]; - - const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance"); - const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module"); - Local<Value> instance; - if (!WebAssemblyInstantiateImpl(isolate, module, args.Data()) - .ToLocal(&instance)) { - return; - } - - Local<Object> ret = Object::New(isolate); - Local<String> instance_name = - String::NewFromOneByte(isolate, instance_str, - NewStringType::kInternalized) - .ToLocalChecked(); - Local<String> module_name = - String::NewFromOneByte(isolate, module_str, NewStringType::kInternalized) - .ToLocalChecked(); - - CHECK(ret->CreateDataProperty(context, instance_name, instance).IsJust()); - CHECK(ret->CreateDataProperty(context, module_name, module).IsJust()); - args.GetReturnValue().Set(ret); -} - // new WebAssembly.Instance(module, imports) -> WebAssembly.Instance void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) { Isolate* isolate = args.GetIsolate(); @@ -634,27 +674,76 @@ void WebAssemblyInstantiateStreaming( i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); i_isolate->CountUsage( v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation); - // we use i_isolate in DCHECKS in the ASSIGN statements. - USE(i_isolate); + MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks); HandleScope scope(isolate); - Local<Context> context = isolate->GetCurrentContext(); - ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context)); - Local<Value> first_arg_value = args[0]; + ScheduledErrorThrower thrower(i_isolate, + "WebAssembly.instantiateStreaming()"); - ASSIGN(Function, compileStreaming, - Function::New(context, WebAssemblyCompileStreaming)); - ASSIGN(Value, compile_retval, - compileStreaming->Call(context, args.Holder(), 1, &first_arg_value)); - Local<Promise> module_promise = Local<Promise>::Cast(compile_retval); + // Create and assign the return value of this function. + ASSIGN(Promise::Resolver, result_resolver, Promise::Resolver::New(context)); + Local<Promise> promise = result_resolver->GetPromise(); + v8::ReturnValue<v8::Value> return_value = args.GetReturnValue(); + return_value.Set(promise); - DCHECK(!module_promise.IsEmpty()); - Local<Value> data = args[1]; - ASSIGN(Function, instantiate_impl, - Function::New(context, WebAssemblyInstantiateCallback, data)); - ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl)); - args.GetReturnValue().Set(result); + // Create an InstantiateResultResolver in case there is an issue with the + // passed parameters. 
+ std::unique_ptr<i::wasm::InstantiationResultResolver> resolver( + new InstantiateModuleResultResolver(i_isolate, + Utils::OpenHandle(*promise))); + + if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) { + thrower.CompileError("Wasm code generation disallowed by embedder"); + resolver->OnInstantiationFailed(thrower.Reify()); + return; + } + + // If args.Length < 2, this will be undefined - see FunctionCallbackInfo. + Local<Value> ffi = args[1]; + i::MaybeHandle<i::JSReceiver> maybe_imports = + GetValueAsImports(ffi, &thrower); + + if (thrower.error()) { + resolver->OnInstantiationFailed(thrower.Reify()); + return; + } + + // We start compilation now, we have no use for the + // {InstantiationResultResolver}. + resolver.reset(); + + std::shared_ptr<i::wasm::CompilationResultResolver> compilation_resolver( + new AsyncInstantiateCompileResultResolver( + i_isolate, Utils::OpenHandle(*promise), maybe_imports)); + + // Allocate the streaming decoder in a Managed so we can pass it to the + // embedder. + i::Handle<i::Managed<WasmStreaming>> data = + i::Managed<WasmStreaming>::Allocate( + i_isolate, 0, + base::make_unique<WasmStreaming::WasmStreamingImpl>( + isolate, compilation_resolver)); + + DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback()); + ASSIGN( + v8::Function, compile_callback, + v8::Function::New(context, i_isolate->wasm_streaming_callback(), + Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1)); + + // The parameter may be of type {Response} or of type {Promise<Response>}. + // Treat either case of parameter as Promise.resolve(parameter) + // as per https://www.w3.org/2001/tag/doc/promises-guide#resolve-arguments + + // Ending with: + // return Promise.resolve(parameter).then(compile_callback); + ASSIGN(Promise::Resolver, input_resolver, Promise::Resolver::New(context)); + if (!input_resolver->Resolve(context, args[0]).IsJust()) return; + + // We do not have any use of the result here. The {compile_callback} will + // start streaming compilation, which will eventually resolve the promise we + // set as result value. + USE(input_resolver->GetPromise()->Then(context, compile_callback)); } // WebAssembly.instantiate(module, imports) -> WebAssembly.Instance @@ -667,7 +756,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation); MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks); - ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()"); + ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation"); HandleScope scope(isolate); @@ -720,7 +809,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) { // {InstantiationResultResolver}. resolver.reset(); - std::unique_ptr<i::wasm::CompilationResultResolver> compilation_resolver( + std::shared_ptr<i::wasm::CompilationResultResolver> compilation_resolver( new AsyncInstantiateCompileResultResolver( i_isolate, Utils::OpenHandle(*promise), maybe_imports)); @@ -733,8 +822,10 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) { } // Asynchronous compilation handles copying wire bytes if necessary. 
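// (Compare the synchronous paths in WebAssemblyValidate and WebAssemblyModule
// above, which memcpy shared wire bytes into a private copy before use; on
// the asynchronous path that copy is left to AsyncCompile when {is_shared}
// is set.)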
- i_isolate->wasm_engine()->AsyncCompile( - i_isolate, std::move(compilation_resolver), bytes, is_shared); + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); + i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features, + std::move(compilation_resolver), bytes, + is_shared); } bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower, @@ -788,9 +879,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) { if (!maybe.ToLocal(&value)) return; v8::Local<v8::String> string; if (!value->ToString(context).ToLocal(&string)) return; - bool equal; - if (!string->Equals(context, v8_str(isolate, "anyfunc")).To(&equal)) return; - if (!equal) { + if (!string->StringEquals(v8_str(isolate, "anyfunc"))) { thrower.TypeError("Descriptor property 'element' must be 'anyfunc'"); return; } @@ -858,7 +947,8 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) { } bool is_shared_memory = false; - if (i::FLAG_experimental_wasm_threads) { + auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); + if (enabled_features.threads) { // Shared property of descriptor Local<String> shared_key = v8_str(isolate, "shared"); Maybe<bool> has_shared = descriptor->Has(context, shared_key); @@ -938,14 +1028,11 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Local<v8::String> string; if (!value->ToString(context).ToLocal(&string)) return; - bool equal; - if (string->Equals(context, v8_str(isolate, "i32")).To(&equal) && equal) { + if (string->StringEquals(v8_str(isolate, "i32"))) { type = i::wasm::kWasmI32; - } else if (string->Equals(context, v8_str(isolate, "f32")).To(&equal) && - equal) { + } else if (string->StringEquals(v8_str(isolate, "f32"))) { type = i::wasm::kWasmF32; - } else if (string->Equals(context, v8_str(isolate, "f64")).To(&equal) && - equal) { + } else if (string->StringEquals(v8_str(isolate, "f64"))) { type = i::wasm::kWasmF64; } else { thrower.TypeError( @@ -1294,19 +1381,26 @@ void WebAssemblyGlobalSetValue( // TODO(titzer): we use the API to create the function template because the // internal guts are too ugly to replicate here. -static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate, - FunctionCallback func) { +static i::Handle<i::FunctionTemplateInfo> NewFunctionTemplate( + i::Isolate* i_isolate, FunctionCallback func) { Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate); Local<FunctionTemplate> templ = FunctionTemplate::New(isolate, func); templ->ReadOnlyPrototype(); return v8::Utils::OpenHandle(*templ); } +static i::Handle<i::ObjectTemplateInfo> NewObjectTemplate( + i::Isolate* i_isolate) { + Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate); + Local<ObjectTemplate> templ = ObjectTemplate::New(isolate); + return v8::Utils::OpenHandle(*templ); +} + namespace internal { Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name, FunctionCallback func) { - Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func); + Handle<FunctionTemplateInfo> temp = NewFunctionTemplate(isolate, func); Handle<JSFunction> function = ApiNatives::InstantiateFunction(temp, name).ToHandleChecked(); DCHECK(function->shared()->HasSharedName()); @@ -1365,6 +1459,15 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object, Utils::ToLocal(setter_func), attributes); } +// Assigns a dummy instance template to the given constructor function. 
Used to +// make sure the implicit receivers for the constructors in this file have an +// instance type different from the internal one; they allocate the resulting +// object explicitly and ignore the implicit receiver. +void SetDummyInstanceTemplate(Isolate* isolate, Handle<JSFunction> fun) { + Handle<ObjectTemplateInfo> instance_template = NewObjectTemplate(isolate); + fun->shared()->get_api_func_data()->set_instance_template(*instance_template); +} + void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSGlobalObject> global = isolate->global_object(); Handle<Context> context(global->native_context(), isolate); @@ -1394,7 +1497,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1); InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1); - if (isolate->wasm_compile_streaming_callback() != nullptr) { + if (isolate->wasm_streaming_callback() != nullptr) { InstallFunc(isolate, webassembly, "compileStreaming", WebAssemblyCompileStreaming, 1); InstallFunc(isolate, webassembly, "instantiateStreaming", @@ -1410,6 +1513,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSFunction> module_constructor = InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1); context->set_wasm_module_constructor(*module_constructor); + SetDummyInstanceTemplate(isolate, module_constructor); JSFunction::EnsureHasInitialMap(module_constructor); Handle<JSObject> module_proto( JSObject::cast(module_constructor->instance_prototype()), isolate); @@ -1429,6 +1533,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSFunction> instance_constructor = InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1); context->set_wasm_instance_constructor(*instance_constructor); + SetDummyInstanceTemplate(isolate, instance_constructor); JSFunction::EnsureHasInitialMap(instance_constructor); Handle<JSObject> instance_proto( JSObject::cast(instance_constructor->instance_prototype()), isolate); @@ -1445,6 +1550,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSFunction> table_constructor = InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1); context->set_wasm_table_constructor(*table_constructor); + SetDummyInstanceTemplate(isolate, table_constructor); JSFunction::EnsureHasInitialMap(table_constructor); Handle<JSObject> table_proto( JSObject::cast(table_constructor->instance_prototype()), isolate); @@ -1462,6 +1568,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { Handle<JSFunction> memory_constructor = InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1); context->set_wasm_memory_constructor(*memory_constructor); + SetDummyInstanceTemplate(isolate, memory_constructor); JSFunction::EnsureHasInitialMap(memory_constructor); Handle<JSObject> memory_proto( JSObject::cast(memory_constructor->instance_prototype()), isolate); @@ -1474,10 +1581,15 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) { v8_str(isolate, "WebAssembly.Memory"), ro_attributes); // Setup Global - if (i::FLAG_experimental_wasm_mut_global) { + + // The context is not set up completely yet. That's why we cannot use + // {WasmFeaturesFromIsolate} and have to use {WasmFeaturesFromFlags} instead.
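// (The difference matters only for isolate-dependent bits: per
// wasm-features.cc, WasmFeaturesFromIsolate starts from the flag-derived set
// and additionally enables {threads} via Isolate::AreWasmThreadsEnabled,
// which requires a current context, exactly what is missing at this point.)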
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags(); + if (enabled_features.mut_global) { Handle<JSFunction> global_constructor = InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1); context->set_wasm_global_constructor(*global_constructor); + SetDummyInstanceTemplate(isolate, global_constructor); JSFunction::EnsureHasInitialMap(global_constructor); Handle<JSObject> global_proto( JSObject::cast(global_constructor->instance_prototype()), isolate); diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h index c1011c3f89a..db99313e07b 100644 --- a/chromium/v8/src/wasm/wasm-limits.h +++ b/chromium/v8/src/wasm/wasm-limits.h @@ -15,6 +15,8 @@ namespace v8 { namespace internal { namespace wasm { +constexpr size_t kSpecMaxWasmMemoryPages = 65536; + // The following limits are imposed by V8 on WebAssembly modules. // The limits are agreed upon with other engines for consistency. constexpr size_t kV8MaxWasmTypes = 1000000; @@ -26,9 +28,7 @@ constexpr size_t kV8MaxWasmExceptions = 1000000; constexpr size_t kV8MaxWasmExceptionTypes = 1000000; constexpr size_t kV8MaxWasmDataSegments = 100000; // Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages. -// Current limit mimics the maximum allowed allocation on an ArrayBuffer -// (2GiB - 1 page). -constexpr size_t kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB +constexpr size_t kV8MaxWasmMemoryPages = 32767; // = ~ 2 GiB constexpr size_t kV8MaxWasmStringSize = 100000; constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB constexpr size_t kV8MaxWasmFunctionSize = 7654321; @@ -42,14 +42,15 @@ constexpr size_t kV8MaxWasmTableEntries = 10000000; constexpr size_t kV8MaxWasmTables = 1; constexpr size_t kV8MaxWasmMemories = 1; -constexpr size_t kSpecMaxWasmMemoryPages = 65536; static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages, "v8 should not be more permissive than the spec"); constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu; -constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize; -static_assert(kV8MaxWasmMemoryBytes <= std::numeric_limits<int32_t>::max(), - "max memory bytes should fit in int32_t"); +constexpr uint64_t kV8MaxWasmMemoryBytes = + kV8MaxWasmMemoryPages * uint64_t{kWasmPageSize}; + +constexpr uint64_t kSpecMaxWasmMemoryBytes = + kSpecMaxWasmMemoryPages * uint64_t{kWasmPageSize}; constexpr uint64_t kWasmMaxHeapOffset = static_cast<uint64_t>( diff --git a/chromium/v8/src/wasm/wasm-linkage.h b/chromium/v8/src/wasm/wasm-linkage.h index 65fc0a26006..92390cc5567 100644 --- a/chromium/v8/src/wasm/wasm-linkage.h +++ b/chromium/v8/src/wasm/wasm-linkage.h @@ -124,17 +124,85 @@ class LinkageAllocator { const DoubleRegister* fp, int fpc) : gp_count_(gpc), gp_regs_(gp), fp_count_(fpc), fp_regs_(fp) {} - bool has_more_gp_regs() const { return gp_offset_ < gp_count_; } - bool has_more_fp_regs() const { return fp_offset_ < fp_count_; } + bool CanAllocateGP() const { return gp_offset_ < gp_count_; } + bool CanAllocateFP(MachineRepresentation rep) const { +#if V8_TARGET_ARCH_ARM + switch (rep) { + case MachineRepresentation::kFloat32: + return extra_float_reg >= 0 || fp_offset_ < fp_count_; + case MachineRepresentation::kFloat64: + return extra_double_reg >= 0 || fp_offset_ < fp_count_; + case MachineRepresentation::kSimd128: + return ((fp_offset_ + 1) & ~1) + 1 < fp_count_; + default: + UNREACHABLE(); + return false; + } +#endif + return fp_offset_ < fp_count_; + } - Register NextGpReg() { + int NextGpReg() { DCHECK_LT(gp_offset_, gp_count_); 
- return gp_regs_[gp_offset_++]; + return gp_regs_[gp_offset_++].code(); } - DoubleRegister NextFpReg() { + int NextFpReg(MachineRepresentation rep) { +#if V8_TARGET_ARCH_ARM + switch (rep) { + case MachineRepresentation::kFloat32: { + // Use the extra S-register if we can. + if (extra_float_reg >= 0) { + int reg_code = extra_float_reg; + extra_float_reg = -1; + return reg_code; + } + // Allocate a D-register and split into 2 float registers. + int d_reg_code = NextFpReg(MachineRepresentation::kFloat64); + DCHECK_GT(16, d_reg_code); // D-registers 16 - 31 can't split. + int reg_code = d_reg_code * 2; + // Save the extra S-register. + DCHECK_EQ(-1, extra_float_reg); + extra_float_reg = reg_code + 1; + return reg_code; + } + case MachineRepresentation::kFloat64: { + // Use an extra D-register if we can. + if (extra_double_reg >= 0) { + int reg_code = extra_double_reg; + extra_double_reg = -1; + return reg_code; + } + DCHECK_LT(fp_offset_, fp_count_); + return fp_regs_[fp_offset_++].code(); + } + case MachineRepresentation::kSimd128: { + // Q-register must be an even-odd pair, so we must try to allocate at + // the end, not using extra_double_reg. If we are at an odd D-register, + // skip past it (saving it to extra_double_reg). + DCHECK_LT(((fp_offset_ + 1) & ~1) + 1, fp_count_); + int d_reg1_code = fp_regs_[fp_offset_++].code(); + if (d_reg1_code % 2 != 0) { + // If we're misaligned then extra_double_reg must have been consumed. + DCHECK_EQ(-1, extra_double_reg); + int odd_double_reg = d_reg1_code; + d_reg1_code = fp_regs_[fp_offset_++].code(); + extra_double_reg = odd_double_reg; + } + // Combine the current D-register with the next to form a Q-register. + int d_reg2_code = fp_regs_[fp_offset_++].code(); + DCHECK_EQ(0, d_reg1_code % 2); + DCHECK_EQ(d_reg1_code + 1, d_reg2_code); + USE(d_reg2_code); + return d_reg1_code / 2; + } + default: + UNREACHABLE(); + } +#else DCHECK_LT(fp_offset_, fp_count_); - return fp_regs_[fp_offset_++]; + return fp_regs_[fp_offset_++].code(); +#endif } // Stackslots are counted upwards starting from 0 (or the offset set by @@ -172,6 +240,14 @@ class LinkageAllocator { int fp_offset_ = 0; const DoubleRegister* const fp_regs_; +#if V8_TARGET_ARCH_ARM + // ARM FP register aliasing may require splitting or merging double registers. + // Track fragments of registers below fp_offset_ here. There can only be one + // extra float and double register. 
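// A worked example of the aliasing scheme, assuming the ARM register model
// (s2k and s2k+1 alias dk; d2k and d2k+1 form qk): NextFpReg(kFloat32)
// splits the next D-register dk, handing out s(2k) and parking s(2k+1) in
// extra_float_reg for the next float request; NextFpReg(kSimd128) parks an
// odd dk in extra_double_reg so the Q-register is built from an even/odd
// D-register pair.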
+ int extra_float_reg = -1; + int extra_double_reg = -1; +#endif + int stack_offset_ = 0; }; diff --git a/chromium/v8/src/wasm/wasm-memory.cc b/chromium/v8/src/wasm/wasm-memory.cc index 7f130a6c2a1..f7cc70a9e74 100644 --- a/chromium/v8/src/wasm/wasm-memory.cc +++ b/chromium/v8/src/wasm/wasm-memory.cc @@ -4,10 +4,12 @@ #include <limits> -#include "src/wasm/wasm-memory.h" +#include "src/heap/heap-inl.h" #include "src/objects-inl.h" +#include "src/objects/js-array-buffer-inl.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" +#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-module.h" namespace v8 { namespace internal { namespace wasm { namespace { constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB +void AddAllocationStatusSample(Isolate* isolate, + WasmMemoryTracker::AllocationStatus status) { + isolate->counters()->wasm_memory_allocation_result()->AddSample( + static_cast<int>(status)); +} + void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, size_t size, bool require_full_guard_regions, void** allocation_base, @@ -31,6 +39,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, // // To protect against 32-bit integer overflow issues, we also protect the 2GiB // before the valid part of the memory buffer. + // TODO(7881): do not use static_cast<uint32_t>() here *allocation_length = require_full_guard_regions ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize()) @@ -61,8 +70,8 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, if (FLAG_abort_on_stack_or_string_length_overflow) { FATAL("could not allocate wasm memory"); } - memory_tracker->AddAllocationStatusSample( - AllocationStatus::kAddressSpaceLimitReachedFailure); + AddAllocationStatusSample( + heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure); return nullptr; } // Collect garbage and retry. @@ -77,8 +86,8 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, if (*allocation_base != nullptr) break; if (trial == kAllocationRetries) { memory_tracker->ReleaseReservation(*allocation_length); - memory_tracker->AddAllocationStatusSample( - AllocationStatus::kOtherFailure); + AddAllocationStatusSample(heap->isolate(), + AllocationStatus::kOtherFailure); return nullptr; } heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); @@ -99,11 +108,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, } } - memory_tracker->RegisterAllocation(*allocation_base, *allocation_length, - memory, size); - memory_tracker->AddAllocationStatusSample( - did_retry ? AllocationStatus::kSuccessAfterRetry - : AllocationStatus::kSuccess); + memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base, + *allocation_length, memory, size); + AddAllocationStatusSample(heap->isolate(), + did_retry ? AllocationStatus::kSuccessAfterRetry + : AllocationStatus::kSuccess); return memory; } } // namespace @@ -131,14 +140,17 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) { constexpr size_t kAddressSpaceLimit = 0x90000000; // 2 GiB + 256 MiB #endif - while (true) { - size_t old_count = reserved_address_space_.load(); - if (kAddressSpaceLimit - old_count < num_bytes) return false; + int retries = 5; // cmpxchg can fail, retry some number of times.
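// (Background on the bound: compare_exchange_weak may fail spuriously even
// when {reserved_address_space_} still holds {old_count}, so the loop below
// retries a fixed number of times instead of spinning unconditionally.)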
+ do { + size_t old_count = reserved_address_space_; + if ((kAddressSpaceLimit - old_count) < num_bytes) return false; if (reserved_address_space_.compare_exchange_weak(old_count, old_count + num_bytes)) { return true; } - } + } while (retries-- > 0); + + return false; } void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) { @@ -147,14 +159,15 @@ void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) { DCHECK_LE(num_bytes, old_reserved); } -void WasmMemoryTracker::RegisterAllocation(void* allocation_base, +void WasmMemoryTracker::RegisterAllocation(Isolate* isolate, + void* allocation_base, size_t allocation_length, void* buffer_start, size_t buffer_length) { base::LockGuard<base::Mutex> scope_lock(&mutex_); allocated_address_space_ += allocation_length; - AddAddressSpaceSample(); + AddAddressSpaceSample(isolate); allocations_.emplace(buffer_start, AllocationData{allocation_base, allocation_length, @@ -162,12 +175,7 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base, } WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation( - const void* buffer_start) { - return InternalReleaseAllocation(buffer_start); -} - -WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation( - const void* buffer_start) { + Isolate* isolate, const void* buffer_start) { base::LockGuard<base::Mutex> scope_lock(&mutex_); auto find_result = allocations_.find(buffer_start); @@ -179,7 +187,10 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation( DCHECK_LE(num_bytes, allocated_address_space_); reserved_address_space_ -= num_bytes; allocated_address_space_ -= num_bytes; - AddAddressSpaceSample(); + // ReleaseAllocation might be called with a nullptr as isolate if the + // embedder is releasing the allocation and not a specific isolate. This + // happens if the allocation was shared between multiple isolates (threads). + if (isolate) AddAddressSpaceSample(isolate); AllocationData allocation_data = find_result->second; allocations_.erase(find_result); @@ -218,28 +229,21 @@ bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) { return start + kWasmMaxHeapOffset < limit; } -bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) { +bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate, + const void* buffer_start) { if (IsWasmMemory(buffer_start)) { - const AllocationData allocation = ReleaseAllocation(buffer_start); + const AllocationData allocation = ReleaseAllocation(isolate, buffer_start); CHECK(FreePages(allocation.allocation_base, allocation.allocation_length)); return true; } return false; } -void WasmMemoryTracker::AddAllocationStatusSample(AllocationStatus status) { - if (allocation_result_) { - allocation_result_->AddSample(static_cast<int>(status)); - } -} - -void WasmMemoryTracker::AddAddressSpaceSample() { - if (address_space_usage_mb_) { - // Report address space usage in MiB so the full range fits in an int on all - // platforms. - address_space_usage_mb_->AddSample( - static_cast<int>(allocated_address_space_ >> 20)); - } +void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) { + // Report address space usage in MiB so the full range fits in an int on all + // platforms. 
+ isolate->counters()->wasm_address_space_usage_mb()->AddSample( + static_cast<int>(allocated_address_space_ >> 20)); } Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store, @@ -247,11 +251,9 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store, SharedFlag shared) { Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(shared, TENURED); - DCHECK_GE(kMaxInt, size); - if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads); constexpr bool is_wasm_memory = true; - JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, - static_cast<int>(size), shared, is_wasm_memory); + JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size, + shared, is_wasm_memory); buffer->set_is_neuterable(false); buffer->set_is_growable(true); return buffer; @@ -259,13 +261,10 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store, MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size, SharedFlag shared) { - // Check against kMaxInt, since the byte length is stored as int in the - // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command - // line, and we don't want to fail a CHECK then. - if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) { - // TODO(titzer): lift restriction on maximum memory allocated here. - return {}; - } + // Enforce engine-limited maximum allocation size. + if (size > kV8MaxWasmMemoryBytes) return {}; + // Enforce flag-limited maximum allocation size. + if (size > (FLAG_wasm_max_mem_pages * uint64_t{kWasmPageSize})) return {}; WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker(); diff --git a/chromium/v8/src/wasm/wasm-memory.h b/chromium/v8/src/wasm/wasm-memory.h index 968c1b6f042..d95f7a88c83 100644 --- a/chromium/v8/src/wasm/wasm-memory.h +++ b/chromium/v8/src/wasm/wasm-memory.h @@ -11,13 +11,10 @@ #include "src/base/platform/mutex.h" #include "src/flags.h" #include "src/handles.h" -#include "src/objects/js-array.h" +#include "src/objects/js-array-buffer.h" namespace v8 { namespace internal { - -class Histogram; // defined in counters.h - namespace wasm { // The {WasmMemoryTracker} tracks reservations and allocations for wasm memory @@ -35,8 +32,9 @@ class WasmMemoryTracker { // and reserve {num_bytes} bytes), false otherwise. bool ReserveAddressSpace(size_t num_bytes); - void RegisterAllocation(void* allocation_base, size_t allocation_length, - void* buffer_start, size_t buffer_length); + void RegisterAllocation(Isolate* isolate, void* allocation_base, + size_t allocation_length, void* buffer_start, + size_t buffer_length); struct AllocationData { void* allocation_base = nullptr; @@ -69,7 +67,7 @@ class WasmMemoryTracker { void ReleaseReservation(size_t num_bytes); // Removes an allocation from the tracker. - AllocationData ReleaseAllocation(const void* buffer_start); + AllocationData ReleaseAllocation(Isolate* isolate, const void* buffer_start); bool IsWasmMemory(const void* buffer_start); @@ -84,14 +82,7 @@ class WasmMemoryTracker { // Checks if a buffer points to a Wasm memory and if so does any necessary // work to reclaim the buffer. If this function returns false, the caller must // free the buffer manually. 
- bool FreeMemoryIfIsWasmMemory(const void* buffer_start); - - void SetAllocationResultHistogram(Histogram* allocation_result) { - allocation_result_ = allocation_result; - } - void SetAddressSpaceUsageHistogram(Histogram* address_space_usage) { - address_space_usage_mb_ = address_space_usage; - } + bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start); // Allocation results are reported to UMA // @@ -107,11 +98,8 @@ class WasmMemoryTracker { kOtherFailure // Failed for an unknown reason }; - void AddAllocationStatusSample(AllocationStatus status); - private: - AllocationData InternalReleaseAllocation(const void* buffer_start); - void AddAddressSpaceSample(); + void AddAddressSpaceSample(Isolate* isolate); // Clients use a two-part process. First they "reserve" the address space, // which signifies an intent to actually allocate it. This determines whether @@ -133,10 +121,6 @@ class WasmMemoryTracker { // buffer, rather than by the start of the allocation. std::unordered_map<const void*, AllocationData> allocations_; - // Keep pointers to - Histogram* allocation_result_ = nullptr; - Histogram* address_space_usage_mb_ = nullptr; // in MiB - DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker); }; diff --git a/chromium/v8/src/wasm/wasm-module-builder.h b/chromium/v8/src/wasm/wasm-module-builder.h index db70502886d..131cda747c2 100644 --- a/chromium/v8/src/wasm/wasm-module-builder.h +++ b/chromium/v8/src/wasm/wasm-module-builder.h @@ -8,6 +8,7 @@ #include "src/signature.h" #include "src/zone/zone-containers.h" +#include "src/v8memory.h" #include "src/wasm/leb-helper.h" #include "src/wasm/local-decl-encoder.h" #include "src/wasm/wasm-opcodes.h" diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc index bd23345870c..ab603bfb3a8 100644 --- a/chromium/v8/src/wasm/wasm-module.cc +++ b/chromium/v8/src/wasm/wasm-module.cc @@ -5,12 +5,13 @@ #include <functional> #include <memory> -#include "src/api.h" +#include "src/api-inl.h" #include "src/assembler-inl.h" #include "src/compiler/wasm-compiler.h" #include "src/debug/interface-types.h" #include "src/frames-inl.h" #include "src/objects.h" +#include "src/objects/js-array-inl.h" #include "src/property-descriptor.h" #include "src/simulator.h" #include "src/snapshot/snapshot.h" @@ -39,8 +40,8 @@ WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes, uint32_t function_index) const { if (!function_names) { function_names.reset(new std::unordered_map<uint32_t, WireBytesRef>()); - wasm::DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(), - function_names.get()); + DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(), + function_names.get()); } auto it = function_names->find(function_index); if (it == function_names->end()) return WireBytesRef(); diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h index 3020548927f..b1020661ab0 100644 --- a/chromium/v8/src/wasm/wasm-module.h +++ b/chromium/v8/src/wasm/wasm-module.h @@ -114,9 +114,9 @@ struct WasmExport { enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin }; -#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \ - ((origin) == wasm::kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \ - : (counters)->prefix##_asm_##suffix()) +#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \ + ((origin) == kWasmOrigin ? 
(counters)->prefix##_wasm_##suffix() \ + : (counters)->prefix##_asm_##suffix()) struct ModuleWireBytes; diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h index 96bb622afce..481d2274bfd 100644 --- a/chromium/v8/src/wasm/wasm-objects-inl.h +++ b/chromium/v8/src/wasm/wasm-objects-inl.h @@ -9,9 +9,10 @@ #include "src/contexts-inl.h" #include "src/heap/heap-inl.h" -#include "src/objects/js-array-inl.h" +#include "src/objects/js-array-buffer-inl.h" #include "src/objects/managed.h" #include "src/v8memory.h" +#include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-module.h" // Has to be the last include (doesn't have include guards) @@ -85,8 +86,7 @@ ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset) // WasmMemoryObject ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset) SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset) -OPTIONAL_ACCESSORS(WasmMemoryObject, instances, FixedArrayOfWeakCells, - kInstancesOffset) +OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakArrayList, kInstancesOffset) // WasmGlobalObject ACCESSORS(WasmGlobalObject, array_buffer, JSArrayBuffer, kArrayBufferOffset) @@ -108,36 +108,42 @@ Address WasmGlobalObject::address() const { return Address(array_buffer()->backing_store()) + offset(); } -int32_t WasmGlobalObject::GetI32() { return Memory::int32_at(address()); } +int32_t WasmGlobalObject::GetI32() { + return ReadLittleEndianValue<int32_t>(address()); +} -int64_t WasmGlobalObject::GetI64() { return Memory::int64_at(address()); } +int64_t WasmGlobalObject::GetI64() { + return ReadLittleEndianValue<int64_t>(address()); +} -float WasmGlobalObject::GetF32() { return Memory::float_at(address()); } +float WasmGlobalObject::GetF32() { + return ReadLittleEndianValue<float>(address()); +} -double WasmGlobalObject::GetF64() { return Memory::double_at(address()); } +double WasmGlobalObject::GetF64() { + return ReadLittleEndianValue<double>(address()); +} void WasmGlobalObject::SetI32(int32_t value) { - Memory::int32_at(address()) = value; + WriteLittleEndianValue<int32_t>(address(), value); } void WasmGlobalObject::SetI64(int64_t value) { - Memory::int64_at(address()) = value; + WriteLittleEndianValue<int64_t>(address(), value); } void WasmGlobalObject::SetF32(float value) { - Memory::float_at(address()) = value; + WriteLittleEndianValue<float>(address(), value); } void WasmGlobalObject::SetF64(double value) { - Memory::double_at(address()) = value; + WriteLittleEndianValue<double>(address(), value); } // WasmInstanceObject PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset) -PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, uint32_t, - kMemorySizeOffset) -PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, uint32_t, - kMemoryMaskOffset) +PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset) +PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, roots_array_address, Address, kRootsArrayAddressOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address, @@ -156,8 +162,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids, uint32_t*, kIndirectFunctionTableSigIdsOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets, Address*, kIndirectFunctionTableTargetsOffset) -PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_adjusted_start, Address, - kJumpTableAdjustedStartOffset) 
+PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address, + kJumpTableStartOffset) ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject, kModuleObjectOffset) @@ -207,6 +213,8 @@ ImportedFunctionEntry::ImportedFunctionEntry( ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset) ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject, kInstanceOffset) +SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset, + kJumpTableOffsetOffset) SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset) // WasmDebugInfo diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc index 7cd2fecb7f6..4cd66a81c52 100644 --- a/chromium/v8/src/wasm/wasm-objects.cc +++ b/chromium/v8/src/wasm/wasm-objects.cc @@ -12,6 +12,7 @@ #include "src/debug/debug-interface.h" #include "src/objects-inl.h" #include "src/objects/debug-objects-inl.h" +#include "src/objects/shared-function-info.h" #include "src/trap-handler/trap-handler.h" #include "src/wasm/jump-table-assembler.h" #include "src/wasm/module-compiler.h" @@ -177,9 +178,10 @@ enum DispatchTableElements : int { // static Handle<WasmModuleObject> WasmModuleObject::New( - Isolate* isolate, std::shared_ptr<const wasm::WasmModule> shared_module, - wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes, - Handle<Script> script, Handle<ByteArray> asm_js_offset_table) { + Isolate* isolate, const wasm::WasmFeatures& enabled, + std::shared_ptr<const wasm::WasmModule> shared_module, wasm::ModuleEnv& env, + OwnedVector<const uint8_t> wire_bytes, Handle<Script> script, + Handle<ByteArray> asm_js_offset_table) { DCHECK_EQ(shared_module.get(), env.module); // Create a new {NativeModule} first. @@ -187,7 +189,7 @@ Handle<WasmModuleObject> WasmModuleObject::New( isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize( env.module); auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule( - isolate, native_memory_estimate, + isolate, enabled, native_memory_estimate, wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module), env); native_module->set_wire_bytes(std::move(wire_bytes)); @@ -210,15 +212,6 @@ Handle<WasmModuleObject> WasmModuleObject::New( static_cast<int>(native_module->module()->num_exported_functions); Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(export_wrapper_size, TENURED); - Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast( - isolate->factory()->NewJSObject(isolate->wasm_module_constructor())); - module_object->set_export_wrappers(*export_wrappers); - if (script->type() == Script::TYPE_WASM) { - script->set_wasm_module_object(*module_object); - } - module_object->set_script(*script); - module_object->set_weak_instance_list( - ReadOnlyRoots(isolate).empty_weak_array_list()); // Use the given shared {NativeModule}, but increase its reference count by // allocating a new {Managed<T>} that the {WasmModuleObject} references. 
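The comment above names the key ownership idea: the {WasmModuleObject} keeps the shared {NativeModule} alive through a {Managed<T>} wrapper, a garbage-collected object whose only job is to hold a std::shared_ptr and drop it when the wrapper is finalized. A minimal standalone sketch of that scheme, assuming nothing of V8's actual Managed API (ManagedBox and NativeModuleStub are illustrative names, not V8 types):

#include <iostream>
#include <memory>

// Stand-in for a GC'd wrapper object: it owns one shared_ptr reference and
// drops it when the wrapper is finalized (modeled here by the destructor).
template <typename T>
class ManagedBox {
 public:
  explicit ManagedBox(std::shared_ptr<T> ptr) : ptr_(std::move(ptr)) {}
  ~ManagedBox() { ptr_.reset(); }  // what a GC finalizer would do
  T* get() const { return ptr_.get(); }

 private:
  std::shared_ptr<T> ptr_;
};

struct NativeModuleStub {
  ~NativeModuleStub() { std::cout << "native module freed\n"; }
};

int main() {
  auto module = std::make_shared<NativeModuleStub>();
  {
    ManagedBox<NativeModuleStub> isolate_a(module);
    ManagedBox<NativeModuleStub> isolate_b(module);  // shared across isolates
  }  // both wrappers finalized here; the module is still alive
  module.reset();  // last reference gone -> "native module freed"
  return 0;
}

Because every wrapper shares the same control block, the native allocation is freed exactly once, regardless of which isolate's wrapper is finalized last.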
@@ -230,6 +223,16 @@ Handle<WasmModuleObject> WasmModuleObject::New( Handle<Managed<wasm::NativeModule>> managed_native_module = Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate, std::move(native_module)); + + Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast( + isolate->factory()->NewJSObject(isolate->wasm_module_constructor())); + module_object->set_export_wrappers(*export_wrappers); + if (script->type() == Script::TYPE_WASM) { + script->set_wasm_module_object(*module_object); + } + module_object->set_script(*script); + module_object->set_weak_instance_list( + ReadOnlyRoots(isolate).empty_weak_array_list()); module_object->set_managed_native_module(*managed_native_module); return module_object; } @@ -674,7 +677,11 @@ Handle<String> WasmModuleObject::GetFunctionName( MaybeHandle<String> name = GetFunctionNameOrNull(isolate, module_object, func_index); if (!name.is_null()) return name.ToHandleChecked(); - return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>"); + EmbeddedVector<char, 32> buffer; + int length = SNPrintF(buffer, "wasm-function[%u]", func_index); + return isolate->factory() + ->NewStringFromOneByte(Vector<uint8_t>::cast(buffer.SubVector(0, length))) + .ToHandleChecked(); } Vector<const uint8_t> WasmModuleObject::GetRawFunctionName( @@ -874,19 +881,19 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, uint32_t maximum_pages) { if (!old_buffer->is_growable()) return {}; void* old_mem_start = old_buffer->backing_store(); - uint32_t old_size = 0; - CHECK(old_buffer->byte_length()->ToUint32(&old_size)); - DCHECK_EQ(0, old_size % wasm::kWasmPageSize); - uint32_t old_pages = old_size / wasm::kWasmPageSize; - DCHECK_GE(std::numeric_limits<uint32_t>::max(), - old_size + pages * wasm::kWasmPageSize); - if (old_pages > maximum_pages || pages > maximum_pages - old_pages) return {}; - size_t new_size = - static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize; - if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize || - new_size > kMaxInt) { + size_t old_size = old_buffer->byte_length()->Number(); + CHECK_GE(wasm::kV8MaxWasmMemoryBytes, old_size); + CHECK_EQ(0, old_size % wasm::kWasmPageSize); + size_t old_pages = old_size / wasm::kWasmPageSize; + if (old_pages > maximum_pages || // already reached maximum + (pages > maximum_pages - old_pages) || // exceeds remaining + (pages > FLAG_wasm_max_mem_pages - old_pages)) { // exceeds limit return {}; } + size_t new_size = + static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize; + CHECK_GE(wasm::kV8MaxWasmMemoryBytes, new_size); + // Reusing the backing store from externalized buffers causes problems with // Blink's array buffers. The connection between the two is lost, which can // lead to Blink not knowing about the other reference to the buffer and @@ -939,19 +946,22 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, } // May GC, because SetSpecializationMemInfoFrom may GC -void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance, +void SetInstanceMemory(Handle<WasmInstanceObject> instance, Handle<JSArrayBuffer> buffer) { instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()), buffer->byte_length()->Number()); #if DEBUG - // To flush out bugs earlier, in DEBUG mode, check that all pages of the - // memory are accessible by reading and writing one byte on each page. 
- byte* mem_start = instance->memory_start(); - uintptr_t mem_size = instance->memory_size(); - for (uint32_t offset = 0; offset < mem_size; offset += wasm::kWasmPageSize) { - byte val = mem_start[offset]; - USE(val); - mem_start[offset] = val; + if (!FLAG_mock_arraybuffer_allocator) { + // To flush out bugs earlier, in DEBUG mode, check that all pages of the + // memory are accessible by reading and writing one byte on each page. + // Don't do this if the mock ArrayBuffer allocator is enabled. + byte* mem_start = instance->memory_start(); + size_t mem_size = instance->memory_size(); + for (size_t offset = 0; offset < mem_size; offset += wasm::kWasmPageSize) { + byte val = mem_start[offset]; + USE(val); + mem_start[offset] = val; + } } #endif } @@ -971,14 +981,9 @@ Handle<WasmMemoryObject> WasmMemoryObject::New( isolate->factory()->NewJSObject(memory_ctor, TENURED)); Handle<JSArrayBuffer> buffer; - if (maybe_buffer.is_null()) { + if (!maybe_buffer.ToHandle(&buffer)) { // If no buffer was provided, create a 0-length one. buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false); - } else { - buffer = maybe_buffer.ToHandleChecked(); - // Paranoid check that the buffer size makes sense. - uint32_t mem_size = 0; - CHECK(buffer->byte_length()->ToUint32(&mem_size)); } memory_obj->set_array_buffer(*buffer); memory_obj->set_maximum_pages(maximum); @@ -1019,22 +1024,22 @@ bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) { void WasmMemoryObject::AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory, Handle<WasmInstanceObject> instance) { - Handle<FixedArrayOfWeakCells> old_instances = + Handle<WeakArrayList> old_instances = memory->has_instances() - ? Handle<FixedArrayOfWeakCells>(memory->instances(), isolate) - : Handle<FixedArrayOfWeakCells>::null(); - Handle<FixedArrayOfWeakCells> new_instances = - FixedArrayOfWeakCells::Add(isolate, old_instances, instance); + ? 
Handle<WeakArrayList>(memory->instances(), isolate) + : handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(), + isolate); + Handle<WeakArrayList> new_instances = WeakArrayList::AddToEnd( + isolate, old_instances, MaybeObjectHandle::Weak(instance)); memory->set_instances(*new_instances); Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate); - SetInstanceMemory(isolate, instance, buffer); + SetInstanceMemory(instance, buffer); } -void WasmMemoryObject::RemoveInstance(Isolate* isolate, - Handle<WasmMemoryObject> memory, +void WasmMemoryObject::RemoveInstance(Handle<WasmMemoryObject> memory, Handle<WasmInstanceObject> instance) { if (memory->has_instances()) { - memory->instances()->Remove(instance); + memory->instances()->RemoveOne(MaybeObjectHandle::Weak(instance)); } } @@ -1060,14 +1065,17 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, } if (memory_object->has_instances()) { - Handle<FixedArrayOfWeakCells> instances(memory_object->instances(), - isolate); - for (int i = 0; i < instances->Length(); i++) { - Object* elem = instances->Get(i); - if (!elem->IsWasmInstanceObject()) continue; - Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem), - isolate); - SetInstanceMemory(isolate, instance, new_buffer); + Handle<WeakArrayList> instances(memory_object->instances(), isolate); + for (int i = 0; i < instances->length(); i++) { + MaybeObject* elem = instances->Get(i); + HeapObject* heap_object; + if (elem->ToWeakHeapObject(&heap_object)) { + Handle<WasmInstanceObject> instance( + WasmInstanceObject::cast(heap_object), isolate); + SetInstanceMemory(instance, new_buffer); + } else { + DCHECK(elem->IsClearedWeakHeapObject()); + } } } memory_object->set_array_buffer(*new_buffer); @@ -1194,14 +1202,25 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( return true; } -void WasmInstanceObject::SetRawMemory(byte* mem_start, uint32_t mem_size) { - DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize); - uint32_t mem_size64 = mem_size; - uint32_t mem_mask64 = base::bits::RoundUpToPowerOfTwo32(mem_size) - 1; - DCHECK_LE(mem_size, mem_mask64 + 1); +void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) { + CHECK_LE(mem_size, wasm::kV8MaxWasmMemoryBytes); +#if V8_HOST_ARCH_64_BIT + uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1; set_memory_start(mem_start); - set_memory_size(mem_size64); + set_memory_size(mem_size); set_memory_mask(mem_mask64); +#else + // Must handle memory > 2GiB specially. + CHECK_LE(mem_size, size_t{kMaxUInt32}); + uint32_t mem_mask32 = + (mem_size > 2 * size_t{GB}) + ? 0xFFFFFFFFu + : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) - + 1; + set_memory_start(mem_start); + set_memory_size(mem_size); + set_memory_mask(mem_mask32); +#endif } const WasmModule* WasmInstanceObject::module() { @@ -1264,10 +1283,8 @@ Handle<WasmInstanceObject> WasmInstanceObject::New( instance->set_module_object(*module_object); instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value()); instance->set_null_value(ReadOnlyRoots(isolate).null_value()); - instance->set_jump_table_adjusted_start( - module_object->native_module()->jump_table_start() - - wasm::JumpTableAssembler::kJumpTableSlotSize * - module->num_imported_functions); + instance->set_jump_table_start( + module_object->native_module()->jump_table_start()); // Insert the new instance into the modules weak list of instances. // TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below. 
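The mask recomputed by SetRawMemory above is worth unpacking: the byte size is rounded up to a power of two and decremented, yielding a bitmask that clamps any untrusted index into the guard-page-backed reservation even before an explicit bounds check runs; on 32-bit targets, sizes above 2GiB saturate the mask to 0xFFFFFFFF. A self-contained sketch of the computation, with a local RoundUpToPowerOfTwo standing in for base::bits::RoundUpToPowerOfTwo64:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Local stand-in for base::bits::RoundUpToPowerOfTwo64.
uint64_t RoundUpToPowerOfTwo(uint64_t value) {
  uint64_t result = 1;
  while (result < value) result <<= 1;
  return result;
}

int main() {
  // A three-page wasm memory (3 * 64KiB = 196608 bytes).
  size_t mem_size = 3 * 65536;
  uint64_t mem_mask = RoundUpToPowerOfTwo(mem_size) - 1;
  assert(mem_mask == 0x3FFFF);  // next power of two (256KiB) minus one

  // Masking keeps any index inside [0, 256KiB): an out-of-bounds access is
  // clamped into the reservation, where the real bounds check (or a guard
  // page) still catches it. The mask alone does not enforce mem_size itself.
  uint64_t wild_index = 0x12345678;
  assert((wild_index & mem_mask) <= mem_mask);
  return 0;
}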
@@ -1298,8 +1315,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) { // the next GC cycle, so we need to manually break some links (such as // the weak references from {WasmMemoryObject::instances}. if (instance->has_memory_object()) { - WasmMemoryObject::RemoveInstance(isolate, - handle(instance->memory_object(), isolate), + WasmMemoryObject::RemoveInstance(handle(instance->memory_object(), isolate), handle(instance, isolate)); } @@ -1353,11 +1369,21 @@ Handle<WasmExportedFunction> WasmExportedFunction::New( MaybeHandle<String> maybe_name, int func_index, int arity, Handle<Code> export_wrapper) { DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind()); + int num_imported_functions = instance->module()->num_imported_functions; + int jump_table_offset = -1; + if (func_index >= num_imported_functions) { + ptrdiff_t jump_table_diff = + instance->module_object()->native_module()->jump_table_offset( + func_index); + DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX); + jump_table_offset = static_cast<int>(jump_table_diff); + } Handle<WasmExportedFunctionData> function_data = Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct( WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED)); function_data->set_wrapper_code(*export_wrapper); function_data->set_instance(*instance); + function_data->set_jump_table_offset(jump_table_offset); function_data->set_function_index(func_index); Handle<String> name; if (!maybe_name.ToHandle(&name)) { diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h index ee884ec0ddb..a493f97e95b 100644 --- a/chromium/v8/src/wasm/wasm-objects.h +++ b/chromium/v8/src/wasm/wasm-objects.h @@ -30,11 +30,11 @@ class SignatureMap; class WireBytesRef; class WasmInterpreter; using FunctionSig = Signature<ValueType>; +struct WasmFeatures; } // namespace wasm class BreakPoint; class JSArrayBuffer; -class FixedArrayOfWeakCells; class SeqOneByteString; class WasmDebugInfo; class WasmInstanceObject; @@ -134,9 +134,10 @@ class WasmModuleObject : public JSObject { // Creates a new {WasmModuleObject} with a new {NativeModule} underneath. static Handle<WasmModuleObject> New( - Isolate* isolate, std::shared_ptr<const wasm::WasmModule> module, - wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes, - Handle<Script> script, Handle<ByteArray> asm_js_offset_table); + Isolate* isolate, const wasm::WasmFeatures& enabled, + std::shared_ptr<const wasm::WasmModule> module, wasm::ModuleEnv& env, + OwnedVector<const uint8_t> wire_bytes, Handle<Script> script, + Handle<ByteArray> asm_js_offset_table); // Creates a new {WasmModuleObject} for an existing {NativeModule} that is // reference counted and might be shared between multiple Isolates. @@ -174,8 +175,8 @@ class WasmModuleObject : public JSObject { uint32_t func_index); // Get the function name of the function identified by the given index. - // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a - // valid UTF-8 string. + // Returns "wasm-function[func_index]" if the function is unnamed or the + // name is not a valid UTF-8 string. static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>, uint32_t func_index); @@ -284,7 +285,7 @@ class WasmMemoryObject : public JSObject { DECL_ACCESSORS(array_buffer, JSArrayBuffer) DECL_INT_ACCESSORS(maximum_pages) - DECL_OPTIONAL_ACCESSORS(instances, FixedArrayOfWeakCells) + DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList) // Layout description. 
#define WASM_MEMORY_OBJECT_FIELDS(V) \ @@ -301,7 +302,7 @@ class WasmMemoryObject : public JSObject { static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory, Handle<WasmInstanceObject> object); // Remove an instance from the internal (weak) list. - static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory, + static void RemoveInstance(Handle<WasmMemoryObject> memory, Handle<WasmInstanceObject> object); uint32_t current_pages(); inline bool has_maximum_pages(); @@ -390,8 +391,8 @@ class WasmInstanceObject : public JSObject { DECL_ACCESSORS(null_value, Oddball) DECL_ACCESSORS(centry_stub, Code) DECL_PRIMITIVE_ACCESSORS(memory_start, byte*) - DECL_PRIMITIVE_ACCESSORS(memory_size, uint32_t) - DECL_PRIMITIVE_ACCESSORS(memory_mask, uint32_t) + DECL_PRIMITIVE_ACCESSORS(memory_size, size_t) + DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t) DECL_PRIMITIVE_ACCESSORS(roots_array_address, Address) DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address) DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address) @@ -401,7 +402,7 @@ class WasmInstanceObject : public JSObject { DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t) DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*) DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*) - DECL_PRIMITIVE_ACCESSORS(jump_table_adjusted_start, Address) + DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address) // Dispatched behavior. DECL_PRINTER(WasmInstanceObject) @@ -426,8 +427,8 @@ class WasmInstanceObject : public JSObject { V(kCEntryStubOffset, kPointerSize) \ V(kFirstUntaggedOffset, 0) /* marker */ \ V(kMemoryStartOffset, kPointerSize) /* untagged */ \ - V(kMemorySizeOffset, kUInt32Size) /* untagged */ \ - V(kMemoryMaskOffset, kUInt32Size) /* untagged */ \ + V(kMemorySizeOffset, kSizetSize) /* untagged */ \ + V(kMemoryMaskOffset, kSizetSize) /* untagged */ \ V(kRootsArrayAddressOffset, kPointerSize) /* untagged */ \ V(kStackLimitAddressOffset, kPointerSize) /* untagged */ \ V(kRealStackLimitAddressOffset, kPointerSize) /* untagged */ \ @@ -436,7 +437,7 @@ class WasmInstanceObject : public JSObject { V(kImportedMutableGlobalsOffset, kPointerSize) /* untagged */ \ V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \ V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \ - V(kJumpTableAdjustedStartOffset, kPointerSize) /* untagged */ \ + V(kJumpTableStartOffset, kPointerSize) /* untagged */ \ V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \ V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \ V(kSize, 0) @@ -452,7 +453,7 @@ class WasmInstanceObject : public JSObject { bool has_indirect_function_table(); - void SetRawMemory(byte* mem_start, uint32_t mem_size); + void SetRawMemory(byte* mem_start, size_t mem_size); // Get the debug info associated with the given wasm object. // If no debug info exists yet, it is created automatically. @@ -496,6 +497,7 @@ class WasmExportedFunctionData : public Struct { public: DECL_ACCESSORS(wrapper_code, Code); DECL_ACCESSORS(instance, WasmInstanceObject) + DECL_INT_ACCESSORS(jump_table_offset); DECL_INT_ACCESSORS(function_index); DECL_CAST(WasmExportedFunctionData) @@ -505,10 +507,11 @@ class WasmExportedFunctionData : public Struct { DECL_VERIFIER(WasmExportedFunctionData) // Layout description. 
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \ - V(kWrapperCodeOffset, kPointerSize) \ - V(kInstanceOffset, kPointerSize) \ - V(kFunctionIndexOffset, kPointerSize) /* Smi */ \ +#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \ + V(kWrapperCodeOffset, kPointerSize) \ + V(kInstanceOffset, kPointerSize) \ + V(kJumpTableOffsetOffset, kPointerSize) /* Smi */ \ + V(kFunctionIndexOffset, kPointerSize) /* Smi */ \ V(kSize, 0) DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, @@ -516,8 +519,11 @@ class WasmExportedFunctionData : public Struct { #undef WASM_EXPORTED_FUNCTION_DATA_FIELDS }; -class WasmDebugInfo : public Struct { +class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject { public: + using NeverReadOnlySpaceObject::GetHeap; + using NeverReadOnlySpaceObject::GetIsolate; + DECL_ACCESSORS(wasm_instance, WasmInstanceObject) DECL_ACCESSORS(interpreter_handle, Object); DECL_ACCESSORS(interpreted_functions, Object); @@ -571,8 +577,9 @@ class WasmDebugInfo : public Struct { // interpreter for unwinding and frame inspection. // Returns true if exited regularly, false if a trap occurred. In the latter // case, a pending exception will have been set on the isolate. - bool RunInterpreter(Address frame_pointer, int func_index, - Address arg_buffer); + static bool RunInterpreter(Isolate* isolate, Handle<WasmDebugInfo>, + Address frame_pointer, int func_index, + Address arg_buffer); // Get the stack of the wasm interpreter as pairs of <function index, byte // offset>. The list is ordered bottom-to-top, i.e. caller before callee. diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc index 22c906e2706..650cb629f62 100644 --- a/chromium/v8/src/wasm/wasm-opcodes.cc +++ b/chromium/v8/src/wasm/wasm-opcodes.cc @@ -374,7 +374,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) { bool IsJSCompatibleSignature(const FunctionSig* sig) { for (auto type : sig->all()) { - if (type == wasm::kWasmI64 || type == wasm::kWasmS128) return false; + if (type == kWasmI64 || type == kWasmS128) return false; } return sig->return_count() <= 1; } diff --git a/chromium/v8/src/wasm/wasm-result.cc b/chromium/v8/src/wasm/wasm-result.cc index 3fe63b9c71d..314f3207528 100644 --- a/chromium/v8/src/wasm/wasm-result.cc +++ b/chromium/v8/src/wasm/wasm-result.cc @@ -145,7 +145,7 @@ void ErrorThrower::Reset() { error_msg_.clear(); } -ErrorThrower::ErrorThrower(ErrorThrower&& other) +ErrorThrower::ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT : isolate_(other.isolate_), context_(other.context_), error_type_(other.error_type_), diff --git a/chromium/v8/src/wasm/wasm-result.h b/chromium/v8/src/wasm/wasm-result.h index a1e5a885af1..694a8b7f761 100644 --- a/chromium/v8/src/wasm/wasm-result.h +++ b/chromium/v8/src/wasm/wasm-result.h @@ -26,10 +26,10 @@ class V8_EXPORT_PRIVATE ResultBase { protected: ResultBase() = default; - ResultBase& operator=(ResultBase&& other) = default; + ResultBase& operator=(ResultBase&& other) V8_NOEXCEPT = default; public: - ResultBase(ResultBase&& other) + ResultBase(ResultBase&& other) V8_NOEXCEPT : error_offset_(other.error_offset_), error_msg_(std::move(other.error_msg_)) {} @@ -73,10 +73,10 @@ class Result : public ResultBase { explicit Result(S&& value) : val(std::forward<S>(value)) {} template <typename S> - Result(Result<S>&& other) - : ResultBase(std::move(other)), val(std::move(other.val)) {} + Result(Result<S>&& other) V8_NOEXCEPT : ResultBase(std::move(other)), + val(std::move(other.val)) {} - Result& operator=(Result&& other) = 
default; + Result& operator=(Result&& other) V8_NOEXCEPT = default; static Result<T> PRINTF_FORMAT(1, 2) Error(const char* format, ...) { va_list args; @@ -99,7 +99,7 @@ class V8_EXPORT_PRIVATE ErrorThrower { ErrorThrower(Isolate* isolate, const char* context) : isolate_(isolate), context_(context) {} // Explicitly allow move-construction. Disallow copy (below). - ErrorThrower(ErrorThrower&& other); + ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT; ~ErrorThrower(); PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...); @@ -123,6 +123,7 @@ class V8_EXPORT_PRIVATE ErrorThrower { bool error() const { return error_type_ != kNone; } bool wasm_error() { return error_type_ >= kFirstWasmError; } + const char* error_msg() { return error_msg_.c_str(); } Isolate* isolate() const { return isolate_; } diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc index 3c21a5d2237..2edc412afa8 100644 --- a/chromium/v8/src/wasm/wasm-serialization.cc +++ b/chromium/v8/src/wasm/wasm-serialization.cc @@ -146,12 +146,12 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) { #elif V8_TARGET_ARCH_ARM64 Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc()); if (instr->IsLdrLiteralX()) { - Memory::Address_at(rinfo->constant_pool_entry_address()) = + Memory<Address>(rinfo->constant_pool_entry_address()) = static_cast<Address>(tag); } else { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); instr->SetBranchImmTarget( - reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstructionSize)); + reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstrSize)); } #else Address addr = static_cast<Address>(tag); @@ -172,10 +172,10 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) { Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc()); if (instr->IsLdrLiteralX()) { return static_cast<uint32_t>( - Memory::Address_at(rinfo->constant_pool_entry_address())); + Memory<Address>(rinfo->constant_pool_entry_address())); } else { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); - return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize); + return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize); } #else Address addr; @@ -211,7 +211,8 @@ constexpr size_t kCodeHeaderSize = class V8_EXPORT_PRIVATE NativeModuleSerializer { public: NativeModuleSerializer() = delete; - NativeModuleSerializer(Isolate*, const NativeModule*); + NativeModuleSerializer(Isolate*, const NativeModule*, + Vector<WasmCode* const>); size_t Measure() const; bool Write(Writer* writer); @@ -223,6 +224,7 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer { Isolate* const isolate_; const NativeModule* const native_module_; + Vector<WasmCode* const> code_table_; bool write_called_; // Reverse lookup tables for embedded addresses. @@ -232,9 +234,13 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer { DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer); }; -NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate, - const NativeModule* module) - : isolate_(isolate), native_module_(module), write_called_(false) { +NativeModuleSerializer::NativeModuleSerializer( + Isolate* isolate, const NativeModule* module, + Vector<WasmCode* const> code_table) + : isolate_(isolate), + native_module_(module), + code_table_(code_table), + write_called_(false) { DCHECK_NOT_NULL(isolate_); DCHECK_NOT_NULL(native_module_); // TODO(mtrofin): persist the export wrappers. 
Ideally, we'd only persist @@ -263,7 +269,7 @@ size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const { size_t NativeModuleSerializer::Measure() const { size_t size = kHeaderSize; - for (WasmCode* code : native_module_->code_table()) { + for (WasmCode* code : code_table_) { size += MeasureCode(code); } return size; @@ -370,26 +376,31 @@ bool NativeModuleSerializer::Write(Writer* writer) { WriteHeader(writer); - for (WasmCode* code : native_module_->code_table()) { + for (WasmCode* code : code_table_) { WriteCode(code, writer); } return true; } -size_t GetSerializedNativeModuleSize(Isolate* isolate, - NativeModule* native_module) { - NativeModuleSerializer serializer(isolate, native_module); +WasmSerializer::WasmSerializer(Isolate* isolate, NativeModule* native_module) + : isolate_(isolate), + native_module_(native_module), + code_table_(native_module->SnapshotCodeTable()) {} + +size_t WasmSerializer::GetSerializedNativeModuleSize() const { + Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size()); + NativeModuleSerializer serializer(isolate_, native_module_, code_table); return kVersionSize + serializer.Measure(); } -bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module, - Vector<byte> buffer) { - NativeModuleSerializer serializer(isolate, native_module); +bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const { + Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size()); + NativeModuleSerializer serializer(isolate_, native_module_, code_table); size_t measured_size = kVersionSize + serializer.Measure(); if (buffer.size() < measured_size) return false; Writer writer(buffer); - WriteVersion(isolate, &writer); + WriteVersion(isolate_, &writer); if (!serializer.Write(&writer)) return false; DCHECK_EQ(measured_size, writer.bytes_written()); @@ -534,9 +545,11 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule( if (!IsSupportedVersion(isolate, data)) { return {}; } - ModuleResult decode_result = - SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false, - i::wasm::kWasmOrigin); + // TODO(titzer): module features should be part of the serialization format. + WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate); + ModuleResult decode_result = DecodeWasmModule( + enabled_features, wire_bytes.start(), wire_bytes.end(), false, + i::wasm::kWasmOrigin, isolate->counters(), isolate->allocator()); if (!decode_result.ok()) return {}; CHECK_NOT_NULL(decode_result.val); WasmModule* module = decode_result.val.get(); @@ -546,14 +559,14 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule( // handler was used or not when serializing. UseTrapHandler use_trap_handler = trap_handler::IsTrapHandlerEnabled() ? 
kUseTrapHandler : kNoTrapHandler;
-  wasm::ModuleEnv env(module, use_trap_handler,
-                      wasm::RuntimeExceptionSupport::kRuntimeExceptionSupport);
+  ModuleEnv env(module, use_trap_handler,
+                RuntimeExceptionSupport::kRuntimeExceptionSupport);
 
   OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
 
   Handle<WasmModuleObject> module_object = WasmModuleObject::New(
-      isolate, std::move(decode_result.val), env, std::move(wire_bytes_copy),
-      script, Handle<ByteArray>::null());
+      isolate, enabled_features, std::move(decode_result.val), env,
+      std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
 
   NativeModule* native_module = module_object->native_module();
   if (FLAG_wasm_lazy_compilation) {
diff --git a/chromium/v8/src/wasm/wasm-serialization.h b/chromium/v8/src/wasm/wasm-serialization.h
index 352195b2b0b..436a369fb6e 100644
--- a/chromium/v8/src/wasm/wasm-serialization.h
+++ b/chromium/v8/src/wasm/wasm-serialization.h
@@ -11,12 +11,27 @@
 namespace v8 {
 namespace internal {
 namespace wasm {
 
-size_t GetSerializedNativeModuleSize(Isolate* isolate,
-                                     NativeModule* native_module);
-
-bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module,
-                           Vector<byte> buffer);
-
+// Support to serialize WebAssembly {NativeModule} objects. This class is
+// thread-safe in that it takes a consistent snapshot of the module state at
+// construction, allowing other threads to mutate the module concurrently.
+class WasmSerializer {
+ public:
+  WasmSerializer(Isolate* isolate, NativeModule* native_module);
+
+  // Measure the buffer size required for serialization.
+  size_t GetSerializedNativeModuleSize() const;
+
+  // Serialize the {NativeModule} into the provided {buffer}. Returns true on
+  // success and false if the given buffer is too small for serialization.
+  bool SerializeNativeModule(Vector<byte> buffer) const;
+
+ private:
+  Isolate* isolate_;
+  NativeModule* native_module_;
+  std::vector<WasmCode*> code_table_;
+};
+
+// Support to deserialize WebAssembly {NativeModule} objects.
 MaybeHandle<WasmModuleObject> DeserializeNativeModule(
     Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
diff --git a/chromium/v8/src/wasm/wasm-text.cc b/chromium/v8/src/wasm/wasm-text.cc
index 8efccae9ccd..9885f18ce19 100644
--- a/chromium/v8/src/wasm/wasm-text.cc
+++ b/chromium/v8/src/wasm/wasm-text.cc
@@ -100,7 +100,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
       case kExprIf:
       case kExprBlock:
       case kExprTry: {
-        BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+        BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+                                                     i.pc());
         os << WasmOpcodes::OpcodeName(opcode);
         if (imm.type == kWasmVar) {
           os << " (type " << imm.sig_index << ")";
diff --git a/chromium/v8/src/wasm/wasm-tier.h b/chromium/v8/src/wasm/wasm-tier.h
new file mode 100644
index 00000000000..6445608193f
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-tier.h
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_TIER_H_
+#define V8_WASM_WASM_TIER_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// All the tiers of WASM execution.
+enum class ExecutionTier {
+  kInterpreter,  // interpreter (used to provide debugging services).
+  kBaseline,     // Liftoff.
+  kOptimized     // TurboFan.
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_WASM_TIER_H_
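With the tiers defined in one shared header, call sites can dispatch on ExecutionTier rather than on ad-hoc booleans. A hypothetical helper (ExecutionTierToString is not part of this patch) built on the enum copied verbatim from the new file:

#include <iostream>

// Copied from src/wasm/wasm-tier.h.
enum class ExecutionTier {
  kInterpreter,  // interpreter (used to provide debugging services).
  kBaseline,     // Liftoff.
  kOptimized     // TurboFan.
};

// Hypothetical helper: map a tier to a human-readable name for logging.
const char* ExecutionTierToString(ExecutionTier tier) {
  switch (tier) {
    case ExecutionTier::kInterpreter:
      return "interpreter";
    case ExecutionTier::kBaseline:
      return "liftoff";
    case ExecutionTier::kOptimized:
      return "turbofan";
  }
  return "unknown";
}

int main() {
  std::cout << ExecutionTierToString(ExecutionTier::kBaseline) << "\n";
  return 0;
}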
diff --git a/chromium/v8/src/wasm/wasm-value.h b/chromium/v8/src/wasm/wasm-value.h
index 602135135c1..c1538e85238 100644
--- a/chromium/v8/src/wasm/wasm-value.h
+++ b/chromium/v8/src/wasm/wasm-value.h
@@ -6,7 +6,7 @@
 #define V8_WASM_WASM_VALUE_H_
 
 #include "src/boxed-float.h"
-#include "src/utils.h"
+#include "src/v8memory.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/zone/zone-containers.h"
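Finally, looping back to the wasm-memory.cc change at the top of this patch: WasmMemoryTracker::ReserveAddressSpace now gives up after a handful of failed compare-and-swap attempts instead of spinning until it wins, so heavy contention degrades into a reservation failure (which the allocator answers with a GC and a retry) rather than an unbounded loop. A standalone model of that pattern, with simplified names and the 0x90000000 limit visible in the diff:

#include <atomic>
#include <cassert>
#include <cstddef>

constexpr size_t kAddressSpaceLimit = 0x90000000;  // 2 GiB + 256 MiB
std::atomic<size_t> reserved_address_space{0};

// Bounded compare-and-swap retry, mirroring ReserveAddressSpace: fail either
// when the limit would be exceeded or after a few spurious failures of
// compare_exchange_weak, rather than looping forever.
bool TryReserve(size_t num_bytes) {
  int retries = 5;
  do {
    size_t old_count = reserved_address_space.load();
    if (kAddressSpaceLimit - old_count < num_bytes) return false;  // over limit
    if (reserved_address_space.compare_exchange_weak(old_count,
                                                     old_count + num_bytes)) {
      return true;
    }
  } while (retries-- > 0);
  return false;  // too much contention; the caller treats this like OOM
}

int main() {
  assert(TryReserve(size_t{1} << 30));      // 1 GiB fits under the limit
  assert(!TryReserve(kAddressSpaceLimit));  // would exceed the limit
  return 0;
}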