author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-05-03 13:42:47 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-05-15 10:27:51 +0000
commit    8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec
tree      d29d987c4d7b173cf853279b79a51598f104b403 /chromium/v8/src/wasm
parent    830c9e163d31a9180fadca926b3e1d7dfffb5021
BASELINE: Update Chromium to 66.0.3359.156

Change-Id: I0c9831ad39911a086b6377b16f995ad75a51e441
Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm')
-rw-r--r--  chromium/v8/src/wasm/OWNERS | 2
-rw-r--r--  chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 182
-rw-r--r--  chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 182
-rw-r--r--  chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 514
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h | 61
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.cc | 348
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.h | 165
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.cc | 807
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-register.h | 129
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 520
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 465
-rw-r--r--  chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 182
-rw-r--r--  chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 182
-rw-r--r--  chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 431
-rw-r--r--  chromium/v8/src/wasm/compilation-manager.cc | 10
-rw-r--r--  chromium/v8/src/wasm/compilation-manager.h | 11
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder-impl.h | 90
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.cc | 23
-rw-r--r--  chromium/v8/src/wasm/module-compiler.cc | 607
-rw-r--r--  chromium/v8/src/wasm/module-compiler.h | 32
-rw-r--r--  chromium/v8/src/wasm/module-decoder.cc | 13
-rw-r--r--  chromium/v8/src/wasm/wasm-api.cc | 31
-rw-r--r--  chromium/v8/src/wasm/wasm-api.h | 35
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.cc | 163
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.h | 71
-rw-r--r--  chromium/v8/src/wasm/wasm-code-specialization.cc | 70
-rw-r--r--  chromium/v8/src/wasm/wasm-code-specialization.h | 17
-rw-r--r--  chromium/v8/src/wasm/wasm-code-wrapper.cc | 17
-rw-r--r--  chromium/v8/src/wasm/wasm-code-wrapper.h | 10
-rw-r--r--  chromium/v8/src/wasm/wasm-constants.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.cc | 21
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.cc | 101
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.h | 48
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.h | 7
-rw-r--r--  chromium/v8/src/wasm/wasm-interpreter.cc | 181
-rw-r--r--  chromium/v8/src/wasm/wasm-interpreter.h | 7
-rw-r--r--  chromium/v8/src/wasm/wasm-js.cc | 180
-rw-r--r--  chromium/v8/src/wasm/wasm-js.h | 16
-rw-r--r--  chromium/v8/src/wasm/wasm-limits.h | 2
-rw-r--r--  chromium/v8/src/wasm/wasm-memory.cc | 93
-rw-r--r--  chromium/v8/src/wasm/wasm-memory.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-module.cc | 8
-rw-r--r--  chromium/v8/src/wasm/wasm-module.h | 8
-rw-r--r--  chromium/v8/src/wasm/wasm-objects-inl.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.cc | 467
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.h | 59
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.cc | 20
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.h | 43
-rw-r--r--  chromium/v8/src/wasm/wasm-result.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.cc | 87
-rw-r--r--  chromium/v8/src/wasm/wasm-serialization.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-text.cc | 3
-rw-r--r--  chromium/v8/src/wasm/wasm-text.h | 6
-rw-r--r--  chromium/v8/src/wasm/wasm-value.h | 6
54 files changed, 4500 insertions(+), 2263 deletions(-)
diff --git a/chromium/v8/src/wasm/OWNERS b/chromium/v8/src/wasm/OWNERS
index e68fb0847d6..2b6cc5c057c 100644
--- a/chromium/v8/src/wasm/OWNERS
+++ b/chromium/v8/src/wasm/OWNERS
@@ -6,8 +6,6 @@ bradnelson@chromium.org
clemensh@chromium.org
gdeepti@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 7f7993d34fb..ef8893f005d 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#ifndef V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
+#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
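The BAILOUT macro above is the key change in the ARM port (and, below, the ARM64 one): instead of UNIMPLEMENTED(), which would abort the whole process, an unsupported operation now records a reason so Liftoff can give up on the current function and leave it to the optimizing tier. A minimal sketch of that control flow, using simplified stand-in classes rather than the real LiftoffAssembler/compiler interfaces:

    #include <iostream>
    #include <string>

    // Simplified stand-in for LiftoffAssembler: instead of crashing on an
    // unimplemented operation (UNIMPLEMENTED), bailout() records a reason and
    // the caller abandons baseline compilation for this one function.
    class BaselineAssembler {
     public:
      void bailout(const std::string& reason) {
        if (bailout_reason_.empty()) bailout_reason_ = reason;
      }
      bool did_bailout() const { return !bailout_reason_.empty(); }
      const std::string& bailout_reason() const { return bailout_reason_; }

      // Mirrors an operation the ARM port has not implemented yet.
      void emit_f64_add() { bailout("arm f64_add"); }

     private:
      std::string bailout_reason_;
    };

    // Compilation driver: try the baseline tier, fall back on bailout.
    bool TryBaselineCompile() {
      BaselineAssembler assm;
      assm.emit_f64_add();
      if (assm.did_bailout()) {
        std::cout << "Liftoff bailout: " << assm.bailout_reason()
                  << " -> recompile with the optimizing compiler\n";
        return false;
      }
      return true;
    }

    int main() { TryBaselineCompile(); }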
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 8d28c2b21c2..09bce6d4501 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
+#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index a8b5b32bdcf..35943554cc2 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#ifndef V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
+#define V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
+// is located at ebp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
- // is located at ebp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- ebp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -41,26 +49,45 @@ static constexpr Register kCCallLastArgAddrReg = eax;
static constexpr DoubleRegister kScratchDoubleReg = xmm7;
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- sub(esp, Immediate(bytes));
+ // We can't run out of space, just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
- xor_(reg.gp(), reg.gp());
- } else {
- mov(reg.gp(), Immediate(value.to_i32()));
- }
+ TurboAssembler::Move(
+ reg.gp(),
+ Immediate(reinterpret_cast<Address>(value.to_i32()), rmode));
break;
- case kWasmF32: {
- Register tmp = GetUnusedRegister(kGpReg).gp();
- mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
- movd(reg.fp(), tmp);
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
+ TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
@@ -86,20 +113,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, dst.is_pair());
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+ // immediate value (in 31 bits, interpreted as signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand src_op = offset_reg == no_reg
? Operand(src_addr, offset_imm)
: Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- mov(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
if (protected_load_pc) *protected_load_pc = pc_offset();
+
switch (type.value()) {
case LoadType::kI32Load8U:
movzx_b(dst.gp(), src_op);
@@ -107,18 +130,61 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kI32Load8S:
movsx_b(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8U:
+ movzx_b(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load8S:
+ movsx_b(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load16U:
movzx_w(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsx_w(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16U:
+ movzx_w(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load16S:
+ movsx_w(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load:
mov(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32U:
+ mov(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ mov(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ // Compute the operand for the load of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_src_op =
+ offset_reg == no_reg
+ ? Operand(src_addr, offset_imm + 4)
+ : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
+ // The high word has to be mov'ed first, such that this is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(dst.high_gp(), upper_src_op);
+ mov(dst.low_gp(), src_op);
+ break;
+ }
case LoadType::kF32Load:
movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -128,21 +194,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, src.is_pair());
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+ // immediate value (in 31 bits, interpreted as signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
- mov(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
if (protected_store_pc) *protected_store_pc = pc_offset();
+
switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store8:
// Only the lower 4 registers can be addressed as 8-bit registers.
if (src.gp().is_byte_register()) {
@@ -153,80 +218,139 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
mov_b(dst_op, byte_src);
}
break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store16:
mov_w(dst_op, src.gp());
break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store:
mov(dst_op, src.gp());
break;
+ case StoreType::kI64Store: {
+ // Compute the operand for the store of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_dst_op =
+ offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm + 4)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
+ // The high word has to be mov'ed first, such that this is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(upper_dst_op, src.high_gp());
+ mov(dst_op, src.low_gp());
+ break;
+ }
case StoreType::kF32Store:
movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
- if (dst.is_gp()) {
- mov(dst.gp(), src);
- } else {
- movss(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(dst.gp(), src);
+ break;
+ case kWasmF32:
+ movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
push(liftoff::GetStackSlot(src_index));
pop(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
- reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
+ : reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- mov(dst.gp(), src.gp());
+ DCHECK_EQ(kWasmI32, type);
+ mov(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ movss(dst, src);
} else {
- movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmF64, type);
+ movsd(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(dst, reg.gp());
- } else {
- movsd(dst, reg.fp());
+ switch (type) {
+ case kWasmI32:
+ mov(dst, reg.gp());
+ break;
+ case kWasmI64:
+ mov(dst, reg.low_gp());
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), reg.high_gp());
+ break;
+ case kWasmF32:
+ movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
mov(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ mov(dst, Immediate(low_word));
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
mov(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -235,16 +359,32 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(reg.gp(), src);
- } else {
- movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(reg.gp(), src);
+ break;
+ case kWasmI64:
+ mov(reg.low_gp(), src);
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ mov(reg, liftoff::GetHalfStackSlot(half_index));
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
@@ -286,8 +426,11 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
+ pinned.set(dst);
+ pinned.set(lhs);
+ pinned.set(rhs);
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@@ -302,7 +445,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// first. If lhs is ecx, lhs is now the tmp register.
Register tmp_reg = no_reg;
if (rhs != ecx) {
- if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
+ pinned.has(LiftoffRegister(ecx))) {
tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->mov(tmp_reg, ecx);
if (lhs == ecx) lhs = tmp_reg;
@@ -319,30 +463,19 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- Register tmp_byte_reg = dst;
- // Only the lower 4 registers can be addressed as 8-bit registers.
- if (!dst.is_byte_register()) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
- tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- }
-
- test(src, src);
- setcc(zero, tmp_byte_reg);
- movzx_b(dst, tmp_byte_reg);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -432,22 +565,141 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmp(lhs, rhs);
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmp(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ test(lhs, lhs);
+ }
+
j(cond, label);
}
+namespace liftoff {
+inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
+ Register tmp_byte_reg = dst;
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (!dst.is_byte_register()) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ // {GetUnusedRegister()} may insert move instructions to spill registers to
+ // the stack. This is OK because {mov} does not change the status flags.
+ tmp_byte_reg = assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ }
+
+ assm->setcc(cond, tmp_byte_reg);
+ assm->movzx_b(dst, tmp_byte_reg);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmp(lhs, rhs);
+ } else {
+ test(lhs, lhs);
+ }
+ liftoff::setcc_32(this, cond, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ ucomiss(lhs, rhs);
+ // If PF is one, one of the operands was NaN. This needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ mov(dst, Immediate(1));
+ } else {
+ xor_(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ liftoff::setcc_32(this, cond, dst);
+ bind(&cont);
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
- Register limit = GetUnusedRegister(kGpReg).gp();
- mov(limit, Immediate(ExternalReference::address_of_stack_limit(isolate())));
- cmp(esp, Operand(limit, 0));
+ cmp(esp,
+ Operand(Immediate(ExternalReference::address_of_stack_limit(isolate()))));
j(below_equal, ool_code);
}
@@ -462,27 +714,50 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index,
+ RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
- DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
- push(liftoff::GetStackSlot(src_index));
+ if (src.type() == kWasmF64) {
+ DCHECK_EQ(kLowWord, half);
+ push(liftoff::GetHalfStackSlot(2 * src_index - 1));
+ }
+ push(liftoff::GetHalfStackSlot(2 * src_index +
+ (half == kLowWord ? 0 : 1)));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ if (src.type() == kWasmI64) {
+ PushCallerFrameSlot(
+ half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
+ } else {
+ PushCallerFrameSlot(src.reg(), src.type());
+ }
break;
- case VarState::kI32Const:
- push(Immediate(src.i32_const()));
+ case VarState::KIntConst:
+ // The high word is the sign extension of the low word.
+ push(Immediate(half == kLowWord ? src.i32_const()
+ : src.i32_const() >> 31));
break;
}
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- push(reg.gp());
- } else {
- sub(esp, Immediate(kPointerSize));
- movss(Operand(esp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ push(reg.gp());
+ break;
+ case kWasmF32:
+ sub(esp, Immediate(sizeof(float)));
+ movss(Operand(esp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ sub(esp, Immediate(sizeof(double)));
+ movsd(Operand(esp, 0), reg.fp());
+ break;
+ default:
+ // Also kWasmI64 is unreachable, as it will always be pushed as two halves.
+ UNREACHABLE();
}
}
@@ -571,6 +846,17 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ add(esp, Immediate(kPointerSize));
+ call(Operand(esp, -4));
+ } else {
+ call(target);
+ }
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
sub(esp, Immediate(size));
mov(addr, esp);
@@ -584,4 +870,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#endif // V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
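The ia32 changes above split every i64 value into two 32-bit halves: GetStackSlot() addresses a full 8-byte spill slot, GetHalfStackSlot() one 4-byte half, and Spill/Fill use half indices 2 * index (low word) and 2 * index + 1 (high word). A small standalone sketch of just that offset arithmetic, with the same constants as in the patch (it reproduces only the address computation, not the real Operand/Assembler types):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Frame layout constants from the ia32 port above: ebp-8 holds the stack
    // marker, ebp-16 the wasm context, so the first 8-byte slot is at ebp-24.
    constexpr int32_t kStackSlotSize = 8;
    constexpr int32_t kConstantStackSpace = 16;
    constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + kStackSlotSize;

    // Offset (relative to ebp) of a full 8-byte spill slot.
    int32_t StackSlotOffset(uint32_t index) {
      return -kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
    }

    // Offset of one 4-byte half; an i64 in slot `index` uses half index
    // 2 * index for the low word and 2 * index + 1 for the high word.
    int32_t HalfStackSlotOffset(uint32_t half_index) {
      return -kFirstStackSlotOffset -
             static_cast<int32_t>(half_index) * (kStackSlotSize / 2);
    }

    int main() {
      assert(StackSlotOffset(0) == -24);      // first slot at ebp-24
      assert(HalfStackSlotOffset(0) == -24);  // low word of slot 0
      assert(HalfStackSlotOffset(1) == -28);  // high word of slot 0
      assert(StackSlotOffset(1) == -32);      // second slot
      std::cout << "i64 in slot 1: low word at ebp" << HalfStackSlotOffset(2)
                << ", high word at ebp" << HalfStackSlotOffset(3) << "\n";
    }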
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 3eef1e1960f..26f59c68be3 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -11,6 +11,10 @@
#include "src/ia32/assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
#endif
namespace v8 {
@@ -19,8 +23,6 @@ namespace wasm {
#if V8_TARGET_ARCH_IA32
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
@@ -30,17 +32,31 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
#elif V8_TARGET_ARCH_X64
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
-#else
+#elif V8_TARGET_ARCH_MIPS
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7, v0, v1>();
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24>();
+
+#elif V8_TARGET_ARCH_MIPS64
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7, v0, v1>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24, f26>();
+
+#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
@@ -49,12 +65,45 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+
constexpr Condition kEqual = equal;
+constexpr Condition kUnequal = not_equal;
+constexpr Condition kSignedLessThan = less;
+constexpr Condition kSignedLessEqual = less_equal;
+constexpr Condition kSignedGreaterThan = greater;
+constexpr Condition kSignedGreaterEqual = greater_equal;
+constexpr Condition kUnsignedLessThan = below;
+constexpr Condition kUnsignedLessEqual = below_equal;
+constexpr Condition kUnsignedGreaterThan = above;
constexpr Condition kUnsignedGreaterEqual = above_equal;
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+
+constexpr Condition kEqual = eq;
+constexpr Condition kUnequal = ne;
+constexpr Condition kSignedLessThan = lt;
+constexpr Condition kSignedLessEqual = le;
+constexpr Condition kSignedGreaterThan = gt;
+constexpr Condition kSignedGreaterEqual = ge;
+constexpr Condition kUnsignedLessThan = ult;
+constexpr Condition kUnsignedLessEqual = ule;
+constexpr Condition kUnsignedGreaterThan = ugt;
+constexpr Condition kUnsignedGreaterEqual = uge;
+
#else
+
// On unimplemented platforms, just make this compile.
constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnequal = static_cast<Condition>(0);
+constexpr Condition kSignedLessThan = static_cast<Condition>(0);
+constexpr Condition kSignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterThan = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessThan = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterThan = static_cast<Condition>(0);
constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+
#endif
} // namespace wasm
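The point of the expanded condition aliases in liftoff-assembler-defs.h is that platform-independent Liftoff code (comparison opcodes, bounds checks, stack checks) can pick a Condition by name and pass it to emit_cond_jump()/emit_i32_set_cond() without per-architecture #ifdefs; only each backend maps the name to its own encoding. A hedged sketch of that indirection with a made-up demo enum (the real Condition values come from each backend's assembler header):

    #include <iostream>

    // Demo stand-in for one backend's Condition enum; the real values live in
    // e.g. src/ia32/assembler-ia32.h or src/mips/assembler-mips.h.
    enum Condition { kCondEqual, kCondNotEqual, kCondBelow, kCondAboveEqual };

    // Platform-neutral names, mirroring the aliases added in the diff above.
    constexpr Condition kEqual = kCondEqual;
    constexpr Condition kUnsignedGreaterEqual = kCondAboveEqual;

    // Generic code selects conditions by name; only the backend that finally
    // emits the jump interprets the numeric value.
    Condition ConditionForMemoryBoundsCheck() {
      // Trap when index >= memory size, i.e. jump on unsigned greater-or-equal.
      return kUnsignedGreaterEqual;
    }

    int main() {
      std::cout << "kEqual -> " << static_cast<int>(kEqual)
                << ", bounds check uses condition #"
                << static_cast<int>(ConditionForMemoryBoundsCheck()) << "\n";
    }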
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index 121cfeea6ab..09b8229dc14 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -31,20 +31,45 @@ class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister dst;
LiftoffRegister src;
- constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
- : dst(dst), src(src) {}
+ ValueType type;
+ constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type)
+ : dst(dst), src(src), type(type) {}
};
struct RegisterLoad {
- LiftoffRegister dst;
- bool is_constant_load; // otherwise load it from the stack.
- union {
- uint32_t stack_slot;
- WasmValue constant;
+ enum LoadKind : uint8_t {
+ kConstant, // load a constant value into a register.
+ kStack, // fill a register from a stack slot.
+ kHalfStack // fill one half of a register pair from half a stack slot.
};
- RegisterLoad(LiftoffRegister dst, WasmValue constant)
- : dst(dst), is_constant_load(true), constant(constant) {}
- RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
- : dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
+
+ LiftoffRegister dst;
+ LoadKind kind;
+ ValueType type;
+ int32_t value; // i32 constant value or stack index, depending on kind.
+
+ // Named constructors.
+ static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
+ if (constant.type() == kWasmI32) {
+ return {dst, kConstant, kWasmI32, constant.to_i32()};
+ }
+ DCHECK_EQ(kWasmI64, constant.type());
+ DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
+ return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
+ }
+ static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
+ ValueType type) {
+ return {dst, kStack, type, stack_index};
+ }
+ static RegisterLoad HalfStack(LiftoffRegister dst,
+ int32_t half_stack_index) {
+ return {dst, kHalfStack, kWasmI32, half_stack_index};
+ }
+
+ private:
+ RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
+ int32_t value)
+ : dst(dst), kind(kind), type(type), value(value) {}
};
public:
@@ -55,15 +80,17 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
- if ((move_dst_regs & move_src_regs).is_empty()) {
+ if ((move_dst_regs_ & move_src_regs_).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
- for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
- register_moves.clear();
+ for (RegisterMove& rm : register_moves_) {
+ asm_->Move(rm.dst, rm.src, rm.type);
+ }
+ register_moves_.clear();
} else {
// Keep use counters of src registers.
uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
- for (RegisterMove& rm : register_moves) {
+ for (RegisterMove& rm : register_moves_) {
++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
@@ -73,11 +100,11 @@ class StackTransferRecipe {
// register to the stack, add a RegisterLoad to reload it later, and
// continue.
uint32_t next_spill_slot = asm_->cache_state()->stack_height();
- while (!register_moves.empty()) {
+ while (!register_moves_.empty()) {
int executed_moves = 0;
- for (auto& rm : register_moves) {
+ for (auto& rm : register_moves_) {
if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
- asm_->Move(rm.dst, rm.src);
+ asm_->Move(rm.dst, rm.src, rm.type);
++executed_moves;
DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
--src_reg_use_count[rm.src.liftoff_code()];
@@ -89,53 +116,64 @@ class StackTransferRecipe {
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
// TODO(clemensh): Use an unused register if available.
- LiftoffRegister spill_reg = register_moves.back().src;
- asm_->Spill(next_spill_slot, spill_reg);
+ RegisterMove& rm = register_moves_.back();
+ LiftoffRegister spill_reg = rm.src;
+ asm_->Spill(next_spill_slot, spill_reg, rm.type);
// Remember to reload into the destination register later.
- LoadStackSlot(register_moves.back().dst, next_spill_slot);
+ LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
- register_moves.erase(register_moves.end() - executed_moves,
- register_moves.end());
+ register_moves_.erase(register_moves_.end() - executed_moves,
+ register_moves_.end());
}
}
- for (RegisterLoad& rl : register_loads) {
- if (rl.is_constant_load) {
- asm_->LoadConstant(rl.dst, rl.constant);
- } else {
- asm_->Fill(rl.dst, rl.stack_slot);
+ for (RegisterLoad& rl : register_loads_) {
+ switch (rl.kind) {
+ case RegisterLoad::kConstant:
+ asm_->LoadConstant(rl.dst, rl.type == kWasmI64
+ ? WasmValue(int64_t{rl.value})
+ : WasmValue(int32_t{rl.value}));
+ break;
+ case RegisterLoad::kStack:
+ asm_->Fill(rl.dst, rl.value, rl.type);
+ break;
+ case RegisterLoad::kHalfStack:
+ // As half of a register pair, {rl.dst} must be a gp register.
+ asm_->FillI64Half(rl.dst.gp(), rl.value);
+ break;
}
}
- register_loads.clear();
+ register_loads_.clear();
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
uint32_t dst_index, uint32_t src_index) {
const VarState& dst = dst_state.stack_state[dst_index];
const VarState& src = __ cache_state()->stack_state[src_index];
+ DCHECK_EQ(dst.type(), src.type());
switch (dst.loc()) {
case VarState::kStack:
switch (src.loc()) {
case VarState::kStack:
if (src_index == dst_index) break;
- asm_->MoveStackValue(dst_index, src_index);
+ asm_->MoveStackValue(dst_index, src_index, src.type());
break;
case VarState::kRegister:
- asm_->Spill(dst_index, src.reg());
+ asm_->Spill(dst_index, src.reg(), src.type());
break;
- case VarState::kI32Const:
- asm_->Spill(dst_index, WasmValue(src.i32_const()));
+ case VarState::KIntConst:
+ asm_->Spill(dst_index, src.constant());
break;
}
break;
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
DCHECK_EQ(dst, src);
break;
}
@@ -146,40 +184,80 @@ class StackTransferRecipe {
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
- LoadStackSlot(dst, src_index);
+ LoadStackSlot(dst, src_index, src.type());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
+ break;
+ case VarState::KIntConst:
+ LoadConstant(dst, src.constant());
break;
- case VarState::kI32Const:
- LoadConstant(dst, WasmValue(src.i32_const()));
+ }
+ }
+
+ void LoadI64HalfIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t index, RegPairHalf half) {
+ // Use CHECK such that the remaining code is statically dead if
+ // {kNeedI64RegPair} is false.
+ CHECK(kNeedI64RegPair);
+ DCHECK_EQ(kWasmI64, src.type());
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadI64HalfStackSlot(dst, 2 * index + (half == kLowWord ? 0 : 1));
+ break;
+ case VarState::kRegister: {
+ LiftoffRegister src_half =
+ half == kLowWord ? src.reg().low() : src.reg().high();
+ if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
+ break;
+ }
+ case VarState::KIntConst:
+ int32_t value = src.i32_const();
+ // The high word is the sign extension of the low word.
+ if (half == kHighWord) value = value >> 31;
+ LoadConstant(dst, WasmValue(value));
break;
}
}
- void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK(!move_dst_regs.has(dst));
- move_dst_regs.set(dst);
- move_src_regs.set(src);
- register_moves.emplace_back(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ DCHECK_EQ(reg_class_for(type), src.reg_class());
+ if (src.is_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
+ if (dst.high() != src.high())
+ MoveRegister(dst.high(), src.high(), kWasmI32);
+ return;
+ }
+ DCHECK(!move_dst_regs_.has(dst));
+ move_dst_regs_.set(dst);
+ move_src_regs_.set(src);
+ register_moves_.emplace_back(dst, src, type);
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
- register_loads.emplace_back(dst, value);
+ register_loads_.push_back(RegisterLoad::Const(dst, value));
+ }
+
+ void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
+ ValueType type) {
+ register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
}
- void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
- register_loads.emplace_back(dst, stack_index);
+ void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
+ register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves;
- std::vector<RegisterLoad> register_loads;
- LiftoffRegList move_dst_regs;
- LiftoffRegList move_src_regs;
+ std::vector<RegisterMove> register_moves_;
+ std::vector<RegisterLoad> register_loads_;
+ LiftoffRegList move_dst_regs_;
+ LiftoffRegList move_src_regs_;
LiftoffAssembler* const asm_;
};
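// ---- Illustrative sketch (not part of this patch) ----
// Why {StackTransferRecipe} records register moves and executes them later:
// the recorded moves form one parallel assignment, so a destination may still
// be needed as the source of another pending move. Minimal standalone resolver
// over plain integer register ids; cycles (which need a scratch register or a
// swap) are left out. All names are invented.
#include <cstddef>
#include <vector>

struct Move {
  int dst;
  int src;
};

void ExecuteParallelMoves(std::vector<Move> moves, int* regs) {
  bool progress = true;
  while (!moves.empty() && progress) {
    progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      bool dst_still_needed = false;
      for (const Move& other : moves) {
        if (other.src == moves[i].dst) {
          dst_still_needed = true;
          break;
        }
      }
      if (dst_still_needed) continue;  // executing now would clobber a source
      regs[moves[i].dst] = regs[moves[i].src];
      moves.erase(moves.begin() + i);
      progress = true;
      break;
    }
  }
  // Anything left forms a cycle and would be broken with a scratch register.
}
// ---- end sketch ----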
@@ -301,16 +379,16 @@ LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
switch (slot.loc()) {
case VarState::kStack: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- Fill(reg, cache_state_.stack_height());
+ Fill(reg, cache_state_.stack_height(), slot.type());
return reg;
}
case VarState::kRegister:
DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::kI32Const: {
+ case VarState::KIntConst: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- LoadConstant(reg, WasmValue(slot.i32_const()));
+ LoadConstant(reg, slot.constant());
return reg;
}
}
@@ -335,6 +413,8 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// ^target_stack_base
uint32_t stack_height = cache_state_.stack_height();
uint32_t target_stack_height = target.stack_height();
+ DCHECK_LE(target_stack_height, stack_height);
+ DCHECK_LE(arity, target_stack_height);
uint32_t stack_base = stack_height - arity;
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
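// ---- Illustrative sketch (not part of this patch) ----
// The index arithmetic behind the merge set up above: slots below the target's
// stack base keep their index, and the top {arity} values are copied from
// {stack_base + i} to {target_stack_base + i}. Standalone computation of the
// source index for each destination slot; names are invented.
#include <cstdint>
#include <vector>

std::vector<uint32_t> MergeSources(uint32_t stack_height,
                                   uint32_t target_stack_height,
                                   uint32_t arity) {
  uint32_t stack_base = stack_height - arity;
  uint32_t target_stack_base = target_stack_height - arity;
  std::vector<uint32_t> src_of_dst(target_stack_height);
  for (uint32_t i = 0; i < target_stack_base; ++i) src_of_dst[i] = i;
  for (uint32_t i = 0; i < arity; ++i)
    src_of_dst[target_stack_base + i] = stack_base + i;
  return src_of_dst;
}
// ---- end sketch ----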
@@ -352,11 +432,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack:
return;
case VarState::kRegister:
- Spill(index, slot.reg());
+ Spill(index, slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
- case VarState::kI32Const:
- Spill(index, WasmValue(slot.i32_const()));
+ case VarState::KIntConst:
+ Spill(index, slot.constant());
break;
}
slot.MakeStack();
@@ -372,19 +452,17 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
- Spill(i, slot.reg());
+ Spill(i, slot.reg(), slot.type());
slot.MakeStack();
}
cache_state_.reset_used_registers();
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
+ compiler::CallDescriptor* call_descriptor,
+ Register* target,
+ LiftoffRegister* explicit_context) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
- // Parameter 0 is the wasm context.
- constexpr size_t kFirstActualParameter = 1;
- DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
-
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
@@ -394,66 +472,134 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
- Spill(idx, slot.reg());
+ Spill(idx, slot.reg(), slot.type());
slot.MakeStack();
}
StackTransferRecipe stack_transfers(this);
+ LiftoffRegList param_regs;
+
+ // Move the explicit context (if any) into the correct context register.
+ compiler::LinkageLocation context_loc =
+ call_descriptor->GetInputLocation(kInputShift);
+ DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
+ LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
+ param_regs.set(context_reg);
+ if (explicit_context && *explicit_context != context_reg) {
+ stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
+ }
// Now move all parameter values into the right slot for the call.
- // Process parameters backward, such that we can just pop values from the
- // stack.
+ // Don't pop values yet, such that the stack height is still correct when
+ // executing the {stack_transfers}.
+ // Process parameters backwards, such that pushes of caller frame slots are
+ // in the correct order.
+ uint32_t param_base = cache_state_.stack_height() - num_params;
+ uint32_t call_desc_input_idx =
+ static_cast<uint32_t>(call_descriptor->InputCount());
for (uint32_t i = num_params; i > 0; --i) {
- uint32_t param = i - 1;
+ const uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation loc = call_desc->GetInputLocation(
- param + kFirstActualParameter + kInputShift);
- const VarState& slot = cache_state_.stack_state.back();
- uint32_t stack_idx = cache_state_.stack_height() - 1;
- if (loc.IsRegister()) {
- DCHECK(!loc.IsAnyRegister());
- int reg_code = loc.AsRegister();
- LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
- stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ const bool is_pair = kNeedI64RegPair && type == kWasmI64;
+ const int num_lowered_params = is_pair ? 2 : 1;
+ const uint32_t stack_idx = param_base + param;
+ const VarState& slot = cache_state_.stack_state[stack_idx];
+    // Process both halves of a register pair separately, because they are
+    // passed as separate parameters. One or both of them could end up on the
+    // stack.
+ for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
+ const RegPairHalf half =
+ is_pair && lowered_idx == 0 ? kHighWord : kLowWord;
+ --call_desc_input_idx;
+ compiler::LinkageLocation loc =
+ call_descriptor->GetInputLocation(call_desc_input_idx);
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ RegClass rc = is_pair ? kGpReg : reg_class_for(type);
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, loc.AsRegister());
+ param_regs.set(reg);
+ if (is_pair) {
+ stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
+ } else {
+ stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ }
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ PushCallerFrameSlot(slot, stack_idx, half);
+ }
+ }
+ }
+ // {call_desc_input_idx} should point after the context parameter now.
+ DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
+
+ // If the target register overlaps with a parameter register, then move the
+ // target to another free register, or spill to the stack.
+ if (target && param_regs.has(LiftoffRegister(*target))) {
+ // Try to find another free register.
+ LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
+ if (!free_regs.is_empty()) {
+ LiftoffRegister new_target = free_regs.GetFirstRegSet();
+ stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
+ kWasmIntPtr);
+ *target = new_target.gp();
} else {
- DCHECK(loc.IsCallerFrameSlot());
- PushCallerFrameSlot(slot, stack_idx);
+ PushCallerFrameSlot(LiftoffRegister(*target), kWasmIntPtr);
+ *target = no_reg;
}
- cache_state_.stack_state.pop_back();
}
// Execute the stack transfers before filling the context register.
stack_transfers.Execute();
+ // Pop parameters from the value stack.
+ auto stack_end = cache_state_.stack_state.end();
+ cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+
// Reset register use counters.
cache_state_.reset_used_registers();
- // Fill the wasm context into the right register.
- compiler::LinkageLocation context_loc =
- call_desc->GetInputLocation(kInputShift);
- DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
- int context_reg_code = context_loc.AsRegister();
- LiftoffRegister context_reg(Register::from_code(context_reg_code));
- FillContextInto(context_reg.gp());
+ // Reload the context from the stack.
+ if (!explicit_context) {
+ FillContextInto(context_reg.gp());
+ }
}
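// ---- Illustrative sketch (not part of this patch) ----
// On 32-bit targets an i64 parameter is lowered to two i32 inputs in the call
// descriptor. Because {PrepareCall} walks parameters backwards and decrements
// {call_desc_input_idx}, each pair is visited high word first, then low word,
// matching the {kHighWord}/{kLowWord} selection above. Standalone model of
// that visiting order; names are invented.
#include <string>
#include <utility>
#include <vector>

enum class Ty { kI32, kI64, kF32, kF64 };

std::vector<std::pair<int, std::string>> LoweredVisitOrder(
    const std::vector<Ty>& params, bool need_i64_pair) {
  std::vector<std::pair<int, std::string>> order;
  for (int i = static_cast<int>(params.size()); i > 0; --i) {
    const int param = i - 1;
    const bool is_pair = need_i64_pair && params[param] == Ty::kI64;
    const int num_lowered = is_pair ? 2 : 1;
    for (int lowered_idx = 0; lowered_idx < num_lowered; ++lowered_idx) {
      const char* half =
          !is_pair ? "whole" : lowered_idx == 0 ? "high" : "low";
      order.emplace_back(param, half);
    }
  }
  return order;
}
// ---- end sketch ----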
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
- size_t return_count = call_desc->ReturnCount();
- DCHECK_EQ(return_count, sig->return_count());
+ compiler::CallDescriptor* call_descriptor) {
+ const size_t return_count = sig->return_count();
if (return_count != 0) {
DCHECK_EQ(1, return_count);
- compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
- int return_reg_code = return_loc.AsRegister();
ValueType return_type = sig->GetReturn(0);
- LiftoffRegister return_reg =
- LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
+ const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
+ DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
+ RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
+ LiftoffRegister return_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(0).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(return_reg));
+ if (need_pair) {
+ LiftoffRegister high_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(1).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(high_reg));
+ return_reg = LiftoffRegister::ForPair(return_reg, high_reg);
+ }
DCHECK(!cache_state_.is_used(return_reg));
PushRegister(return_type, return_reg);
}
}
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type) {
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (kNeedI64RegPair && dst.is_pair()) {
+ // Use the {StackTransferRecipe} to move pairs, as the registers in the
+ // pairs might overlap.
+ StackTransferRecipe(this).MoveRegister(dst, src, type);
+ } else if (dst.is_gp()) {
+ Move(dst.gp(), src.gp(), type);
+ } else {
+ Move(dst.fp(), src.fp(), type);
+ }
+}
+
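// ---- Illustrative sketch (not part of this patch) ----
// Why pair moves above go through a {StackTransferRecipe}: the halves of the
// source and destination pairs may overlap. With dst = (low: r1, high: r2) and
// src = (low: r0, high: r1), copying the low half first would overwrite r1
// before its old value (the source's high half) is read. Minimal standalone
// version that picks a safe order; a full swap cycle would need a scratch
// register and is excluded here. Names are invented.
#include <cassert>

void MovePair(int* regs, int dst_low, int dst_high, int src_low,
              int src_high) {
  assert(!(dst_low == src_high && dst_high == src_low));  // no swap cycle
  if (dst_low == src_high) {
    regs[dst_high] = regs[src_high];  // read the endangered source first
    regs[dst_low] = regs[src_low];
  } else {
    regs[dst_low] = regs[src_low];
    regs[dst_high] = regs[src_high];
  }
}
// ---- end sketch ----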
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
@@ -468,8 +614,14 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto* slot = &cache_state_.stack_state[idx];
- if (!slot->is_reg() || slot->reg() != reg) continue;
- Spill(idx, reg);
+ if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
+ if (slot->reg().is_pair()) {
+ // Make sure to decrement *both* registers in a pair, because the
+ // {clear_used} call below only clears one of them.
+ cache_state_.dec_used(slot->reg().low());
+ cache_state_.dec_used(slot->reg().high());
+ }
+ Spill(idx, slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
@@ -486,10 +638,6 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
}
-uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
- return num_locals() + kMaxValueStackHeight;
-}
-
std::ostream& operator<<(std::ostream& os, VarState slot) {
os << WasmOpcodes::TypeName(slot.type()) << ":";
switch (slot.loc()) {
@@ -497,7 +645,7 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
return os << "s";
case VarState::kRegister:
return os << slot.reg();
- case VarState::kI32Const:
+ case VarState::KIntConst:
return os << "c" << slot.i32_const();
}
UNREACHABLE();
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index b91f6d7c880..99d9814dea3 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
@@ -29,35 +27,35 @@ struct ModuleEnv;
class LiftoffAssembler : public TurboAssembler {
public:
- // TODO(clemensh): Remove this limitation by allocating more stack space if
- // needed.
- static constexpr int kMaxValueStackHeight = 8;
-
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr uint32_t kStackSlotSize = 8;
+ static constexpr ValueType kWasmIntPtr =
+ kPointerSize == 8 ? kWasmI64 : kWasmI32;
+
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, kI32Const };
+ enum Location : uint8_t { kStack, kRegister, KIntConst };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
explicit VarState(ValueType type, LiftoffRegister r)
: loc_(kRegister), type_(type), reg_(r) {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
}
- explicit VarState(ValueType type, uint32_t i32_const)
- : loc_(kI32Const), type_(type), i32_const_(i32_const) {
+ explicit VarState(ValueType type, int32_t i32_const)
+ : loc_(KIntConst), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
bool operator==(const VarState& other) const {
if (loc_ != other.loc_) return false;
+ if (type_ != other.type_) return false;
switch (loc_) {
case kStack:
return true;
case kRegister:
return reg_ == other.reg_;
- case kI32Const:
+ case KIntConst:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
@@ -67,16 +65,23 @@ class LiftoffAssembler : public TurboAssembler {
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == kI32Const; }
+ bool is_const() const { return loc_ == KIntConst; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
- uint32_t i32_const() const {
- DCHECK_EQ(loc_, kI32Const);
+ int32_t i32_const() const {
+ DCHECK_EQ(loc_, KIntConst);
return i32_const_;
}
+ WasmValue constant() const {
+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK_EQ(loc_, KIntConst);
+ return type_ == kWasmI32 ? WasmValue(i32_const_)
+ : WasmValue(int64_t{i32_const_});
+ }
+
Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); }
LiftoffRegister reg() const {
@@ -95,7 +100,7 @@ class LiftoffAssembler : public TurboAssembler {
union {
LiftoffRegister reg_; // used if loc_ == kRegister
- uint32_t i32_const_; // used if loc_ == kI32Const
+ int32_t i32_const_; // used if loc_ == KIntConst
};
};
@@ -117,6 +122,11 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList available_regs =
+ kGpCacheRegList & ~used_registers & ~pinned;
+ return available_regs.GetNumRegsSet() >= 2;
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return has_unused_register(candidates, pinned);
@@ -130,9 +140,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegister low = pinned.set(unused_register(kGpReg, pinned));
+ LiftoffRegister high = unused_register(kGpReg, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return unused_register(candidates);
+ return unused_register(candidates, pinned);
}
LiftoffRegister unused_register(LiftoffRegList candidates,
@@ -142,22 +157,31 @@ class LiftoffAssembler : public TurboAssembler {
}
void inc_used(LiftoffRegister reg) {
+ if (reg.is_pair()) {
+ inc_used(reg.low());
+ inc_used(reg.high());
+ return;
+ }
used_registers.set(reg);
DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
- bool dec_used(LiftoffRegister reg) {
+ void dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
+ if (reg.is_pair()) {
+ dec_used(reg.low());
+ dec_used(reg.high());
+ return;
+ }
int code = reg.liftoff_code();
DCHECK_LT(0, register_use_count[code]);
- if (--register_use_count[code] != 0) return false;
- used_registers.clear(reg);
- return true;
+ if (--register_use_count[code] == 0) used_registers.clear(reg);
}
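// ---- Illustrative sketch (not part of this patch) ----
// The per-register use counting above, with register pairs counted on both
// halves: a register stays in the used set while its count is non-zero, and
// the last decrement clears it. Standalone version over a small bitmask;
// names are invented.
#include <cassert>
#include <cstdint>

struct UseCounts {
  int count[16] = {};
  uint32_t used_mask = 0;

  void Inc(int code) {
    used_mask |= 1u << code;
    ++count[code];
  }
  void Dec(int code) {
    assert(count[code] > 0);
    if (--count[code] == 0) used_mask &= ~(1u << code);
  }
  // An i64 register pair uses both of its gp halves.
  void IncPair(int low, int high) { Inc(low); Inc(high); }
  void DecPair(int low, int high) { Dec(low); Dec(high); }
  bool IsUsed(int code) const { return (used_mask >> code) & 1u; }
};
// ---- end sketch ----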
bool is_used(LiftoffRegister reg) const {
+ if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
bool used = used_registers.has(reg);
DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
@@ -239,6 +263,12 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList candidates = kGpCacheRegList;
+ LiftoffRegister low = pinned.set(GetUnusedRegister(candidates, pinned));
+ LiftoffRegister high = GetUnusedRegister(candidates, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
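// ---- Illustrative sketch (not part of this patch) ----
// Allocating a gp register pair as done above: pick the first free register,
// pin it, then pick a second one, so the two halves are guaranteed to differ.
// Register sets are modelled as plain bitmasks; names are invented.
#include <cassert>
#include <cstdint>

int FirstFreeReg(uint32_t cache_regs, uint32_t used, uint32_t pinned) {
  uint32_t available = cache_regs & ~used & ~pinned;
  assert(available != 0);  // a real allocator would spill here instead
  int code = 0;
  while ((available & 1u) == 0) {
    available >>= 1;
    ++code;
  }
  return code;
}

void PickGpPair(uint32_t cache_regs, uint32_t used, int* low, int* high) {
  *low = FirstFreeReg(cache_regs, used, /*pinned=*/0);
  *high = FirstFreeReg(cache_regs, used, /*pinned=*/1u << *low);
  assert(*low != *high);
}
// ---- end sketch ----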
@@ -270,18 +300,36 @@ class LiftoffAssembler : public TurboAssembler {
void SpillLocals();
void SpillAllRegisters();
+ // Call this method whenever spilling something, such that the number of used
+  // spill slots can be tracked and the stack frame will be allocated big enough.
+ void RecordUsedSpillSlot(uint32_t index) {
+ if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
+ }
+
// Load parameters into the right registers / stack slots for the call.
- void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Move {*target} into another register if needed and update {*target} to that
+ // register, or {no_reg} if target was spilled to the stack.
+ void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+ Register* target = nullptr,
+ LiftoffRegister* explicit_context = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
- inline void ReserveStackSpace(uint32_t bytes);
+ // This function emits machine code to prepare the stack frame, before the
+ // size of the stack frame is known. It returns an offset in the machine code
+  // which can later be patched (via {PatchPrepareStackFrame}) when the size of
+ // the frame is known.
+ inline uint32_t PrepareStackFrame();
+ inline void PatchPrepareStackFrame(uint32_t offset, uint32_t stack_slots);
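// ---- Illustrative sketch (not part of this patch) ----
// The two-phase frame setup declared above: emit the frame construction with a
// placeholder size, remember the code offset, and patch it once the number of
// spill slots is known at the end of compilation. The 4-byte "encoding" below
// is invented purely for illustration; real encodings are per-architecture.
#include <cstdint>
#include <cstring>
#include <vector>

struct FrameSizePatching {
  std::vector<uint8_t> code;

  uint32_t PrepareStackFrame() {
    uint32_t offset = static_cast<uint32_t>(code.size());
    uint32_t placeholder = 0;  // frame size is not known yet
    code.resize(code.size() + sizeof placeholder);
    std::memcpy(code.data() + offset, &placeholder, sizeof placeholder);
    return offset;  // remember where to patch later
  }

  void PatchPrepareStackFrame(uint32_t offset, uint32_t stack_slots) {
    uint32_t frame_bytes = stack_slots * 8;  // 8 bytes per stack slot
    std::memcpy(code.data() + offset, &frame_bytes, sizeof frame_bytes);
  }
};
// ---- end sketch ----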
- inline void LoadConstant(LiftoffRegister, WasmValue);
+ inline void LoadConstant(LiftoffRegister, WasmValue,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
@@ -291,16 +339,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr);
- inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
- inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
+ ValueType);
+ inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
- inline void MoveToReturnRegister(LiftoffRegister);
- // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
- inline void Move(LiftoffRegister dst, LiftoffRegister src);
+ inline void MoveToReturnRegister(LiftoffRegister src, ValueType);
+ inline void Move(Register dst, Register src, ValueType);
+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
- inline void Spill(uint32_t index, LiftoffRegister);
+ inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
- inline void Fill(LiftoffRegister, uint32_t index);
+ inline void Fill(LiftoffRegister, uint32_t index, ValueType);
+ inline void FillI64Half(Register, uint32_t half_index);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -309,29 +359,49 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
- inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
// i32 unops.
- inline bool emit_i32_eqz(Register dst, Register src);
inline bool emit_i32_clz(Register dst, Register src);
inline bool emit_i32_ctz(Register dst, Register src);
inline bool emit_i32_popcnt(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+ // f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ // f32 unops.
+ inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
+
+ // f64 binops.
+ inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+
+ // f64 unops.
+ inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
- inline void emit_i32_test(Register);
- inline void emit_i32_compare(Register, Register);
inline void emit_jump(Label*);
- inline void emit_cond_jump(Condition, Label*);
+ inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
+ Register rhs = no_reg);
+ // Set {dst} to 1 if condition holds, 0 otherwise.
+ inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
+ Register rhs = no_reg);
+ inline void emit_f32_set_cond(Condition, Register dst, DoubleRegister lhs,
+ DoubleRegister rhs);
inline void StackCheck(Label* ool_code);
@@ -340,8 +410,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
- inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
- inline void PushCallerFrameSlot(LiftoffRegister reg);
+ inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index,
+ RegPairHalf half);
+ inline void PushCallerFrameSlot(LiftoffRegister reg, ValueType type);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
@@ -358,8 +429,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
inline void CallNativeWasmCode(Address addr);
-
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+ // Indirect call: If {target == no_reg}, then pop the target from the stack.
+ inline void CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target);
// Reserve space in the current frame, store address to space in {addr}.
inline void AllocateStackSlot(Register addr, uint32_t size);
@@ -372,7 +446,9 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
- uint32_t GetTotalFrameSlotCount() const;
+ uint32_t GetTotalFrameSlotCount() const {
+ return num_locals_ + num_used_spill_slots_;
+ }
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
@@ -389,6 +465,9 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
+ bool did_bailout() { return bailout_reason_ != nullptr; }
+ const char* bailout_reason() const { return bailout_reason_; }
+
private:
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
@@ -399,9 +478,15 @@ class LiftoffAssembler : public TurboAssembler {
static_assert(sizeof(ValueType) == 1,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
+ uint32_t num_used_spill_slots_ = 0;
+ const char* bailout_reason_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
+
+ void bailout(const char* reason) {
+ if (bailout_reason_ == nullptr) bailout_reason_ = reason;
+ }
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index 255ee0347e1..c6adb90f824 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -20,7 +20,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto kI32Const = LiftoffAssembler::VarState::kI32Const;
+constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -41,6 +41,8 @@ class MovableLabel {
Label* get() { return label_.get(); }
MovableLabel() : MovableLabel(new Label()) {}
+ operator bool() const { return label_ != nullptr; }
+
static MovableLabel None() { return MovableLabel(nullptr); }
private:
@@ -53,6 +55,8 @@ class MovableLabel {
public:
Label* get() { return &label_; }
+ operator bool() const { return true; }
+
static MovableLabel None() { return MovableLabel(); }
private:
@@ -60,6 +64,25 @@ class MovableLabel {
};
#endif
+wasm::WasmValue WasmPtrValue(uintptr_t ptr) {
+ using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
+ static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
+ return wasm::WasmValue(static_cast<int_t>(ptr));
+}
+
+wasm::WasmValue WasmPtrValue(void* ptr) {
+ return WasmPtrValue(reinterpret_cast<uintptr_t>(ptr));
+}
+
+compiler::CallDescriptor* GetLoweredCallDescriptor(
+ Zone* zone, compiler::CallDescriptor* call_desc) {
+ return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
+ : call_desc;
+}
+
+constexpr ValueType kTypesArr_ilfd[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64};
+constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
+
class LiftoffCompiler {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
@@ -106,32 +129,30 @@ class LiftoffCompiler {
};
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env,
+ compiler::CallDescriptor* call_descriptor,
+ compiler::ModuleEnv* env,
compiler::RuntimeExceptionSupport runtime_exception_support,
SourcePositionTableBuilder* source_position_table_builder,
std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
: asm_(liftoff_asm),
- call_desc_(call_desc),
+ descriptor_(
+ GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
- min_size_(env_->module->initial_pages * wasm::kWasmPageSize),
- max_size_((env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
+ min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
+ max_size_(uint64_t{env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages} *
wasm::kWasmPageSize),
runtime_exception_support_(runtime_exception_support),
source_position_table_builder_(source_position_table_builder),
protected_instructions_(protected_instructions),
compilation_zone_(compilation_zone),
codegen_zone_(codegen_zone),
- safepoint_table_builder_(compilation_zone_) {
- // Check for overflow in max_size_.
- DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages} *
- wasm::kWasmPageSize);
- }
+ safepoint_table_builder_(compilation_zone_) {}
+
+ ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
bool ok() const { return ok_; }
@@ -142,6 +163,26 @@ class LiftoffCompiler {
BindUnboundLabels(decoder);
}
+ bool DidAssemblerBailout(Decoder* decoder) {
+ if (decoder->failed() || !asm_->did_bailout()) return false;
+ unsupported(decoder, asm_->bailout_reason());
+ return true;
+ }
+
+ bool CheckSupportedType(Decoder* decoder,
+ Vector<const ValueType> supported_types,
+ ValueType type, const char* context) {
+ char buffer[128];
+ // Check supported types.
+ for (ValueType supported : supported_types) {
+ if (type == supported) return true;
+ }
+ SNPrintF(ArrayVector(buffer), "%s %s", WasmOpcodes::TypeName(type),
+ context);
+ unsupported(decoder, buffer);
+ return false;
+ }
+
int GetSafepointTableOffset() const {
return safepoint_table_builder_.GetCodeOffset();
}
@@ -150,7 +191,8 @@ class LiftoffCompiler {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they where referenced before.
- for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
+ uint32_t control_depth = decoder ? decoder->control_depth() : 0;
+ for (uint32_t i = 0; i < control_depth; ++i) {
Control* c = decoder->control_at(i);
Label* label = c->label.get();
if (!label->is_bound()) __ bind(label);
@@ -165,14 +207,6 @@ class LiftoffCompiler {
#endif
}
- void CheckStackSizeLimit(Decoder* decoder) {
- DCHECK_GE(__ cache_state()->stack_height(), __ num_locals());
- int stack_height = __ cache_state()->stack_height() - __ num_locals();
- if (stack_height > LiftoffAssembler::kMaxValueStackHeight) {
- unsupported(decoder, "value stack grows too large");
- }
- }
-
void StartFunction(Decoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
@@ -181,37 +215,48 @@ class LiftoffCompiler {
}
}
- void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
- ValueType type = __ local_type(param_idx);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation param_loc =
- call_desc_->GetInputLocation(input_location);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- int reg_code = param_loc.AsRegister();
- LiftoffRegister reg =
- rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
- : LiftoffRegister(DoubleRegister::from_code(reg_code));
- LiftoffRegList cache_regs =
- rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
- if (cache_regs.has(reg)) {
- // This is a cache register, just use it.
- __ PushRegister(type, reg);
- return;
+ // Returns the number of inputs processed (1 or 2).
+ uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
+ const int num_lowered_params = 1 + (kNeedI64RegPair && type == kWasmI64);
+ // Initialize to anything, will be set in the loop and used afterwards.
+ LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
+ RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
+ LiftoffRegList pinned;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ compiler::LinkageLocation param_loc =
+ descriptor_->GetInputLocation(input_idx + pair_idx);
+ // Initialize to anything, will be set in both arms of the if.
+ LiftoffRegister in_reg = LiftoffRegister::from_code(kGpReg, 0);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ int reg_code = param_loc.AsRegister();
+ RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
+ : kLiftoffAssemblerFpCacheRegs;
+ if (cache_regs & (1 << reg_code)) {
+ // This is a cache register, just use it.
+ in_reg = LiftoffRegister::from_code(rc, reg_code);
+ } else {
+ // Move to a cache register (spill one if necessary).
+ // Note that we cannot create a {LiftoffRegister} for reg_code, since
+ // {LiftoffRegister} can only store cache regs.
+          in_reg = __ GetUnusedRegister(rc, pinned);
+ if (rc == kGpReg) {
+ __ Move(in_reg.gp(), Register::from_code(reg_code), type);
+ } else {
+ __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), type);
+ }
+ }
+ } else if (param_loc.IsCallerFrameSlot()) {
+ in_reg = __ GetUnusedRegister(rc, pinned);
+ ValueType lowered_type = num_lowered_params == 1 ? type : kWasmI32;
+ __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
+ lowered_type);
}
- // Move to a cache register.
- LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
- __ Move(cache_reg, reg);
- __ PushRegister(type, reg);
- return;
+ reg = pair_idx == 0 ? in_reg : LiftoffRegister::ForPair(reg, in_reg);
+ pinned.set(reg);
}
- if (param_loc.IsCallerFrameSlot()) {
- LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
- __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
- __ PushRegister(type, tmp_reg);
- return;
- }
- UNREACHABLE();
+ __ PushRegister(type, reg);
+ return num_lowered_params;
}
void StackCheck(wasm::WasmCodePosition position) {
@@ -220,69 +265,65 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
__ StackCheck(ool.label.get());
- __ bind(ool.continuation.get());
+ if (ool.continuation) __ bind(ool.continuation.get());
}
void StartFunctionBody(Decoder* decoder, Control* block) {
- if (!kLiftoffAssemblerImplementedOnThisPlatform) {
- unsupported(decoder, "platform");
- return;
- }
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
- __ ReserveStackSpace(LiftoffAssembler::kStackSlotSize *
- __ GetTotalFrameSlotCount());
+ pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
+ // {PrepareStackFrame} is the first platform-specific assembler method.
+ // If this failed, we can bail out immediately, avoiding runtime overhead
+ // and potential failures because of other unimplemented methods.
+ // A platform implementing {PrepareStackFrame} must ensure that we can
+ // finish compilation without errors even if we hit unimplemented
+ // LiftoffAssembler methods.
+ if (DidAssemblerBailout(decoder)) return;
// Parameter 0 is the wasm context.
uint32_t num_params =
- static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
+ static_cast<uint32_t>(decoder->sig_->parameter_count());
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- switch (__ local_type(i)) {
- case kWasmI32:
- case kWasmF32:
- // supported.
- break;
- case kWasmI64:
- unsupported(decoder, "i64 param/local");
- return;
- case kWasmF64:
- unsupported(decoder, "f64 param/local");
- return;
- default:
- unsupported(decoder, "exotic param/local");
- return;
- }
+ if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
+ return;
}
// Input 0 is the call target, the context is at 1.
constexpr int kContextParameterIndex = 1;
// Store the context parameter to a special stack slot.
compiler::LinkageLocation context_loc =
- call_desc_->GetInputLocation(kContextParameterIndex);
+ descriptor_->GetInputLocation(kContextParameterIndex);
DCHECK(context_loc.IsRegister());
DCHECK(!context_loc.IsAnyRegister());
Register context_reg = Register::from_code(context_loc.AsRegister());
__ SpillContext(context_reg);
- uint32_t param_idx = 0;
- for (; param_idx < num_params; ++param_idx) {
- constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
- ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
+ // Input 0 is the code target, 1 is the context. First parameter at 2.
+ uint32_t input_idx = kContextParameterIndex + 1;
+ for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
+ input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
+ DCHECK_EQ(input_idx, descriptor_->InputCount());
// Set to a gp register, to mark this uninitialized.
LiftoffRegister zero_double_reg(Register::from_code<0>());
DCHECK(zero_double_reg.is_gp());
- for (; param_idx < __ num_locals(); ++param_idx) {
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
break;
+ case kWasmI64:
+ __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
+ break;
case kWasmF32:
+ case kWasmF64:
if (zero_double_reg.is_gp()) {
// Note: This might spill one of the registers used to hold
// parameters.
zero_double_reg = __ GetUnusedRegister(kFpReg);
- __ LoadConstant(zero_double_reg, WasmValue(0.f));
+ // Zero is represented by the bit pattern 0 for both f32 and f64.
+ __ LoadConstant(zero_double_reg, WasmValue(0.));
}
- __ PushRegister(kWasmF32, zero_double_reg);
+ __ PushRegister(type, zero_double_reg);
break;
default:
UNIMPLEMENTED();
@@ -294,9 +335,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(0);
- DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
- CheckStackSizeLimit(decoder);
}
void GenerateOutOfLineCode(OutOfLineCode& ool) {
@@ -338,10 +377,13 @@ class LiftoffCompiler {
}
void FinishFunction(Decoder* decoder) {
+ if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
}
safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+ __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
+ __ GetTotalFrameSlotCount());
}
void OnFirstError(Decoder* decoder) {
@@ -391,8 +433,8 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, if_block->else_state->label.get());
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
+ value);
if_block->label_state.stack_base = __ cache_state()->stack_height();
// Store the state (after popping the value) for executing the else branch.
@@ -433,14 +475,15 @@ class LiftoffCompiler {
DCHECK_LE(num_args, kMaxArgs);
MachineSignature sig(kNumReturns, num_args, kReps);
- compiler::CallDescriptor* desc =
+ auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for calling to C.
- uint32_t num_params = static_cast<uint32_t>(desc->ParameterCount());
+ uint32_t num_params =
+ static_cast<uint32_t>(call_descriptor->ParameterCount());
__ PrepareCCall(num_params, arg_regs);
// Set parameters (in sp[0], sp[8], ...).
@@ -449,7 +492,7 @@ class LiftoffCompiler {
constexpr size_t kInputShift = 1; // Input 0 is the call target.
compiler::LinkageLocation loc =
- desc->GetInputLocation(param + kInputShift);
+ call_descriptor->GetInputLocation(param + kInputShift);
if (loc.IsRegister()) {
Register reg = Register::from_code(loc.AsRegister());
// Load address of that parameter to the register.
@@ -465,126 +508,209 @@ class LiftoffCompiler {
__ CallC(ext_ref, num_params);
// Load return value.
- compiler::LinkageLocation return_loc = desc->GetReturnLocation(0);
+ compiler::LinkageLocation return_loc =
+ call_descriptor->GetReturnLocation(0);
DCHECK(return_loc.IsRegister());
Register return_reg = Register::from_code(return_loc.AsRegister());
if (return_reg != res_reg) {
- __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+ DCHECK_EQ(MachineRepresentation::kWord32,
+ sig.GetReturn(0).representation());
+ __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
}
}
- void I32UnOp(bool (LiftoffAssembler::*emit_fn)(Register, Register),
- ExternalReference (*fallback_fn)(Isolate*)) {
+ template <ValueType type, class EmitFn>
+ void EmitUnOp(EmitFn fn) {
+ static RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetUnaryOpTargetRegister(kGpReg));
- LiftoffRegister src_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- if (!emit_fn || !(asm_->*emit_fn)(dst_reg.gp(), src_reg.gp())) {
+ LiftoffRegister dst = pinned.set(__ GetUnaryOpTargetRegister(rc));
+ LiftoffRegister src = __ PopToRegister(rc, pinned);
+ fn(dst, src);
+ __ PushRegister(type, dst);
+ }
+
+ void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
+ Register),
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
+ if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn(asm_->isolate());
- Register args[] = {src_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- }
- __ PushRegister(kWasmI32, dst_reg);
+ Register args[] = {src.gp()};
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref);
+ };
+ EmitUnOp<kWasmI32>(emit_with_c_fallback);
}
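// ---- Illustrative sketch (not part of this patch) ----
// The fallback pattern used by {EmitI32UnOpWithCFallback} above: the platform
// hook may decline (return false), in which case the operation is done by a C
// helper instead. Standalone version with function pointers in place of
// assembler methods and external references; names are invented.
#include <cstdint>

using TryEmitInline = bool (*)(uint32_t* dst, uint32_t src);
using CHelper = uint32_t (*)(uint32_t src);

void UnOpWithCFallback(TryEmitInline try_emit, CHelper fallback, uint32_t* dst,
                       uint32_t src) {
  if (try_emit != nullptr && try_emit(dst, src)) return;  // handled inline
  *dst = fallback(src);                                   // out-of-line path
}
// ---- end sketch ----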
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
-#define CASE_UNOP(opcode, type, fn, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##UnOp(&LiftoffAssembler::emit_##fn, ext_ref_fn); \
+#define CASE_I32_UNOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.gp(), src.gp()); \
+ }); \
+ break;
+#define CASE_FLOAT_UNOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasm##type>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.fp(), src.fp()); \
+ }); \
break;
switch (opcode) {
- CASE_UNOP(I32Eqz, I32, i32_eqz, nullptr)
- CASE_UNOP(I32Clz, I32, i32_clz, nullptr)
- CASE_UNOP(I32Ctz, I32, i32_ctz, nullptr)
- CASE_UNOP(I32Popcnt, I32, i32_popcnt,
- &ExternalReference::wasm_word32_popcnt)
+ CASE_I32_UNOP(I32Clz, i32_clz)
+ CASE_I32_UNOP(I32Ctz, i32_ctz)
+ case kExprI32Popcnt:
+ EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
+ &ExternalReference::wasm_word32_popcnt);
+ break;
+ case kExprI32Eqz:
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32_set_cond(kEqual, dst.gp(), src.gp());
+ });
+ break;
+ CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
+ CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_UNOP
+#undef CASE_I32_UNOP
+#undef CASE_FLOAT_UNOP
}
- void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
- Register)) {
+ template <ValueType type, typename EmitFn>
+ void EmitMonomorphicBinOp(EmitFn fn) {
+ static constexpr RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- (asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
- __ PushRegister(kWasmI32, dst_reg);
+ LiftoffRegister dst = pinned.set(__ GetBinaryOpTargetRegister(rc));
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(rc, pinned));
+ LiftoffRegister lhs = __ PopToRegister(rc, pinned);
+ fn(dst, lhs, rhs);
+ __ PushRegister(type, dst);
}
- void I32CCallBinOp(ExternalReference ext_ref) {
+ template <ValueType result_type, RegClass src_rc, typename EmitFn>
+ void EmitBinOpWithDifferentResultType(EmitFn fn) {
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- Register args[] = {lhs_reg.gp(), rhs_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- __ PushRegister(kWasmI32, dst_reg);
- }
-
- void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
- DoubleRegister,
- DoubleRegister)) {
- LiftoffRegList pinned;
- LiftoffRegister target_reg =
- pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kFpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned);
- (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
- __ PushRegister(kWasmF32, target_reg);
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister lhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister dst = __ GetUnusedRegister(reg_class_for(result_type));
+ fn(dst, lhs, rhs);
+ __ PushRegister(result_type, dst);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
-#define CASE_BINOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
- return type##BinOp(&LiftoffAssembler::emit_##fn);
-#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
- break;
+#define CASE_I32_BINOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_FLOAT_BINOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasm##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_I32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_F32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpWithDifferentResultType<kWasmI32, kFpReg>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_SHIFTOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
+ });
+#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ Register args[] = {lhs.gp(), rhs.gp()}; \
+ auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref); \
+ });
switch (opcode) {
- CASE_BINOP(I32Add, I32, i32_add)
- CASE_BINOP(I32Sub, I32, i32_sub)
- CASE_BINOP(I32Mul, I32, i32_mul)
- CASE_BINOP(I32And, I32, i32_and)
- CASE_BINOP(I32Ior, I32, i32_or)
- CASE_BINOP(I32Xor, I32, i32_xor)
- CASE_BINOP(I32Shl, I32, i32_shl)
- CASE_BINOP(I32ShrS, I32, i32_sar)
- CASE_BINOP(I32ShrU, I32, i32_shr)
+ CASE_I32_BINOP(I32Add, i32_add)
+ CASE_I32_BINOP(I32Sub, i32_sub)
+ CASE_I32_BINOP(I32Mul, i32_mul)
+ CASE_I32_BINOP(I32And, i32_and)
+ CASE_I32_BINOP(I32Ior, i32_or)
+ CASE_I32_BINOP(I32Xor, i32_xor)
+ CASE_I32_CMPOP(I32Eq, kEqual)
+ CASE_I32_CMPOP(I32Ne, kUnequal)
+ CASE_I32_CMPOP(I32LtS, kSignedLessThan)
+ CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
+ CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
+ CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
+ CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
+ CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
+ CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
+ CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
+ CASE_F32_CMPOP(F32Eq, kEqual)
+ CASE_F32_CMPOP(F32Ne, kUnequal)
+ CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
+ CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
+ CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
+ CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
+ CASE_SHIFTOP(I32Shl, i32_shl)
+ CASE_SHIFTOP(I32ShrS, i32_sar)
+ CASE_SHIFTOP(I32ShrU, i32_shr)
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
- CASE_BINOP(F32Add, F32, f32_add)
- CASE_BINOP(F32Sub, F32, f32_sub)
- CASE_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F32Add, F32, f32_add)
+ CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
+ CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F64Add, F64, f64_add)
+ CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
+ CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_BINOP
+#undef CASE_I32_BINOP
+#undef CASE_FLOAT_BINOP
+#undef CASE_I32_CMPOP
+#undef CASE_F32_CMPOP
+#undef CASE_SHIFTOP
#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
- CheckStackSizeLimit(decoder);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
- unsupported(decoder, "i64.const");
+ // The {VarState} stores constant values as int32_t, thus we only store
+    // 64-bit constants in this field if they fit in an int32_t. Larger values
+    // cannot be used as immediate values anyway, so we can also just put them
+    // in a register immediately.
+ int32_t value_i32 = static_cast<int32_t>(value);
+ if (value_i32 == value) {
+ __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
+ } else {
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmI64, reg);
+ }
}
void F32Const(Decoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
- CheckStackSizeLimit(decoder);
}
void F64Const(Decoder* decoder, Value* result, double value) {
- unsupported(decoder, "f64.const");
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmF64, reg);
}
void Drop(Decoder* decoder, const Value& value) {
@@ -603,11 +729,11 @@ class LiftoffCompiler {
if (values.size() > 1) return unsupported(decoder, "multi-return");
RegClass rc = reg_class_for(values[0].type);
LiftoffRegister reg = __ PopToRegister(rc);
- __ MoveToReturnRegister(reg);
+ __ MoveToReturnRegister(reg, values[0].type);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
- static_cast<uint32_t>(call_desc_->StackParameterCount()));
+ static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
@@ -618,37 +744,36 @@ class LiftoffCompiler {
case kRegister:
__ PushRegister(slot.type(), slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ cache_state()->stack_state.emplace_back(operand.type,
slot.i32_const());
break;
case kStack: {
auto rc = reg_class_for(operand.type);
LiftoffRegister reg = __ GetUnusedRegister(rc);
- __ Fill(reg, operand.index);
+ __ Fill(reg, operand.index, operand.type);
__ PushRegister(slot.type(), reg);
break;
}
}
- CheckStackSizeLimit(decoder);
}
void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
+ ValueType type = dst_slot.type();
if (dst_slot.is_reg()) {
LiftoffRegister slot_reg = dst_slot.reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1);
+ __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
}
- ValueType type = dst_slot.type();
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
- __ Fill(dst_reg, __ cache_state()->stack_height() - 1);
+ __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -663,7 +788,7 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
@@ -701,7 +826,6 @@ class LiftoffCompiler {
return unsupported(decoder, "global > kPointerSize");
__ Load(value, addr, no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
- CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
@@ -742,16 +866,76 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, &cont_false);
+ __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
Br(target);
__ bind(&cont_false);
}
+ // Generate a branch table case, potentially reusing previously generated
+ // stack transfer code.
+ void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ MovableLabel& label = br_targets[br_depth];
+ if (label.get()->is_bound()) {
+ __ jmp(label.get());
+ } else {
+ __ bind(label.get());
+ Br(decoder->control_at(br_depth));
+ }
+ }
+
+ // Generate a branch table for input in [min, max).
+ // TODO(wasm): Generate a real branch table (like TF TableSwitch).
+ void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+ LiftoffRegister value, uint32_t min, uint32_t max,
+ BranchTableIterator<validate>& table_iterator,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ DCHECK_LT(min, max);
+ // Check base case.
+ if (max == min + 1) {
+ DCHECK_EQ(min, table_iterator.cur_index());
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ return;
+ }
+
+ uint32_t split = min + (max - min) / 2;
+ Label upper_half;
+ __ LoadConstant(tmp, WasmValue(split));
+ __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
+ tmp.gp());
+ // Emit br table for lower half:
+ GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
+ br_targets);
+ __ bind(&upper_half);
+ // Emit br table for upper half:
+ GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
+ br_targets);
+ }
+
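// ---- Illustrative sketch (not part of this patch) ----
// The divide-and-conquer shape of {GenerateBrTable} above, but resolving the
// lookup directly instead of emitting compare-and-branch code: each step
// handles the index range [min, max) and splits it in the middle until a
// single entry remains. Out-of-range values take the default, mirroring the
// bounds check emitted in {BrTable}. Names are invented.
#include <cstdint>
#include <vector>

uint32_t BrTableLookup(const std::vector<uint32_t>& targets, uint32_t value,
                       uint32_t min, uint32_t max) {
  if (max == min + 1) return targets[min];  // base case: one entry left
  uint32_t split = min + (max - min) / 2;
  return value < split ? BrTableLookup(targets, value, min, split)
                       : BrTableLookup(targets, value, split, max);
}

uint32_t BrTableLookupWithDefault(const std::vector<uint32_t>& targets,
                                  uint32_t value, uint32_t default_target) {
  if (value >= targets.size()) return default_target;
  return BrTableLookup(targets, value, 0,
                       static_cast<uint32_t>(targets.size()));
}
// ---- end sketch ----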
void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
- unsupported(decoder, "br_table");
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(kGpReg));
+ BranchTableIterator<validate> table_iterator(decoder, operand);
+ std::map<uint32_t, MovableLabel> br_targets;
+
+ if (operand.table_count > 0) {
+ LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadConstant(tmp, WasmValue(uint32_t{operand.table_count}));
+ Label case_default;
+ __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
+ value.gp(), tmp.gp());
+
+ GenerateBrTable(decoder, tmp, value, 0, operand.table_count,
+ table_iterator, br_targets);
+
+ __ bind(&case_default);
+ }
+
+ // Generate the default case.
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK(!table_iterator.has_next());
}
void Else(Decoder* decoder, Control* if_block) {
@@ -760,28 +944,45 @@ class LiftoffCompiler {
__ cache_state()->Steal(if_block->else_state->state);
}
- Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
+ Label* AddOutOfLineTrap(wasm::WasmCodePosition position,
+ Builtins::Name builtin, uint32_t pc = 0) {
DCHECK(!FLAG_wasm_no_bounds_checks);
- // The pc is needed exactly if trap handlers are enabled.
- DCHECK_EQ(pc != 0, env_->use_trap_handler);
+ // The pc is needed for memory OOB trap with trap handler enabled. Other
+ // callers should not even compute it.
+ DCHECK_EQ(pc != 0, builtin == Builtins::kThrowWasmTrapMemOutOfBounds &&
+ env_->use_trap_handler);
- out_of_line_code_.push_back(OutOfLineCode::Trap(
- Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
+ out_of_line_code_.push_back(OutOfLineCode::Trap(builtin, position, pc));
return out_of_line_code_.back().label.get();
}
- void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
- wasm::WasmCodePosition position, LiftoffRegList pinned) {
- DCHECK(!env_->use_trap_handler);
- if (FLAG_wasm_no_bounds_checks) return;
+ // Returns true if the memory access is statically known to be out of bounds
+  // (in which case a jump to the trap was emitted); returns false otherwise.
+ bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
+ Register index, LiftoffRegList pinned) {
+ const bool statically_oob =
+ access_size > max_size_ || offset > max_size_ - access_size;
+
+ if (!statically_oob &&
+ (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
+ return false;
+ }
- Label* trap_label = AddOutOfLineTrap(position);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds);
- if (access_size > max_size_ || offset > max_size_ - access_size) {
- // The access will be out of bounds, even for the largest memory.
+ if (statically_oob) {
__ emit_jump(trap_label);
- return;
+ Control* current_block = decoder->control_at(0);
+ if (current_block->reachable()) {
+ current_block->reachability = kSpecOnlyReachable;
+ }
+ return true;
}
+
+ DCHECK(!env_->use_trap_handler);
+ DCHECK(!FLAG_wasm_no_bounds_checks);
+
uint32_t end_offset = offset + access_size - 1;
// If the end offset is larger than the smallest memory, dynamically check
@@ -793,8 +994,8 @@ class LiftoffCompiler {
__ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
if (end_offset >= min_size_) {
- __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
+ end_offset_reg.gp(), mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size.
@@ -802,8 +1003,9 @@ class LiftoffCompiler {
__ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
- __ emit_i32_compare(index, effective_size_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+ effective_size_reg.gp());
+ return false;
}
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
@@ -842,22 +1044,23 @@ class LiftoffCompiler {
}
void GenerateRuntimeCall(int num_args, Register* args) {
- compiler::CallDescriptor* desc =
- compiler::Linkage::GetRuntimeCallDescriptor(
- compilation_zone_, Runtime::kWasmTraceMemory, num_args,
- compiler::Operator::kNoProperties,
- compiler::CallDescriptor::kNoFlags);
+ auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
+ compilation_zone_, Runtime::kWasmTraceMemory, num_args,
+ compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
// Currently, only one argument is supported. More arguments require some
// caution for the parallel register moves (reuse StackTransferRecipe).
DCHECK_EQ(1, num_args);
constexpr size_t kInputShift = 1; // Input 0 is the call target.
- compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
+ compiler::LinkageLocation param_loc =
+ call_descriptor->GetInputLocation(kInputShift);
if (param_loc.IsRegister()) {
Register reg = Register::from_code(param_loc.AsRegister());
- __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+ __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
} else {
DCHECK(param_loc.IsCallerFrameSlot());
- __ PushCallerFrameSlot(LiftoffRegister(args[0]));
+ __ PushCallerFrameSlot(LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
}
// Allocate the codegen zone if not done before.
@@ -873,14 +1076,11 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported load type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -890,10 +1090,11 @@ class LiftoffCompiler {
__ Load(value, addr, index, operand.offset, type, pinned,
&protected_load_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_load_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
}
__ PushRegister(value_type, value);
- CheckStackSizeLimit(decoder);
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -905,16 +1106,13 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported store type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
RegClass rc = reg_class_for(value_type);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(rc));
Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -922,7 +1120,9 @@ class LiftoffCompiler {
__ Store(addr, index, operand.offset, value, type, pinned,
&protected_store_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_store_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
}
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
@@ -942,11 +1142,17 @@ class LiftoffCompiler {
const Value args[], Value returns[]) {
if (operand.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return"))
+ return;
- compiler::CallDescriptor* call_desc =
+ auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- __ PrepareCall(operand.sig, call_desc);
+ __ PrepareCall(operand.sig, call_descriptor);
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -965,14 +1171,166 @@ class LiftoffCompiler {
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ FinishCall(operand.sig, call_desc);
+ __ FinishCall(operand.sig, call_descriptor);
}
- void CallIndirect(Decoder* decoder, const Value& index,
+ void CallIndirect(Decoder* decoder, const Value& index_val,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- unsupported(decoder, "call_indirect");
+ if (operand.sig->return_count() > 1) {
+ return unsupported(decoder, "multi-return");
+ }
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return")) {
+ return;
+ }
+
+ // Assume only one table for now.
+ uint32_t table_index = 0;
+
+ // Pop the index.
+ LiftoffRegister index = __ PopToRegister(kGpReg);
+    // If that register is still in use after popping, move the index into a
+    // fresh register, because we are about to modify it.
+ if (__ cache_state()->is_used(index)) {
+ LiftoffRegister new_index =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
+ __ Move(new_index, index, kWasmI32);
+ index = new_index;
+ }
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ // Get three temporary registers.
+ LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp_const =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister* explicit_context = nullptr;
+
+ // Bounds check against the table size.
+ Label* invalid_func_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
+
+ static constexpr LoadType kPointerLoadType =
+ kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+ static constexpr int kFixedArrayOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag;
+
+ uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
+ DCHECK_GE(canonical_sig_num, 0);
+ DCHECK_GE(kMaxInt, canonical_sig_num);
+
+ if (WASM_CONTEXT_TABLES) {
+ // Compare against table size stored in {wasm_context->table_size}.
+ __ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size),
+ sizeof(uint32_t));
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+ // Load the table from {wasm_context->table}
+ __ LoadFromContext(table.gp(), offsetof(WasmContext, table),
+ kPointerSize);
+ // Load the signature from {wasm_context->table[$index].sig_id}
+      // == wasm_context.table + $index * #sizeof(IndirectFunctionTableEntry)
+ // + #offsetof(sig_id)
+ __ LoadConstant(
+ tmp_const,
+ WasmValue(static_cast<uint32_t>(sizeof(IndirectFunctionTableEntry))));
+ __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
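+      // {index} now holds the byte offset of the entry within the table.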
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load,
+ pinned);
+
+ __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load the target address from {wasm_context->table[$index].target}
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, target), kPointerLoadType,
+ pinned);
+
+ // Load the context from {wasm_context->table[$index].context}
+ // TODO(wasm): directly allocate the correct context register to avoid
+ // any potential moves.
+ __ Load(tmp_const, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, context), kPointerLoadType,
+ pinned);
+ explicit_context = &tmp_const;
+ } else {
+ // Compare against table size, which is a patchable constant.
+ uint32_t table_size =
+ env_->module->function_tables[table_index].initial_size;
+
+ __ LoadConstant(tmp_const, WasmValue(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+
+ wasm::GlobalHandleAddress function_table_handle_address =
+ env_->function_tables[table_index];
+ __ LoadConstant(table, WasmPtrValue(function_table_handle_address),
+ RelocInfo::WASM_GLOBAL_HANDLE);
+ __ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
+
+ // Load signature from the table and check.
+ // The table is a FixedArray; signatures are encoded as SMIs.
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0,
+ "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+ __ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
+ // Shift index such that it's the offset of the signature in the
+ // FixedArray.
+ __ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
+
+ // Load the signature.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
+ kPointerLoadType, pinned);
+
+ __ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load code object.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
+ kPointerLoadType, pinned);
+
+ // Move the pointer from the Code object to the instruction start.
+ __ LoadConstant(tmp_const,
+ WasmPtrValue(Code::kHeaderSize - kHeapObjectTag));
+ __ emit_ptrsize_add(scratch.gp(), scratch.gp(), tmp_const.gp());
+ }
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ auto call_descriptor =
+ compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+
+ Register target = scratch.gp();
+ __ PrepareCall(operand.sig, call_descriptor, &target, explicit_context);
+ __ CallIndirect(operand.sig, call_descriptor, target);
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_descriptor);
}
+
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
@@ -1009,11 +1367,11 @@ class LiftoffCompiler {
private:
LiftoffAssembler* const asm_;
- compiler::CallDescriptor* const call_desc_;
+ compiler::CallDescriptor* const descriptor_;
compiler::ModuleEnv* const env_;
// {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
- const uint32_t min_size_;
- const uint32_t max_size_;
+ const uint64_t min_size_;
+ const uint64_t max_size_;
const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
@@ -1027,6 +1385,9 @@ class LiftoffCompiler {
// code generation (in FinishCompilation).
std::unique_ptr<Zone>* codegen_zone_;
SafepointTableBuilder safepoint_table_builder_;
+  // The pc offset of the instructions that reserve the stack frame. Needed to
+  // patch in the actually required stack size at the end.
+ uint32_t pc_offset_stack_frame_construction_ = 0;
void TraceCacheState(Decoder* decoder) const {
#ifdef DEBUG
@@ -1061,11 +1422,11 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module = env_ ? env_->module : nullptr;
- auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, counters()->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_,
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_descriptor, env_,
runtime_exception_support_,
&liftoff_.source_position_table_builder_,
protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h
index bb5ef5be4ae..eedbf54a17a 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-register.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-register.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,24 +16,29 @@ namespace v8 {
namespace internal {
namespace wasm {
-enum RegClass { kNoReg, kGpReg, kFpReg };
+static constexpr bool kNeedI64RegPair = kPointerSize == 4;
+
+enum RegClass : uint8_t {
+ kGpReg,
+ kFpReg,
+ // {kGpRegPair} equals {kNoReg} if {kNeedI64RegPair} is false.
+ kGpRegPair,
+ kNoReg = kGpRegPair + kNeedI64RegPair
+};
+
+enum RegPairHalf : uint8_t { kLowWord, kHighWord };
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
+ return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
+ ? kGpRegPair
+ : type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
}
-// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
-// for all other values of rc.
-template <RegClass rc>
-using RegForClass = typename std::conditional<
- rc == kGpReg, Register,
- typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
-
// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
@@ -47,14 +50,28 @@ static constexpr int kMaxFpRegCode =
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kMaxGpRegCode+1 .. kMaxGpRegCode + kMaxFpRegCode] encodes fp registers.
+// I64 values on 32 bit platforms are stored in two registers, both encoded in
+// the same LiftoffRegister value.
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
-static_assert(kAfterMaxLiftoffRegCode < 256,
- "liftoff register codes can be stored in one uint8_t");
+static constexpr int kBitsPerLiftoffRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kAfterMaxLiftoffRegCode - 1);
+static constexpr int kBitsPerGpRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kMaxGpRegCode);
+static constexpr int kBitsPerGpRegPair = 1 + 2 * kBitsPerGpRegCode;
class LiftoffRegister {
+ static constexpr int needed_bits =
+ Max(kNeedI64RegPair ? kBitsPerGpRegPair : 0, kBitsPerLiftoffRegCode);
+ using storage_t = std::conditional<
+ needed_bits <= 8, uint8_t,
+ std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
+ static_assert(8 * sizeof(storage_t) >= needed_bits &&
+ 8 * sizeof(storage_t) < 2 * needed_bits,
+ "right type has been chosen");
+
public:
explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
DCHECK_EQ(reg, gp());
@@ -67,6 +84,7 @@ class LiftoffRegister {
static LiftoffRegister from_liftoff_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
+ DCHECK_EQ(code, static_cast<storage_t>(code));
return LiftoffRegister(code);
}
@@ -81,12 +99,40 @@ class LiftoffRegister {
}
}
+ static LiftoffRegister ForPair(LiftoffRegister low, LiftoffRegister high) {
+ DCHECK(kNeedI64RegPair);
+ DCHECK_NE(low, high);
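+    // The pair is encoded as the low register code in the low bits, the high
+    // register code above it, and a marker bit so {is_pair()} can detect it.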
+ storage_t combined_code = low.gp().code() |
+ high.gp().code() << kBitsPerGpRegCode |
+ 1 << (2 * kBitsPerGpRegCode);
+ return LiftoffRegister(combined_code);
+ }
+
+ constexpr bool is_pair() const {
+ return kNeedI64RegPair && (code_ & (1 << (2 * kBitsPerGpRegCode))) != 0;
+ }
constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
constexpr bool is_fp() const {
return code_ >= kAfterMaxLiftoffGpRegCode &&
code_ < kAfterMaxLiftoffFpRegCode;
}
+ LiftoffRegister low() const { return LiftoffRegister(low_gp()); }
+
+ LiftoffRegister high() const { return LiftoffRegister(high_gp()); }
+
+ Register low_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code(code_ & kCodeMask);
+ }
+
+ Register high_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code((code_ >> kBitsPerGpRegCode) & kCodeMask);
+ }
+
Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
@@ -97,31 +143,46 @@ class LiftoffRegister {
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- int liftoff_code() const { return code_; }
+ uint32_t liftoff_code() const {
+ DCHECK(is_gp() || is_fp());
+ return code_;
+ }
RegClass reg_class() const {
- DCHECK(is_gp() || is_fp());
- return is_gp() ? kGpReg : kFpReg;
+ return is_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
}
bool operator==(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ == other.code_;
}
bool operator!=(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ != other.code_;
}
+ bool overlaps(const LiftoffRegister other) const {
+ if (is_pair()) return low().overlaps(other) || high().overlaps(other);
+ if (other.is_pair()) return *this == other.low() || *this == other.high();
+ return *this == other;
+ }
private:
- uint8_t code_;
+ storage_t code_;
- explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
+ explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
"LiftoffRegister can efficiently be passed by value");
inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
- return reg.is_gp() ? os << "gp" << reg.gp().code()
- : os << "fp" << reg.fp().code();
+ if (reg.is_pair()) {
+ return os << "<gp" << reg.low_gp().code() << "+" << reg.high_gp().code()
+ << ">";
+ } else if (reg.is_gp()) {
+ return os << "gp" << reg.gp().code();
+ } else {
+ return os << "fp" << reg.fp().code();
+ }
}
class LiftoffRegList {
@@ -144,16 +205,30 @@ class LiftoffRegList {
}
LiftoffRegister set(LiftoffRegister reg) {
- regs_ |= storage_t{1} << reg.liftoff_code();
+ if (reg.is_pair()) {
+ regs_ |= storage_t{1} << reg.low().liftoff_code();
+ regs_ |= storage_t{1} << reg.high().liftoff_code();
+ } else {
+ regs_ |= storage_t{1} << reg.liftoff_code();
+ }
return reg;
}
LiftoffRegister clear(LiftoffRegister reg) {
- regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ if (reg.is_pair()) {
+ regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
+ regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
+ } else {
+ regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ }
return reg;
}
bool has(LiftoffRegister reg) const {
+ if (reg.is_pair()) {
+ DCHECK_EQ(has(reg.low()), has(reg.high()));
+ reg = reg.low();
+ }
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
@@ -211,7 +286,7 @@ class LiftoffRegList {
template <typename... Regs>
static LiftoffRegList ForRegs(Regs... regs) {
std::array<LiftoffRegister, sizeof...(regs)> regs_arr{
- LiftoffRegister(regs)...};
+ {LiftoffRegister(regs)...}};
LiftoffRegList list;
for (LiftoffRegister reg : regs_arr) list.set(reg);
return list;
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 50ab1e82c87..fda98aea624 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -2,180 +2,542 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
+#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, and the first
+// stack slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ addiu(sp, sp, 0);
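+  // Placeholder; the real stack frame size is patched in later by
+  // {PatchPrepareStackFrame}.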
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+  // We can't run out of space here; just pass something big enough so that the
+  // assembler does not try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.addiu(sp, sp, -bytes);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::li(reg.low_gp(), Operand(low_word));
+ TurboAssembler::li(reg.high_gp(), Operand(high_word));
+ break;
+ }
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ lw(dst, liftoff::GetContextOperand());
+ DCHECK_EQ(4, size);
+ lw(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sw(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ lw(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ Register src = no_reg;
+ if (offset_reg != no_reg) {
+ src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ }
+ MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
+ : MemOperand(src_addr, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8U:
+ lbu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8S:
+ lb(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI32Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI32Load:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ MemOperand src_op_upper = (offset_reg != no_reg)
+ ? MemOperand(src, offset_imm + 4)
+ : MemOperand(src_addr, offset_imm + 4);
+ TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ break;
+ }
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store: {
+ MemOperand dst_op_upper = (offset_reg != no_reg)
+ ? MemOperand(dst, offset_imm + 4)
+ : MemOperand(dst_addr, offset_imm + 4);
+ TurboAssembler::Usw(src.high_gp(), dst_op_upper);
+ TurboAssembler::Usw(src.low_gp(), dst_op);
+ break;
+ }
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
+ : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::mov(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sw(reg.low_gp(), dst);
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister low = GetUnusedRegister(kGpReg);
+ LiftoffRegister high = GetUnusedRegister(kGpReg);
+
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::li(low.gp(), Operand(low_word));
+ TurboAssembler::li(high.gp(), Operand(high_word));
+
+ sw(low.gp(), dst);
+ sw(high.gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ lw(reg.low_gp(), src);
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ lw(reg, liftoff::GetHalfStackSlot(half_index));
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ emit_i32_add(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
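+  // If {dst} does not alias {lhs}, preload {dst} with 1 before the compare.
+  // Otherwise the result is materialized after the branch, to avoid
+  // clobbering {lhs}.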
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+  // If the condition does not hold, set {dst} to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fd63198e249..d215f4178ce 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -2,180 +2,487 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, and the first
+// stack slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
+
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ daddiu(sp, sp, 0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+  // We can't run out of space here; just pass something big enough so that the
+  // assembler does not try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.daddiu(sp, sp, -bytes);
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ ld(dst, liftoff::GetContextOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ lw(dst, MemOperand(dst, offset));
+ } else {
+ ld(dst, MemOperand(dst, offset));
+ }
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sd(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ ld(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ MemOperand src_op(src_addr, offset_imm);
+ if (offset_reg != no_reg) {
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ src_op = MemOperand(src, offset_imm);
+ }
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulwu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Uld(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::Usd(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ LiftoffRegister dst = reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sd(reg.gp(), dst);
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ sd(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ ld(reg.gp(), src);
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ TurboAssembler::Daddu(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+  // If the condition does not hold, set {dst} to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 2d62d88decf..efbb6896d62 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
+#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("ppc " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index eebb8e4720e..62145fadca4 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
+#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("s390 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 2b3b750fc47..c1f316072da 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#ifndef V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
+#define V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
+// is located at rbp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
- // is located at rbp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- rbp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
}
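As a quick sanity check of the new offset arithmetic (standalone snippet, not part of the patch; it assumes LiftoffAssembler::kStackSlotSize is 8 bytes): slot 0 sits right below the stack marker and wasm context, and each further slot or 4-byte half steps down from there.

#include <cstdint>

constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + kStackSlotSize;

constexpr int32_t SlotDisp(uint32_t index) {  // mirrors GetStackSlot
  return -kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
}
constexpr int32_t HalfSlotDisp(uint32_t half_index) {  // mirrors GetHalfStackSlot
  return -kFirstStackSlotOffset -
         static_cast<int32_t>(half_index) * (kStackSlotSize / 2);
}

static_assert(SlotDisp(0) == -24, "first slot at rbp-24");
static_assert(SlotDisp(1) == -32, "slots grow towards lower addresses");
static_assert(HalfSlotDisp(2) == -32 && HalfSlotDisp(3) == -36,
              "half slots advance in 4-byte steps");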
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -31,25 +39,58 @@ inline Operand GetContextOperand() { return Operand(rbp, -16); }
// stack for a call to C.
static constexpr Register kCCallLastArgAddrReg = rax;
+inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
+ uint32_t offset_imm, LiftoffRegList pinned) {
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+ // immediate value (in 31 bits, interpreted as signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
+ if (offset == no_reg) return Operand(addr, offset_imm);
+ return Operand(addr, offset, times_1, offset_imm);
+}
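The DCHECK leans on the engine-level guarantee that wasm memories stay under 2 GB, so offset_imm always fits the signed 32-bit displacement of an x64 addressing mode and the old fallback that materialized large offsets in a spare register is no longer needed. A one-line check of that bound (standalone, not V8 code):

#include <cstdint>
#include <limits>

static_assert((uint64_t{1} << 31) - 1 ==
                  static_cast<uint64_t>(std::numeric_limits<int32_t>::max()),
              "any uint31 offset fits a signed int32 displacement");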
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- subp(rsp, Immediate(bytes));
+ // We can't run out of space, just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
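The frame size is only known once the whole function body has been compiled, so PrepareStackFrame emits a sub-rsp instruction with a placeholder immediate and PatchPrepareStackFrame later rewrites it in place through a second Assembler pointed at buffer_ + offset. A simplified sketch of the same reserve-then-patch idea over a plain byte buffer (illustrative names, not V8 API):

#include <cstdint>
#include <cstring>
#include <vector>

// Emit a zero 32-bit immediate, remember where it lives, patch it later.
size_t EmitPlaceholderImm32(std::vector<uint8_t>* buf) {
  size_t offset = buf->size();
  uint32_t zero = 0;
  const uint8_t* p = reinterpret_cast<const uint8_t*>(&zero);
  buf->insert(buf->end(), p, p + sizeof(zero));
  return offset;
}

void PatchImm32(std::vector<uint8_t>* buf, size_t offset, uint32_t value) {
  std::memcpy(buf->data() + offset, &value, sizeof(value));
}

int main() {
  std::vector<uint8_t> code;
  size_t fixup = EmitPlaceholderImm32(&code);  // like PrepareStackFrame()
  // ... body gets compiled, the number of spill slots becomes known ...
  PatchImm32(&code, fixup, 16 + 8 * 5);        // like PatchPrepareStackFrame()
  return 0;
}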
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
+ if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
- movl(reg.gp(), Immediate(value.to_i32()));
+ movl(reg.gp(), Immediate(value.to_i32(), rmode));
+ }
+ break;
+ case kWasmI64:
+ if (RelocInfo::IsNone(rmode)) {
+ TurboAssembler::Set(reg.gp(), value.to_i64());
+ } else {
+ movq(reg.gp(), value.to_i64(), rmode);
}
break;
case kWasmF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
@@ -79,42 +120,46 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- Operand src_op = offset_reg == no_reg
- ? Operand(src_addr, offset_imm)
- : Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- movl(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
+ Operand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
movzxbl(dst.gp(), src_op);
break;
case LoadType::kI32Load8S:
movsxbl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8S:
+ movsxbq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
movzxwl(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsxwl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16S:
+ movsxwq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
movl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32S:
+ movsxlq(dst.gp(), src_op);
+ break;
case LoadType::kI64Load:
movq(dst.gp(), src_op);
break;
case LoadType::kF32Load:
Movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ Movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -124,28 +169,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- Operand dst_op = offset_reg == no_reg
- ? Operand(dst_addr, offset_imm)
- : Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = GetUnusedRegister(kGpReg, pinned).gp();
- movl(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
+ Operand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
movb(dst_op, src.gp());
break;
case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
movw(dst_op, src.gp());
break;
case StoreType::kI32Store:
+ case StoreType::kI64Store32:
movl(dst_op, src.gp());
break;
case StoreType::kI64Store:
@@ -154,72 +191,118 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kF32Store:
Movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ Movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src);
- } else {
- Movsd(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(dst.gp(), src);
+ break;
+ case kWasmI64:
+ movq(dst.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src.gp());
+ if (type == kWasmI32) {
+ movl(dst, src);
} else {
- Movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmI64, type);
+ movq(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(dst, reg.gp());
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ Movss(dst, src);
} else {
- Movsd(dst, reg.fp());
+ DCHECK_EQ(kWasmF64, type);
+ Movsd(dst, src);
+ }
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ movl(dst, reg.gp());
+ break;
+ case kWasmI64:
+ movq(dst, reg.gp());
+ break;
+ case kWasmF32:
+ Movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ Movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
movl(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ // We could use movq, but this would require a temporary register. For
+ // simplicity (and to avoid potentially having to spill another register),
+ // we use two movl instructions.
+ int32_t low_word = static_cast<int32_t>(value.to_i64());
+ int32_t high_word = static_cast<int32_t>(value.to_i64() >> 32);
+ movl(dst, Immediate(low_word));
+ movl(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
movl(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -228,16 +311,31 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
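As the comment above explains, a single 64-bit store of the constant would need a temporary register, so the i64 is spilled as two 32-bit halves instead. A standalone check that the truncate/shift split reassembles to the original value:

#include <cassert>
#include <cstdint>

int main() {
  int64_t value = int64_t{0x0123456789ABCDEF};
  int32_t low_word = static_cast<int32_t>(value);        // same split as above
  int32_t high_word = static_cast<int32_t>(value >> 32);
  uint64_t rebuilt = (uint64_t{static_cast<uint32_t>(high_word)} << 32) |
                     static_cast<uint32_t>(low_word);
  assert(rebuilt == static_cast<uint64_t>(value));
  return 0;
}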
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(reg.gp(), src);
- } else {
- Movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(reg.gp(), src);
+ break;
+ case kWasmI64:
+ movq(reg.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
@@ -279,7 +377,8 @@ COMMUTATIVE_I32_BINOP(xor, xor)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
assm->movl(kScratchRegister, lhs);
@@ -293,9 +392,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// register. If lhs is rcx, lhs is now the scratch register.
bool use_scratch = false;
if (rhs != rcx) {
- use_scratch =
- lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
- if (use_scratch) assm->movl(kScratchRegister, rcx);
+ use_scratch = lhs == rcx ||
+ assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
+ pinned.has(LiftoffRegister(rcx));
+ if (use_scratch) assm->movq(kScratchRegister, rcx);
if (lhs == rcx) lhs = kScratchRegister;
assm->movl(rcx, rhs);
}
@@ -305,27 +405,23 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
(assm->*emit_shift)(dst);
// Restore rcx if needed.
- if (use_scratch) assm->movl(rcx, kScratchRegister);
+ if (use_scratch) assm->movq(rcx, kScratchRegister);
}
} // namespace liftoff
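Variable shifts on x64 take their count only from CL, so rcx is special here: the count has to land in rcx, and a live value in rcx (now also one merely pinned by the caller) is parked in the scratch register around the shift. The save and restore were also widened from movl to movq so a live 64-bit value survives intact. A hypothetical plain-C++ sketch of the shuffle (not V8 code):

#include <cstdint>

uint32_t ShlClSketch(uint32_t lhs, uint32_t count, uint32_t* rcx,
                     bool rcx_holds_live_value) {
  uint32_t scratch = 0;
  if (rcx_holds_live_value) scratch = *rcx;  // movq(kScratchRegister, rcx)
  *rcx = count & 31;                         // movl(rcx, rhs); CL is used mod 32
  uint32_t dst = lhs << *rcx;                // shll_cl(dst)
  if (rcx_holds_live_value) *rcx = scratch;  // movq(rcx, kScratchRegister)
  return dst;
}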
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- testl(src, src);
- setcc(zero, dst);
- movzxbl(dst, dst);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -419,18 +515,128 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
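wasm's f32.neg is a pure sign-bit flip (also for negative zero and NaN), which is why the code XORs the IEEE-754 sign bit instead of computing 0 - x. A standalone check of the bit trick:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

float NegViaSignBit(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= uint32_t{1} << 31;  // flip only the sign bit, leave all others alone
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

int main() {
  assert(NegViaSignBit(1.5f) == -1.5f);
  assert(!std::signbit(NegViaSignBit(-0.0f)));  // -0.0 becomes +0.0
  return 0;
}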
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmpl(lhs, rhs);
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmpl(lhs, rhs);
+ break;
+ case kWasmI64:
+ cmpq(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ testl(lhs, lhs);
+ }
+
j(cond, label);
}
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmpl(lhs, rhs);
+ } else {
+ testl(lhs, lhs);
+ }
+
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ Ucomiss(lhs, rhs);
+  // If PF is one, one of the operands was NaN. This needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ movl(dst, Immediate(1));
+ } else {
+ xorl(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+ bind(&cont);
+}
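Ucomiss raises the parity flag when either operand is NaN; in that case every wasm f32 comparison yields 0 except f32.ne, which yields 1, hence the extra branch before the setcc. The same decision table written out in plain C++ (a sketch, not V8 code):

#include <cassert>
#include <cmath>

enum class Cond { kEq, kNe, kLt, kLe, kGt, kGe };

bool F32SetCondSketch(Cond cond, float lhs, float rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) {
    return cond == Cond::kNe;  // "Return 1 for f32.ne, 0 for all other cases."
  }
  switch (cond) {
    case Cond::kEq: return lhs == rhs;
    case Cond::kNe: return lhs != rhs;
    case Cond::kLt: return lhs < rhs;
    case Cond::kLe: return lhs <= rhs;
    case Cond::kGt: return lhs > rhs;
    case Cond::kGe: return lhs >= rhs;
  }
  return false;
}

int main() {
  assert(F32SetCondSketch(Cond::kNe, NAN, 1.0f));
  assert(!F32SetCondSketch(Cond::kEq, NAN, NAN));
  assert(!F32SetCondSketch(Cond::kLt, 1.0f, NAN));
  return 0;
}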
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
Register limit = GetUnusedRegister(kGpReg).gp();
LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
@@ -449,26 +655,37 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index, RegPairHalf) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ PushCallerFrameSlot(src.reg(), src.type());
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
pushq(Immediate(src.i32_const()));
break;
}
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- pushq(reg.gp());
- } else {
- subp(rsp, Immediate(kPointerSize));
- Movsd(Operand(rsp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ case kWasmI64:
+ pushq(reg.gp());
+ break;
+ case kWasmF32:
+ subp(rsp, Immediate(kPointerSize));
+ Movss(Operand(rsp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ subp(rsp, Immediate(kPointerSize));
+ Movsd(Operand(rsp, 0), reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -552,6 +769,16 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ popq(kScratchRegister);
+ target = kScratchRegister;
+ }
+ call(target);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
movp(addr, rsp);
@@ -565,4 +792,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#endif // V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
diff --git a/chromium/v8/src/wasm/compilation-manager.cc b/chromium/v8/src/wasm/compilation-manager.cc
index a19a228f1fe..4779a9f4237 100644
--- a/chromium/v8/src/wasm/compilation-manager.cc
+++ b/chromium/v8/src/wasm/compilation-manager.cc
@@ -4,6 +4,7 @@
#include "src/wasm/compilation-manager.h"
#include "src/base/template-utils.h"
+#include "src/wasm/module-compiler.h"
#include "src/objects-inl.h"
@@ -46,6 +47,15 @@ std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
void CompilationManager::TearDown() { jobs_.clear(); }
+void CompilationManager::AbortAllJobs() {
+ // Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
+ std::vector<AsyncCompileJob*> copy;
+
+ for (auto entry : jobs_) copy.push_back(entry.first);
+
+ for (auto job : copy) job->Abort();
+}
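Since job->Abort() ends up removing the job from jobs_, iterating jobs_ directly would invalidate the loop's iterator; snapshotting the keys first is the standard way around that. The pattern in isolation (standalone snippet, not V8 code):

#include <cassert>
#include <map>
#include <vector>

int AbortAllSketch(std::map<int, int>* jobs) {
  std::vector<int> copy;
  for (const auto& entry : *jobs) copy.push_back(entry.first);
  int aborted = 0;
  for (int key : copy) {
    jobs->erase(key);  // stands in for job->Abort() erasing itself from jobs_
    ++aborted;
  }
  return aborted;
}

int main() {
  std::map<int, int> jobs{{1, 0}, {2, 0}, {3, 0}};
  assert(AbortAllSketch(&jobs) == 3 && jobs.empty());
  return 0;
}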
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/compilation-manager.h b/chromium/v8/src/wasm/compilation-manager.h
index e359b11c265..279f3e872e0 100644
--- a/chromium/v8/src/wasm/compilation-manager.h
+++ b/chromium/v8/src/wasm/compilation-manager.h
@@ -9,12 +9,13 @@
#include "src/handles.h"
#include "src/isolate.h"
-#include "src/wasm/module-compiler.h"
namespace v8 {
namespace internal {
namespace wasm {
+class AsyncCompileJob;
+
// The CompilationManager manages a list of active WebAssembly compile jobs. The
// manager owns the memory of the compile jobs and can trigger the abortion of
// compile jobs. If the isolate tears down, the CompilationManager makes sure
@@ -29,11 +30,17 @@ class CompilationManager {
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
- // Removes {job} from the list of active compile jobs.
+ // Remove {job} from the list of active compile jobs.
std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
+ // Cancel all AsyncCompileJobs and delete their state immediately.
void TearDown();
+ // Cancel all AsyncCompileJobs so that they are not processed any further,
+ // but delay the deletion of their state until all tasks accessing the
+ // AsyncCompileJob finish their execution.
+ void AbortAllJobs();
+
private:
AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
std::unique_ptr<byte[]> bytes_copy,
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 04d918b0a4f..98aad07fcb8 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -37,6 +37,12 @@ struct WasmException;
return true; \
}())
+#define RET_ON_PROTOTYPE_OPCODE(flag) \
+ DCHECK(!this->module_ || !this->module_->is_asm_js()); \
+ if (!FLAG_experimental_wasm_##flag) { \
+ this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ }
+
#define CHECK_PROTOTYPE_OPCODE(flag) \
DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
@@ -50,25 +56,25 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \
- V(I32AtomicAdd, Uint32) \
- V(I32AtomicSub, Uint32) \
- V(I32AtomicAnd, Uint32) \
- V(I32AtomicOr, Uint32) \
- V(I32AtomicXor, Uint32) \
- V(I32AtomicExchange, Uint32) \
V(I32AtomicLoad8U, Uint8) \
- V(I32AtomicAdd8U, Uint8) \
- V(I32AtomicSub8U, Uint8) \
- V(I32AtomicAnd8U, Uint8) \
- V(I32AtomicOr8U, Uint8) \
- V(I32AtomicXor8U, Uint8) \
- V(I32AtomicExchange8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
+ V(I32AtomicSub, Uint32) \
+ V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
+ V(I32AtomicAnd, Uint32) \
+ V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
+ V(I32AtomicOr, Uint32) \
+ V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
+ V(I32AtomicXor, Uint32) \
+ V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
+ V(I32AtomicExchange, Uint32) \
+ V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \
V(I32AtomicCompareExchange, Uint32) \
V(I32AtomicCompareExchange8U, Uint8) \
@@ -246,12 +252,12 @@ struct BreakDepthOperand {
template <Decoder::ValidateFlag validate>
struct CallIndirectOperand {
uint32_t table_index;
- uint32_t index;
+ uint32_t sig_index;
FunctionSig* sig = nullptr;
unsigned length = 0;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
if (!VALIDATE(table_index == 0)) {
@@ -648,7 +654,8 @@ class WasmDecoder : public Decoder {
uint32_t count = decoder->consume_u32v("local count");
if (decoder->failed()) return false;
- if ((count + type_list->size()) > kV8MaxWasmFunctionLocals) {
+ DCHECK_LE(type_list->size(), kV8MaxWasmFunctionLocals);
+ if (count > kV8MaxWasmFunctionLocals - type_list->size()) {
decoder->error(decoder->pc() - 1, "local count too large");
return false;
}
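The rewritten bound check sidesteps unsigned wrap-around: in the old form, a count close to UINT32_MAX makes count + type_list->size() wrap and slip under the limit, while subtracting the already-bounded current size from the limit keeps the arithmetic in range. A minimal illustration (the constant is a stand-in, not the real kV8MaxWasmFunctionLocals):

#include <cassert>
#include <cstdint>

constexpr uint32_t kMaxLocals = 50000;

bool OverflowingCheck(uint32_t count, uint32_t current) {
  return count + current > kMaxLocals;  // sum can wrap around to a tiny value
}
bool SafeCheck(uint32_t count, uint32_t current) {
  return count > kMaxLocals - current;  // current <= kMaxLocals is DCHECKed
}

int main() {
  uint32_t huge = 0xFFFFFFFFu;          // hostile local count from the wire
  assert(!OverflowingCheck(huge, 10));  // wraps to 9, the bound is missed
  assert(SafeCheck(huge, 10));          // correctly rejected
  return 0;
}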
@@ -674,7 +681,7 @@ class WasmDecoder : public Decoder {
type = kWasmS128;
break;
}
- // else fall through to default.
+ V8_FALLTHROUGH;
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -789,10 +796,10 @@ class WasmDecoder : public Decoder {
inline bool Complete(const byte* pc, CallIndirectOperand<validate>& operand) {
if (!VALIDATE(module_ != nullptr &&
- operand.index < module_->signatures.size())) {
+ operand.sig_index < module_->signatures.size())) {
return false;
}
- operand.sig = module_->signatures[operand.index];
+ operand.sig = module_->signatures[operand.sig_index];
return true;
}
@@ -802,7 +809,7 @@ class WasmDecoder : public Decoder {
return false;
}
if (!Complete(pc, operand)) {
- errorf(pc + 1, "invalid signature index: #%u", operand.index);
+ errorf(pc + 1, "invalid signature index: #%u", operand.sig_index);
return false;
}
return true;
@@ -1097,6 +1104,7 @@ class WasmDecoder : public Decoder {
}
}
}
+ V8_FALLTHROUGH;
}
default:
V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
@@ -1534,8 +1542,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(Br, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
EndControl();
break;
@@ -1543,28 +1553,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(BrIf, cond, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrIf, cond, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
break;
}
case kExprBrTable: {
BranchTableOperand<validate> operand(this, this->pc_);
BranchTableIterator<validate> iterator(this, operand);
- if (!this->Validate(this->pc_, operand, control_.size())) break;
auto key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, operand, control_.size())) break;
uint32_t br_arity = 0;
+ std::vector<bool> br_targets(control_.size());
while (iterator.has_next()) {
const uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
if (!VALIDATE(target < control_.size())) {
- this->error(pos, "improper branch in br_table");
+ this->errorf(pos,
+ "improper branch in br_table target %u (depth %u)",
+ i, target);
break;
}
+ // Avoid redundant break target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
// Check that label types match up.
Control* c = control_at(target);
uint32_t arity = c->br_merge()->arity;
@@ -1572,15 +1592,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
br_arity = arity;
} else if (!VALIDATE(br_arity == arity)) {
this->errorf(pos,
- "inconsistent arity in br_table target %d"
+ "inconsistent arity in br_table target %u"
" (previous was %u, this one %u)",
i, br_arity, arity);
}
if (!TypeCheckBreak(c)) break;
- BreakTo(c);
}
+ if (this->failed()) break;
+
-      CALL_INTERFACE_IF_REACHABLE(BrTable, operand, key);
+      if (control_.back().reachable()) {
+        CALL_INTERFACE(BrTable, operand, key);
+ for (uint32_t depth = control_depth(); depth-- > 0;) {
+ if (!br_targets[depth]) continue;
+ control_at(depth)->br_merge()->reached = true;
+ }
+ }
len = 1 + iterator.length();
EndControl();
@@ -2249,10 +2276,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- inline void BreakTo(Control* c) {
- if (control_.back().reachable()) c->br_merge()->reached = true;
- }
-
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
@@ -2344,6 +2367,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
+ RET_ON_PROTOTYPE_OPCODE(se);
+ }
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index 57ee78f91cf..217a5ff3b1a 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -369,13 +369,13 @@ class WasmGraphBuildingInterface {
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, nullptr, operand, args, returns, false);
+ DoCall(decoder, nullptr, operand.sig, operand.index, args, returns);
}
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, index.node, operand, args, returns, true);
+ DoCall(decoder, index.node, operand.sig, operand.sig_index, args, returns);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -782,30 +782,29 @@ class WasmGraphBuildingInterface {
return result;
}
- template <typename Operand>
void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
- TFNode* index_node, const Operand& operand, const Value args[],
- Value returns[], bool is_indirect) {
- int param_count = static_cast<int>(operand.sig->parameter_count());
+ TFNode* index_node, FunctionSig* sig, uint32_t index,
+ const Value args[], Value returns[]) {
+ int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
arg_nodes[0] = index_node;
for (int i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- if (is_indirect) {
- builder_->CallIndirect(operand.index, arg_nodes, &return_nodes,
+ if (index_node) {
+ builder_->CallIndirect(index, arg_nodes, &return_nodes,
decoder->position());
} else {
- builder_->CallDirect(operand.index, arg_nodes, &return_nodes,
+ builder_->CallDirect(index, arg_nodes, &return_nodes,
decoder->position());
}
- int return_count = static_cast<int>(operand.sig->return_count());
+ int return_count = static_cast<int>(sig->return_count());
for (int i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
// The invoked function could have used grow_memory, so we need to
- // reload mem_size and mem_start
+ // reload mem_size and mem_start.
LoadContextIntoSsa(ssa_env_);
}
};
@@ -1002,7 +1001,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
- os << " // sig #" << operand.index;
+ os << " // sig #" << operand.sig_index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
}
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 4a2e610b99b..0a09feddf2b 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -207,18 +207,12 @@ class ModuleCompiler {
compiler::ModuleEnv* module_env,
ErrorThrower* thrower);
- static MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower,
- std::unique_ptr<WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- private:
MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes);
+ private:
Isolate* isolate_;
WasmModule* module_;
const std::shared_ptr<Counters> async_counters_;
@@ -268,7 +262,7 @@ class JSToWasmWrapperCache {
target->builtin_index() == Builtins::kIllegal ||
target->builtin_index() == Builtins::kWasmCompileLazy) {
it.rinfo()->set_target_address(
- isolate, wasm_code.GetCode()->instruction_start());
+ wasm_code.GetCode()->instruction_start());
break;
}
}
@@ -277,9 +271,9 @@ class JSToWasmWrapperCache {
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
DCHECK(!it.done());
it.rinfo()->set_js_to_wasm_address(
- isolate, wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
+ wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
}
return code;
}
@@ -308,11 +302,12 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+ MaybeHandle<JSArrayBuffer> memory);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
+ // Run the start function, if any.
+ bool ExecuteStartFunction();
private:
// Represents the initialized state of a table.
@@ -340,8 +335,8 @@ class InstanceBuilder {
Handle<WasmCompiledModule> compiled_module_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
+ Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
- WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
std::vector<SanitizedImport> sanitized_imports_;
const std::shared_ptr<Counters>& async_counters() const {
@@ -424,91 +419,6 @@ class InstanceBuilder {
Handle<WasmInstanceObject> instance);
};
-// TODO(titzer): move to wasm-objects.cc
-void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (FLAG_wasm_jit_to_native) {
- if (native_module) {
- TRACE("Finalizing %zu {\n", native_module->instance_id);
- } else {
- TRACE("Finalized already cleaned up compiled module\n");
- }
- } else {
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
-
- if (compiled_module->use_trap_handler()) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- DisallowHeapAllocation no_gc;
- FixedArray* code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Code* code = Code::cast(code_table->get(i));
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(
- Smi::FromInt(trap_handler::kInvalidIndex));
- }
- }
- }
- }
- WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
- }
-
- // weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. We still want to maintain the links between instances, to
- // release the WasmCompiledModule corresponding to the WasmModuleInstance
- // being finalized here.
- WasmModuleObject* wasm_module = nullptr;
- if (!weak_wasm_module->cleared()) {
- wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template = wasm_module->compiled_module();
-
- TRACE("chain before {\n");
- TRACE_CHAIN(current_template);
- TRACE("}\n");
-
- DCHECK(!current_template->has_prev_instance());
- if (current_template == compiled_module) {
- if (!compiled_module->has_next_instance()) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(compiled_module->next_instance());
- }
- }
- }
-
- compiled_module->RemoveFromChain();
-
- if (wasm_module != nullptr) {
- TRACE("chain after {\n");
- TRACE_CHAIN(wasm_module->compiled_module());
- TRACE("}\n");
- }
- compiled_module->reset_weak_owning_instance();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
-}
-
// This is used in ProcessImports.
// When importing other modules' exports, we need to ask
// the exporter for a WasmToWasm wrapper. To do that, we need to
@@ -517,8 +427,9 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
class SetOfNativeModuleModificationScopes final {
public:
void Add(NativeModule* module) {
- module->SetExecutable(false);
- native_modules_.insert(module);
+ if (native_modules_.insert(module).second) {
+ module->SetExecutable(false);
+ }
}
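std::unordered_set::insert reports through .second whether the element was newly inserted, so SetExecutable(false) now runs at most once per native module even when Add is called repeatedly for the same module. The idiom in isolation:

#include <cassert>
#include <unordered_set>

int main() {
  std::unordered_set<int> seen;
  int made_writable = 0;
  for (int module : {7, 7, 9, 7}) {
    if (seen.insert(module).second) ++made_writable;  // true only on first sight
  }
  assert(made_writable == 2);  // modules 7 and 9, each handled exactly once
  return 0;
}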
~SetOfNativeModuleModificationScopes() {
@@ -531,138 +442,28 @@ class SetOfNativeModuleModificationScopes final {
std::unordered_set<NativeModule*> native_modules_;
};
-} // namespace
-
-MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
+void EnsureWasmContextTable(WasmContext* wasm_context, int table_size) {
+ if (wasm_context->table) return;
+ wasm_context->table_size = table_size;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ calloc(table_size, sizeof(IndirectFunctionTableEntry)));
+ for (int i = 0; i < table_size; i++) {
+ wasm_context->table[i].sig_id = kInvalidSigIndex;
}
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, asm_js_script,
- asm_js_offset_table_bytes);
}
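
For orientation, a minimal sketch of how a call_indirect is expected to consult the context table that EnsureWasmContextTable() above allocates. It assumes the {sig_id, context, target} entry fields used later in this patch; the actual check is emitted as generated code, so this helper is purely hypothetical.

    // Hypothetical mirror of the generated call_indirect sequence.
    Address ResolveIndirectCall(WasmContext* wasm_context, uint32_t index,
                                int32_t expected_sig_id) {
      if (index >= static_cast<uint32_t>(wasm_context->table_size))
        return nullptr;  // trap: table index out of bounds
      const IndirectFunctionTableEntry& entry = wasm_context->table[index];
      // Slots initialized above hold kInvalidSigIndex, so an uninitialized
      // entry never matches a real signature id.
      if (entry.sig_id != expected_sig_id) return nullptr;  // trap: sig mismatch
      return entry.target;  // the caller also switches to entry.context
    }
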
-MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
- ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kWasmOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, Handle<Script>(),
- Vector<const byte>());
-}
+} // namespace
-MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
- &InstanceFinalizer);
- return builder.Build();
-}
-
-MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
- MaybeHandle<WasmModuleObject> module = SyncCompile(isolate, thrower, bytes);
- DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
-
- return SyncInstantiate(isolate, thrower, module.ToHandleChecked(), imports,
- memory);
-}
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe = resolver->Reject(Utils::ToLocal(context),
- Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe =
- resolver->Resolve(Utils::ToLocal(context), Utils::ToLocal(result));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ auto instance = builder.Build();
+ if (!instance.is_null() && builder.ExecuteStartFunction()) {
+ return instance;
}
- ResolvePromise(isolate, handle(isolate->context()), promise,
- instance_object.ToHandleChecked());
-}
-
-void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes, bool is_shared) {
- if (!FLAG_wasm_async_compilation) {
- // Asynchronous compilation disabled; fall back on synchronous compilation.
- ErrorThrower thrower(isolate, "WasmCompile");
- MaybeHandle<WasmModuleObject> module_object;
- if (is_shared) {
- // Make a copy of the wire bytes to avoid concurrent modification.
- std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- i::wasm::ModuleWireBytes bytes_copy(copy.get(),
- copy.get() + bytes.length());
- module_object = SyncCompile(isolate, &thrower, bytes_copy);
- } else {
- // The wire bytes are not shared, OK to use them directly.
- module_object = SyncCompile(isolate, &thrower, bytes);
- }
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- ResolvePromise(isolate, handle(isolate->context()), promise, module);
- return;
- }
-
- if (FLAG_wasm_test_streaming) {
- std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_engine()
- ->compilation_manager()
- ->StartStreamingCompilation(isolate, handle(isolate->context()),
- promise);
- streaming_decoder->OnBytesReceived(bytes.module_bytes());
- streaming_decoder->Finish();
- return;
- }
- // Make a copy of the wire bytes in case the user program changes them
- // during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
- isolate, std::move(copy), bytes.length(), handle(isolate->context()),
- promise);
+ return {};
}
Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
@@ -845,6 +646,7 @@ Address CompileLazy(Isolate* isolate) {
int func_index = static_cast<int>(result->index());
if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) {
+ int patched = 0;
Handle<FixedArray> exp_deopt_data =
Handle<FixedArray>::cast(exp_deopt_data_entry);
@@ -854,22 +656,36 @@ Address CompileLazy(Isolate* isolate) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
// of <export_table, index> followed by undefined values. Use this
// information here to patch all export tables.
+ Address target = result->instructions().start();
Handle<Foreign> foreign_holder =
- isolate->factory()->NewForeign(result->instructions().start(), TENURED);
+ isolate->factory()->NewForeign(target, TENURED);
for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
DisallowHeapAllocation no_gc;
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- exp_table->set(compiler::FunctionTableCodeOffset(exp_index),
- *foreign_holder);
+
+ if (WASM_CONTEXT_TABLES) {
+ // TODO(titzer): patching of function tables for lazy compilation
+ // only works for a single instance.
+ instance->wasm_context()->get()->table[exp_index].target = target;
+ } else {
+ int table_index = compiler::FunctionTableCodeOffset(exp_index);
+ DCHECK_EQ(Foreign::cast(exp_table->get(table_index))->foreign_address(),
+ lazy_stub_or_copy->instructions().start());
+
+ exp_table->set(table_index, *foreign_holder);
+ ++patched;
+ }
}
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
// After processing, remove the list of exported entries, such that we don't
// do the patching redundantly.
compiled_module->lazy_compile_data()->set(
func_index, isolate->heap()->undefined_value());
+ if (!WASM_CONTEXT_TABLES) {
+ DCHECK_LT(0, patched);
+ USE(patched);
+ }
}
return result->instructions().start();
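
For readers of the export-table patching above, the assumed layout of exp_deopt_data (as described in the comment) is pairs of <export_table, index> padded with undefined; the concrete tables and indices below are made up for illustration.

    // exp_deopt_data[0] = export_table_A    exp_deopt_data[1] = Smi(3)
    // exp_deopt_data[2] = export_table_B    exp_deopt_data[3] = Smi(7)
    // exp_deopt_data[4] = undefined         <- the loop stops here
    // For each pair, the loop either writes {target} into the wasm_context
    // table (WASM_CONTEXT_TABLES) or stores the new Foreign into exp_table
    // at FunctionTableCodeOffset(index).
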
@@ -880,8 +696,7 @@ compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
DisallowHeapAllocation no_gc;
WasmModule* module = compiled_module->shared()->module();
if (FLAG_wasm_jit_to_native) {
- NativeModule* native_module = compiled_module->GetNativeModule();
- compiler::ModuleEnv result(module, native_module->function_tables(),
+ compiler::ModuleEnv result(module, std::vector<Address>{},
std::vector<Handle<Code>>{},
BUILTIN_CODE(isolate, WasmCompileLazy),
compiled_module->use_trap_handler());
@@ -911,6 +726,20 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
compilation_timer.Start();
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
+
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ WasmName name = Vector<const char>::cast(
+ compiled_module->shared()->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+
+ TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
+
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
static_cast<uint32_t>(func_index));
@@ -937,16 +766,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
FunctionBody body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- std::string func_name;
- {
- WasmName name = Vector<const char>::cast(
- compiled_module->shared()->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
- }
+
ErrorThrower thrower(isolate, "WasmLazyCompile");
compiler::WasmCompilationUnit unit(isolate, &module_env,
compiled_module->GetNativeModule(), body,
@@ -989,7 +809,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
if (!code_wrapper.IsCodeObject()) {
const wasm::WasmCode* wasm_code = code_wrapper.GetWasmCode();
- Assembler::FlushICache(isolate, wasm_code->instructions().start(),
+ Assembler::FlushICache(wasm_code->instructions().start(),
wasm_code->instructions().size());
counters->wasm_generated_code_size()->Increment(
static_cast<int>(wasm_code->instructions().size()));
@@ -997,8 +817,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
static_cast<int>(wasm_code->reloc_info().size()));
} else {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
@@ -1062,8 +881,12 @@ const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
wasm_to_wasm->constant_pool(), \
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
DCHECK(!it.done()); \
- it.rinfo()->set_js_to_wasm_address(isolate, \
- new_target->instructions().start()); \
+ DCHECK_EQ(WasmCode::kLazyStub, \
+ isolate->wasm_engine() \
+ ->code_manager() \
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) \
+ ->kind()); \
+ it.rinfo()->set_js_to_wasm_address(new_target->instructions().start()); \
it.next(); \
DCHECK(it.done()); \
} while (0)
@@ -1077,7 +900,7 @@ void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
DCHECK_EQ(Builtins::kWasmCompileLazy,
Code::GetCodeFromTargetAddress(it.rinfo()->target_address())
->builtin_index());
- it.rinfo()->set_target_address(isolate, new_target->instruction_start());
+ it.rinfo()->set_target_address(new_target->instruction_start());
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1169,8 +992,6 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
DCHECK(!non_compiled_functions.empty() || !wasm_to_wasm_callee.is_null());
}
- TRACE_LAZY("Compiling function %d.\n", func_to_return_idx);
-
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
CompileFunction(isolate, instance, func_to_return_idx);
@@ -1224,8 +1045,7 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
continue;
}
DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(isolate,
- callee_compiled->instruction_start());
+ it.rinfo()->set_target_address(callee_compiled->instruction_start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
@@ -1251,6 +1071,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
CompileFunction(isolate, instance, exported_func_index);
{
DisallowHeapAllocation no_gc;
+ int patched = 0;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
RelocIterator it(*js_to_wasm_caller,
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
@@ -1263,10 +1084,21 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
DCHECK_NOT_NULL(callee_compiled);
if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
+ ++patched;
} else {
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
+ ->kind());
it.rinfo()->set_js_to_wasm_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
+ ++patched;
}
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1313,6 +1145,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
->module()
->functions[caller_func_index]
.code.offset();
+ int num_non_compiled_functions = 0;
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1333,6 +1166,8 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
non_compiled_functions.push_back(Nothing<uint32_t>());
continue;
}
+ ++num_non_compiled_functions;
+
uint32_t called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
DCHECK_LT(called_func_index,
@@ -1344,6 +1179,10 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
maybe_func_to_return_idx = Just(called_func_index);
}
}
+
+ TRACE_LAZY("Found %d non-compiled functions in caller.\n",
+ num_non_compiled_functions);
+ USE(num_non_compiled_functions);
}
uint32_t func_to_return_idx = 0;
@@ -1365,10 +1204,12 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
DCHECK_NOT_NULL(ret);
+ int patched = 0;
if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
// We can finish it all here by compiling the target wasm function and
// patching the wasm_to_wasm caller.
WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
+ ++patched;
} else {
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
@@ -1376,7 +1217,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
// Now patch the code object with all functions which are now compiled. This
// will pick up any other compiled functions, not only {ret}.
size_t idx = 0;
- size_t patched = 0;
for (RelocIterator
it(wasm_caller->instructions(), wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1388,13 +1228,22 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* callee_compiled =
compiled_module->GetNativeModule()->GetCode(lookup);
if (callee_compiled->kind() != WasmCode::kFunction) continue;
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
+ ->kind());
it.rinfo()->set_wasm_call_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
- TRACE_LAZY("Patched %zu location(s) in the caller.\n", patched);
}
+
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
return ret;
}
@@ -1679,8 +1528,7 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
-// static
-MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
@@ -1703,21 +1551,20 @@ bool compile_lazy(const WasmModule* module) {
(FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}
-void FlushICache(Isolate* isolate, const wasm::NativeModule* native_module) {
+void FlushICache(const wasm::NativeModule* native_module) {
for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
const wasm::WasmCode* code = native_module->GetCode(i);
if (code == nullptr) continue;
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
+void FlushICache(Handle<FixedArray> functions) {
for (int i = 0, e = functions->length(); i < e; ++i) {
if (!functions->get(i)->IsCode()) continue;
Code* code = Code::cast(functions->get(i));
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
@@ -1811,7 +1658,8 @@ WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
return WasmCodeWrapper(code);
}
// Clone the lazy builtin into the native module.
- return WasmCodeWrapper(native_module->CloneLazyBuiltinInto(func_index));
+ return WasmCodeWrapper(
+ native_module->CloneLazyBuiltinInto(code, func_index));
}
}
@@ -1825,7 +1673,7 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<FixedArray> code_table, wasm::NativeModule* native_module,
uint32_t func_index, Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>* table_export_count) {
+ std::unordered_map<uint32_t, uint32_t>* num_table_exports) {
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code =
EnsureExportedLazyDeoptData(isolate, instance, code_table,
@@ -1845,10 +1693,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#4: export table
// #5: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
uint32_t this_idx = 2 * table_export_entry->second;
--table_export_entry->second;
@@ -1881,10 +1729,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#2: export table
// #3: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
--table_export_entry->second;
uint32_t this_idx = 2 * table_export_entry->second;
@@ -2192,19 +2040,17 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
return result;
}
-InstanceBuilder::InstanceBuilder(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback)
+InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
module_(module_object->compiled_module()->shared()->module()),
async_counters_(isolate->async_counters()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_(memory),
- instance_finalizer_callback_(instance_finalizer_callback) {
+ memory_(memory) {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -2222,12 +2068,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
// TODO(6792): No longer needed once WebAssembly code is off heap.
- // Use base::Optional to be able to close the scope before executing the start
- // function.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate_->heap());
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
// From here on, we expect the build pipeline to run without exiting to JS.
- // Exception is when we run the startup function.
DisallowJavascriptExecution no_js(isolate_);
// Record build time into correct bucket, then build instance.
TimedHistogramScope wasm_instantiate_module_time_scope(
@@ -2238,14 +2080,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
- // TODO(mtrofin): remove code_table and old_code_table
+ // TODO(mtrofin): remove code_table
// when FLAG_wasm_jit_to_native is not needed
Handle<FixedArray> code_table;
Handle<FixedArray> wrapper_table;
- // We keep around a copy of the old code table, because we'll be replacing
- // imports for the new instance, and then we need the old imports to be
- // able to relocate.
- Handle<FixedArray> old_code_table;
MaybeHandle<WasmInstanceObject> owner;
// native_module is the one we're building now, old_module
// is the one we clone from. They point to the same place if
@@ -2284,7 +2122,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
} else {
TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = handle(original->code_table(), isolate_);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
code_table = handle(compiled_module_->code_table(), isolate_);
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
@@ -2345,7 +2182,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
compiled_module_->GetNativeModule()->instance_id);
} else {
code_table = handle(compiled_module_->code_table(), isolate_);
- old_code_table = factory->CopyFixedArray(code_table);
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
@@ -2549,11 +2385,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
if (FLAG_wasm_jit_to_native) {
- FlushICache(isolate_, native_module);
+ FlushICache(native_module);
} else {
- FlushICache(isolate_, code_table);
+ FlushICache(code_table);
}
- FlushICache(isolate_, wrapper_table);
+ FlushICache(wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
@@ -2570,8 +2406,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Insert the compiled module into the weak list of compiled modules.
//--------------------------------------------------------------------------
{
- Handle<Object> global_handle =
- isolate_->global_handles()->Create(*instance);
Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
if (!owner.is_null()) {
// Publish the new instance to the instances chain.
@@ -2580,9 +2414,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
module_object_->set_compiled_module(*compiled_module_);
compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- instance_finalizer_callback_,
- v8::WeakCallbackType::kFinalizer);
+ WasmInstanceObject::InstallFinalizer(isolate_, instance);
}
//--------------------------------------------------------------------------
@@ -2607,41 +2439,20 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Execute the start function if one was specified.
+ // Create a wrapper for the start function.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
- HandleScope scope(isolate_);
int start_index = module_->start_function_index;
- WasmCodeWrapper startup_code = EnsureExportedLazyDeoptData(
+ WasmCodeWrapper start_code = EnsureExportedLazyDeoptData(
isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index,
+ isolate_, module_, start_code, start_index,
compiled_module_->use_trap_handler());
- Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+ start_function_ = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(startup_code, counters());
- // Call the JS function.
- Handle<Object> undefined = factory->undefined_value();
- // Close the modification scopes, so we can execute the start function.
- modification_scope.reset();
- native_module_modification_scope.reset();
- {
- // We're OK with JS execution here. The instance is fully setup.
- AllowJavascriptExecution allow_js(isolate_);
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- // It's unfortunate that the new instance is already linked in the
- // chain. However, we need to set up everything before executing the
- // startup function, such that stack trace information can be generated
- // correctly already in the start function.
- return {};
- }
- }
+ RecordStats(start_code, counters());
}
DCHECK(!isolate_->has_pending_exception());
@@ -2655,6 +2466,22 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
return instance;
}
+bool InstanceBuilder::ExecuteStartFunction() {
+ if (start_function_.is_null()) return true; // No start function.
+
+ HandleScope scope(isolate_);
+ // Call the JS function.
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ return false;
+ }
+ return true;
+}
+
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
Handle<String> module_name,
@@ -2939,6 +2766,11 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
i += kFunctionTableEntrySize) {
table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
+ WasmContext* wasm_context = nullptr;
+ if (WASM_CONTEXT_TABLES) {
+ wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, imported_cur_size);
+ }
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_cur_size; ++i) {
@@ -2956,7 +2788,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
auto target = Handle<WasmExportedFunction>::cast(val);
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
FunctionSig* sig = nullptr;
Handle<Code> code =
MakeWasmToWasmWrapper(isolate_, target, nullptr, &sig,
@@ -2968,34 +2800,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(i), *code);
} else {
- const wasm::WasmCode* exported_code =
- target->GetWasmCode().GetWasmCode();
- wasm::NativeModule* exporting_module = exported_code->owner();
Handle<WasmInstanceObject> imported_instance =
handle(target->instance());
- imported_wasm_instances.Set(imported_instance, imported_instance);
+ const wasm::WasmCode* exported_code =
+ target->GetWasmCode().GetWasmCode();
FunctionSig* sig = imported_instance->module()
->functions[exported_code->index()]
.sig;
- wasm::WasmCode* wrapper_code =
- exporting_module->GetExportedWrapper(exported_code->index());
- if (wrapper_code == nullptr) {
- WasmContext* other_context =
- imported_instance->wasm_context()->get();
- Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
- isolate_, target->GetWasmCode(), sig,
- reinterpret_cast<Address>(other_context));
- set_of_native_module_scopes.Add(exporting_module);
- wrapper_code = exporting_module->AddExportedWrapper(
- wrapper, exported_code->index());
- }
- int sig_index = module_->signature_map.Find(sig);
- Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
- wrapper_code->instructions().start(), TENURED);
- table_instance.function_table->set(
- compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
- table_instance.function_table->set(
- compiler::FunctionTableCodeOffset(i), *foreign_holder);
+ auto& entry = wasm_context->table[i];
+ entry.context = imported_instance->wasm_context()->get();
+ entry.sig_id = module_->signature_map.Find(sig);
+ entry.target = exported_code->instructions().start();
}
}
@@ -3187,6 +3002,20 @@ void InstanceBuilder::ProcessExports(
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
Handle<JSFunction>::null());
+
+ // If an imported WebAssembly function gets exported, the exported function
+  // has to be identical to the imported function. Therefore we put all
+ // imported WebAssembly functions into the js_wrappers_ list.
+ for (int index = 0, end = static_cast<int>(module_->import_table.size());
+ index < end; ++index) {
+ WasmImport& import = module_->import_table[index];
+ if (import.kind == kExternalFunction) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ }
+ }
+ }
}
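
An illustrative re-export chain for the identity requirement handled above (module and export names are invented):

    // Module A:  (export "f" (func $f))
    // Module B:  (import "A" "f" (func $f_imp))  (export "g" (func $f_imp))
    // JS API requirement: instanceB.exports.g === instanceA.exports.f,
    // which is why js_wrappers_[import.index] reuses the imported JSFunction
    // instead of compiling a fresh wrapper.
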
Handle<JSObject> exports_object;
@@ -3345,12 +3174,6 @@ void InstanceBuilder::InitializeTables(
Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
size_t function_table_count = module_->function_tables.size();
- std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
-
- wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
- std::vector<GlobalHandleAddress> empty;
- std::vector<GlobalHandleAddress>& old_function_tables =
- FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
Handle<FixedArray> old_function_tables_gc =
FLAG_wasm_jit_to_native
@@ -3372,9 +3195,7 @@ void InstanceBuilder::InitializeTables(
instance->set_function_tables(*rooted_function_tables);
- if (FLAG_wasm_jit_to_native) {
- DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
- } else {
+ if (!FLAG_wasm_jit_to_native) {
DCHECK_EQ(old_function_tables_gc->length(),
new_function_tables_gc->length());
}
@@ -3386,6 +3207,11 @@ void InstanceBuilder::InitializeTables(
int num_table_entries = static_cast<int>(table.initial_size);
int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, num_table_entries);
+ }
+
if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
table_instance.function_table =
@@ -3427,24 +3253,18 @@ void InstanceBuilder::InitializeTables(
GlobalHandleAddress new_func_table_addr = global_func_table.address();
GlobalHandleAddress old_func_table_addr;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
int_index, new_func_table_addr);
old_func_table_addr =
WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- } else {
- new_function_tables[int_index] = new_func_table_addr;
-
- old_func_table_addr = old_function_tables[int_index];
+ code_specialization->RelocatePointer(old_func_table_addr,
+ new_func_table_addr);
}
- code_specialization->RelocatePointer(old_func_table_addr,
- new_func_table_addr);
}
- if (FLAG_wasm_jit_to_native) {
- native_module->function_tables() = new_function_tables;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
compiled_module_->set_function_tables(*new_function_tables_gc);
}
}
@@ -3499,10 +3319,12 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
- uint32_t sig_index = module_->signature_ids[function->sig_index];
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
table_instance.function_table->set(
compiler::FunctionTableSigOffset(table_index),
- Smi::FromInt(sig_index));
+ Smi::FromInt(sig_id));
WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
isolate_, instance, code_table, native_module, func_index,
table_instance.function_table, table_index, &num_table_exports);
@@ -3517,7 +3339,17 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(table_index),
*value_to_update_with);
+
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ auto& entry = wasm_context->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = wasm_context;
+ entry.target = wasm_code.instructions().start();
+ }
+
if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->wasm wrappers for functions are
@@ -3546,31 +3378,10 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
}
table_instance.js_wrappers->set(table_index,
*js_wrappers_[func_index]);
- // When updating dispatch tables, we need to provide a wasm-to-wasm
- // wrapper for wasm_code - unless wasm_code is already a wrapper. If
- // it's a wasm-to-js wrapper, we don't need to construct a
- // wasm-to-wasm wrapper because there's no context switching required.
- // The remaining case is that it's a wasm-to-wasm wrapper, in which
- // case it's already doing "the right thing", and wrapping it again
- // would be redundant.
- if (func_index >= module_->num_imported_functions) {
- value_to_update_with = GetOrCreateIndirectCallWrapper(
- isolate_, instance, wasm_code, func_index, function->sig);
- } else {
- if (wasm_code.IsCodeObject()) {
- DCHECK(wasm_code.GetCode()->kind() == Code::WASM_TO_JS_FUNCTION ||
- wasm_code.GetCode()->kind() ==
- Code::WASM_TO_WASM_FUNCTION);
- } else {
- DCHECK(wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToJsWrapper ||
- wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToWasmWrapper);
- }
- }
- WasmTableObject::UpdateDispatchTables(table_instance.table_object,
- table_index, function->sig,
- value_to_update_with);
+ // UpdateDispatchTables() should update this instance as well.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ instance, wasm_code, func_index);
}
}
}
@@ -3686,14 +3497,18 @@ void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- RejectPromise(isolate_, context_, thrower, module_promise_);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Reject(module_promise_, thrower.Reify());
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- ResolvePromise(isolate_, context_, module_promise_, result);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Resolve(module_promise_, result);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
// A closure to run a compilation step (either as foreground or background
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index 3a8b1972d67..b41ca28ceaf 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -23,34 +23,20 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes,
- bool is_shared);
-
-V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports);
-
-V8_EXPORT_PRIVATE void CompileJsToWasmWrappers(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- Counters* counters);
+V8_EXPORT_PRIVATE
+void CompileJsToWasmWrappers(Isolate* isolate,
+ Handle<WasmCompiledModule> compiled_module,
+ Counters* counters);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes);
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index 010f1912630..109b2fc230a 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -270,7 +270,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = end_; // On error, terminate section decoding loop.
}
- void DumpModule(const ModuleResult& result) {
+ void DumpModule(const Vector<const byte> module_bytes) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -280,12 +280,13 @@ class ModuleDecoderImpl : public Decoder {
}
}
   // Files are named `HASH.{ok,failed}.wasm`.
- size_t hash = base::hash_range(start_, end_);
+ size_t hash = base::hash_range(module_bytes.start(), module_bytes.end());
EmbeddedVector<char, 32> buf;
- SNPrintF(buf, "%016zx.%s.wasm", hash, result.ok() ? "ok" : "failed");
+ SNPrintF(buf, "%016zx.%s.wasm", hash, ok() ? "ok" : "failed");
std::string name(buf.start());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
+ if (fwrite(module_bytes.start(), module_bytes.length(), 1, wasm_file) !=
+ 1) {
OFStream os(stderr);
os << "Error while dumping wasm file" << std::endl;
}
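
A hedged example of the resulting dump file, assuming FLAG_dump_wasm_module_path is "/tmp/" and the wire bytes hash to 0x00a1b2c3d4e5f607:

    //   /tmp/00a1b2c3d4e5f607.ok.wasm       (decoding succeeded)
    //   /tmp/00a1b2c3d4e5f607.failed.wasm   (decoding failed)
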
@@ -848,7 +849,6 @@ class ModuleDecoderImpl : public Decoder {
// Copy error code and location.
result.MoveErrorFrom(intermediate_result_);
}
- if (FLAG_dump_wasm_module) DumpModule(result);
return result;
}
@@ -856,6 +856,7 @@ class ModuleDecoderImpl : public Decoder {
ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) {
StartDecoding(isolate);
uint32_t offset = 0;
+ Vector<const byte> orig_bytes(start(), end() - start());
DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
@@ -878,6 +879,8 @@ class ModuleDecoderImpl : public Decoder {
section_iter.advance(true);
}
+ if (FLAG_dump_wasm_module) DumpModule(orig_bytes);
+
if (decoder.failed()) {
return decoder.toResult<std::unique_ptr<WasmModule>>(nullptr);
}
diff --git a/chromium/v8/src/wasm/wasm-api.cc b/chromium/v8/src/wasm/wasm-api.cc
deleted file mode 100644
index 4c51dc54cdc..00000000000
--- a/chromium/v8/src/wasm/wasm-api.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-api.h"
-
-#include "src/isolate-inl.h"
-#include "src/isolate.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-ScheduledErrorThrower::~ScheduledErrorThrower() {
- // There should never be both a pending and a scheduled exception.
- DCHECK(!isolate()->has_scheduled_exception() ||
- !isolate()->has_pending_exception());
- // Don't throw another error if there is already a scheduled error.
- if (isolate()->has_scheduled_exception()) {
- Reset();
- } else if (isolate()->has_pending_exception()) {
- Reset();
- isolate()->OptionalRescheduleException(false);
- } else if (error()) {
- isolate()->ScheduleThrow(*Reify());
- }
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-api.h b/chromium/v8/src/wasm/wasm-api.h
deleted file mode 100644
index 464cdfa6f1d..00000000000
--- a/chromium/v8/src/wasm/wasm-api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_API_H_
-#define V8_WASM_API_H_
-
-#include "src/wasm/wasm-result.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Like an ErrorThrower, but turns all pending exceptions into scheduled
-// exceptions when going out of scope. Use this in API methods.
-// Note that pending exceptions are not necessarily created by the ErrorThrower,
-// but e.g. by the wasm start function. There might also be a scheduled
-// exception, created by another API call (e.g. v8::Object::Get). But there
-// should never be both pending and scheduled exceptions.
-class V8_EXPORT_PRIVATE ScheduledErrorThrower : public ErrorThrower {
- public:
- ScheduledErrorThrower(v8::Isolate* isolate, const char* context)
- : ScheduledErrorThrower(reinterpret_cast<Isolate*>(isolate), context) {}
-
- ScheduledErrorThrower(Isolate* isolate, const char* context)
- : ErrorThrower(isolate, context) {}
-
- ~ScheduledErrorThrower();
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_API_H_
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index 2b8f3097339..25f61d2e120 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -30,7 +30,6 @@ namespace internal {
namespace wasm {
namespace {
-size_t native_module_ids = 0;
#if V8_TARGET_ARCH_X64
#define __ masm->
@@ -71,10 +70,11 @@ void PatchTrampolineAndStubCalls(
#else
Address new_target = old_target;
#endif
- it.rinfo()->set_target_address(nullptr, new_target, SKIP_WRITE_BARRIER,
+ it.rinfo()->set_target_address(new_target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
}
}
+
} // namespace
DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
@@ -212,18 +212,21 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate,
instructions().start() + instruction_size, nullptr);
os << "\n";
- Object* source_positions_or_undef =
- owner_->compiled_module()->source_positions()->get(index());
- if (!source_positions_or_undef->IsUndefined(isolate)) {
- os << "Source positions:\n pc offset position\n";
- for (SourcePositionTableIterator it(
- ByteArray::cast(source_positions_or_undef));
- !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
+ // Anonymous functions don't have source positions.
+ if (!IsAnonymous()) {
+ Object* source_positions_or_undef =
+ owner_->compiled_module()->source_positions()->get(index());
+ if (!source_positions_or_undef->IsUndefined(isolate)) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(
+ ByteArray::cast(source_positions_or_undef));
+ !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
os << "RelocInfo (size = " << reloc_size_ << ")\n";
@@ -268,10 +271,12 @@ WasmCode::~WasmCode() {
}
}
+base::AtomicNumber<size_t> NativeModule::next_id_;
+
NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* mem,
WasmCodeManager* code_manager)
- : instance_id(native_module_ids++),
+ : instance_id(next_id_.Increment(1)),
code_table_(num_functions),
num_imported_functions_(num_imports),
free_memory_(reinterpret_cast<Address>(mem->address()),
@@ -296,11 +301,6 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
source_positions = isolate->factory()->CopyFixedArrayAndGrow(
source_positions, grow_by, TENURED);
compiled_module()->set_source_positions(*source_positions);
- Handle<FixedArray> handler_table(compiled_module()->handler_table(),
- isolate);
- handler_table = isolate->factory()->CopyFixedArrayAndGrow(handler_table,
- grow_by, TENURED);
- compiled_module()->set_handler_table(*handler_table);
}
}
@@ -318,19 +318,24 @@ WasmCode* NativeModule::AddOwnedCode(
std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
// both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
Address executable_buffer = AllocateForCode(orig_instructions.size());
- if (executable_buffer == nullptr) return nullptr;
+ if (executable_buffer == nullptr) {
+ V8::FatalProcessOutOfMemory("NativeModule::AddOwnedCode");
+ UNREACHABLE();
+ }
memcpy(executable_buffer, orig_instructions.start(),
orig_instructions.size());
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, std::move(protected_instructions), is_liftoff));
+ safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
@@ -339,8 +344,8 @@ WasmCode* NativeModule::AddOwnedCode(
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
- wasm_code_manager_->FlushICache(ret->instructions().start(),
- ret->instructions().size());
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
return ret;
}
@@ -348,12 +353,10 @@ WasmCode* NativeModule::AddOwnedCode(
WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, kind);
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
ret->index_ = Just(index);
compiled_module()->source_positions()->set(static_cast<int>(index),
code->source_position_table());
- compiled_module()->handler_table()->set(static_cast<int>(index),
- code->handler_table());
return ret;
}
@@ -364,15 +367,11 @@ WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
return ret;
}
-WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
- DCHECK_NULL(lazy_builtin_);
- lazy_builtin_ = AddAnonymousCode(code, WasmCode::kLazyStub);
-
+void NativeModule::SetLazyBuiltin(Handle<Code> code) {
+ WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
- SetCodeTable(i, lazy_builtin_);
+ code_table_[i] = lazy_builtin;
}
-
- return lazy_builtin_;
}
WasmCompiledModule* NativeModule::compiled_module() const {
@@ -392,13 +391,16 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
reloc_info.reset(new byte[code->relocation_size()]);
memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
}
+ std::shared_ptr<ProtectedInstructions> protected_instructions(
+ new ProtectedInstructions(0));
WasmCode* ret = AddOwnedCode(
{code->instruction_start(),
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
(code->has_safepoint_info() ? code->stack_slots() : 0),
- (code->has_safepoint_info() ? code->safepoint_table_offset() : 0), {});
+ (code->has_safepoint_info() ? code->safepoint_table_offset() : 0),
+ code->handler_table_offset(), protected_instructions, false);
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
@@ -411,8 +413,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Code* call_target =
Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
- it.rinfo()->set_target_address(nullptr,
- GetLocalAddressFor(handle(call_target)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(call_target)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
@@ -427,7 +428,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::unique_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
@@ -441,11 +442,11 @@ WasmCode* NativeModule::AddCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, std::move(protected_instructions),
- is_liftoff);
+ frame_slots, safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff);
if (ret == nullptr) return nullptr;
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
// TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -467,12 +468,12 @@ WasmCode* NativeModule::AddCode(
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(nullptr, GetLocalAddressFor(handle(code)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(code)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(
- origin->isolate(), p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = ret->instructions().start() - desc.buffer;
it.rinfo()->apply(delta);
@@ -490,8 +491,7 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, {});
- if (wasm_code == nullptr) return nullptr;
+ Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, 0, {}, false);
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
return ret;
@@ -560,7 +560,7 @@ void NativeModule::Link(uint32_t index) {
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
DCHECK_NOT_NULL(target);
- it.rinfo()->set_wasm_call_address(nullptr, target_addr,
+ it.rinfo()->set_wasm_call_address(target_addr,
ICacheFlushMode::SKIP_ICACHE_FLUSH);
}
}
@@ -655,29 +655,29 @@ WasmCode* NativeModule::Lookup(Address pc) {
return nullptr;
}
-WasmCode* NativeModule::CloneLazyBuiltinInto(uint32_t index) {
- DCHECK_NOT_NULL(lazy_builtin());
- WasmCode* ret = CloneCode(lazy_builtin());
- SetCodeTable(index, ret);
+WasmCode* NativeModule::CloneLazyBuiltinInto(const WasmCode* code,
+ uint32_t index) {
+ DCHECK_EQ(wasm::WasmCode::kLazyStub, code->kind());
+ WasmCode* ret = CloneCode(code);
+ code_table_[index] = ret;
ret->index_ = Just(index);
return ret;
}
-bool NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
+void NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
for (auto& pair : other->trampolines_) {
Address key = pair.first;
Address local =
GetLocalAddressFor(handle(Code::GetCodeFromTargetAddress(key)));
- if (local == nullptr) return false;
+ DCHECK_NOT_NULL(local);
trampolines_.emplace(std::make_pair(key, local));
}
for (auto& pair : other->stubs_) {
uint32_t key = pair.first;
WasmCode* clone = CloneCode(pair.second);
- if (!clone) return false;
+ DCHECK_NOT_NULL(clone);
stubs_.emplace(std::make_pair(key, clone));
}
- return true;
}
WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
@@ -692,10 +692,10 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
original_code->reloc_info().size(), original_code->index_,
original_code->kind(), original_code->constant_pool_offset_,
original_code->stack_slots(), original_code->safepoint_table_offset_,
- original_code->protected_instructions_);
- if (ret == nullptr) return nullptr;
+ original_code->handler_table_offset_,
+ original_code->protected_instructions_, original_code->is_liftoff());
if (!ret->IsAnonymous()) {
- SetCodeTable(ret->index(), ret);
+ code_table_[ret->index()] = ret;
}
intptr_t delta =
ret->instructions().start() - original_code->instructions().start();
@@ -707,10 +707,6 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
return ret;
}
-void NativeModule::SetCodeTable(uint32_t index, wasm::WasmCode* code) {
- code_table_[index] = code;
-}
-
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
wasm_code_manager_->FreeNativeModuleMemories(this);
@@ -889,11 +885,7 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
if (!ret) return ret;
- if (lazy_builtin() != nullptr) {
- ret->lazy_builtin_ = ret->CloneCode(lazy_builtin());
- }
-
- if (!ret->CloneTrampolinesAndStubs(this)) return nullptr;
+ ret->CloneTrampolinesAndStubs(this);
std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
for (auto& pair : trampolines_) {
@@ -917,20 +909,29 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
WasmCode* old_stub = stubs_.find(pair.first)->second;
PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
}
- if (lazy_builtin_ != nullptr) {
- PatchTrampolineAndStubCalls(lazy_builtin_, ret->lazy_builtin_,
- reverse_lookup);
- }
+ WasmCode* anonymous_lazy_builtin = nullptr;
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
case WasmCode::kLazyStub: {
- if (original_code->IsAnonymous()) {
- ret->SetCodeTable(i, ret->lazy_builtin());
- } else {
- if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
+ // Use the first anonymous lazy compile stub hit in this loop as the
+ // canonical copy for all further ones by remembering it locally via
+ // the {anonymous_lazy_builtin} variable. All non-anonymous such stubs
+ // are just cloned directly via {CloneLazyBuiltinInto} below.
+ if (!original_code->IsAnonymous()) {
+ WasmCode* new_code = ret->CloneLazyBuiltinInto(original_code, i);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ break;
+ }
+ if (anonymous_lazy_builtin == nullptr) {
+ WasmCode* new_code = ret->CloneCode(original_code);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ anonymous_lazy_builtin = new_code;
}
+ ret->code_table_[i] = anonymous_lazy_builtin;
} break;
case WasmCode::kFunction: {
WasmCode* new_code = ret->CloneCode(original_code);
@@ -941,7 +942,6 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
UNREACHABLE();
}
}
- ret->specialization_data_ = specialization_data_;
return ret;
}
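
The cloning loop above memoizes the first anonymous lazy-compile stub it clones and reuses that single copy for every later anonymous entry, while named entries each get their own clone. A minimal standalone sketch of that pattern, with a hypothetical Stub type rather than the V8 classes:

#include <cstdint>
#include <memory>
#include <vector>

struct Stub {
  bool anonymous = false;
  uint32_t index = 0;
};

// Fill {table} so that every anonymous stub shares one canonical clone, while
// non-anonymous stubs each get their own clone bound to their slot.
void CloneStubTable(const std::vector<Stub>& originals,
                    std::vector<std::unique_ptr<Stub>>& owned,
                    std::vector<Stub*>& table) {
  Stub* anonymous_canonical = nullptr;
  table.assign(originals.size(), nullptr);
  for (uint32_t i = 0; i < originals.size(); ++i) {
    const Stub& original = originals[i];
    if (!original.anonymous) {
      owned.push_back(std::make_unique<Stub>(original));
      owned.back()->index = i;             // Bind the clone to its slot.
      table[i] = owned.back().get();
      continue;
    }
    if (anonymous_canonical == nullptr) {  // First anonymous stub seen.
      owned.push_back(std::make_unique<Stub>(original));
      anonymous_canonical = owned.back().get();
    }
    table[i] = anonymous_canonical;        // All later ones share it.
  }
}
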
@@ -1009,22 +1009,17 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
-void WasmCodeManager::FlushICache(Address start, size_t size) {
- Assembler::FlushICache(reinterpret_cast<internal::Isolate*>(isolate_), start,
- size);
-}
-
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
bool success = native_module_->SetExecutable(false);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
bool success = native_module_->SetExecutable(true);
CHECK(success);
}
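
With the new modification_scope_depth_ counter, only the outermost NativeModuleModificationScope flips the module between writable and executable, so nested scopes become cheap no-ops. A self-contained sketch of the same RAII nesting pattern, using stand-in types:

struct Module {
  int modification_scope_depth = 0;
  bool executable = true;
  void SetExecutable(bool value) { executable = value; }  // mprotect() in V8.
};

class ModificationScope {
 public:
  explicit ModificationScope(Module* module) : module_(module) {
    // Only the outermost scope makes the code writable.
    if (module_ && module_->modification_scope_depth++ == 0) {
      module_->SetExecutable(false);
    }
  }
  ~ModificationScope() {
    // Only the matching outermost scope makes the code executable again.
    if (module_ && --module_->modification_scope_depth == 0) {
      module_->SetExecutable(true);
    }
  }

 private:
  Module* module_;
};

// Nested scopes toggle the protection exactly once:
//   ModificationScope outer(&m);        // writable
//   { ModificationScope inner(&m); }    // no-op
//   // destructor of outer: executable again
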
@@ -1039,8 +1034,8 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
#else
- rinfo->set_target_address(nullptr, reinterpret_cast<Address>(tag),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ rinfo->set_target_address(reinterpret_cast<Address>(tag), SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
#endif
}
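
On x64/ia32 the callee tag is written straight into the 4-byte call immediate; other architectures encode it as a fake target address. A portable illustration of the direct-write case, assuming a writable code buffer and hypothetical helper names (not the RelocInfo API):

#include <cstdint>
#include <cstring>

// Write a 32-bit tag into the immediate operand of a call instruction whose
// operand lives at {immediate_address}. memcpy avoids alignment issues.
void WriteCalleeTag(uint8_t* immediate_address, uint32_t tag) {
  std::memcpy(immediate_address, &tag, sizeof(tag));
}

uint32_t ReadCalleeTag(const uint8_t* immediate_address) {
  uint32_t tag;
  std::memcpy(&tag, immediate_address, sizeof(tag));
  return tag;
}
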
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 3e2a0918fb8..e398f1bcfd4 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_HEAP_H_
-#define V8_WASM_HEAP_H_
+#ifndef V8_WASM_WASM_CODE_MANAGER_H_
+#define V8_WASM_WASM_CODE_MANAGER_H_
#include <functional>
#include <list>
@@ -111,6 +111,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
Address constant_pool() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
+ size_t handler_table_offset() const { return handler_table_offset_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return is_liftoff_; }
@@ -120,6 +121,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
void ResetTrapHandlerIndex();
const ProtectedInstructions& protected_instructions() const {
+ // TODO(mstarzinger): Code that doesn't have trapping instructions should
+ // not be required to have this vector; make it possible to be null.
+ DCHECK_NOT_NULL(protected_instructions_);
return *protected_instructions_.get();
}
@@ -139,9 +143,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
NativeModule* owner, Maybe<uint32_t> index, Kind kind,
size_t constant_pool_offset, uint32_t stack_slots,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff = false)
+ bool is_liftoff)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
reloc_size_(reloc_size),
@@ -151,6 +155,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
+ handler_table_offset_(handler_table_offset),
protected_instructions_(std::move(protected_instructions)),
is_liftoff_(is_liftoff) {}
@@ -169,6 +174,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// since there may be stack/register tagged values for large number
// conversions.
size_t safepoint_table_offset_ = 0;
+ size_t handler_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
std::shared_ptr<ProtectedInstructions> protected_instructions_;
bool is_liftoff_;
@@ -189,9 +195,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<NativeModule> Clone();
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
- size_t safepoint_table_offset,
- std::unique_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ std::unique_ptr<ProtectedInstructions>, bool is_liftoff);
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
@@ -204,11 +209,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddInterpreterWrapper(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will initialize the code table with it, and the
- // lazy_builtin_ field. The latter is used when creating entries for exported
+ // calling SetLazyBuiltin. It will initialize the code table with it. Copies
+ // of it might be cloned later when creating entries for exported
// functions and indirect callable functions, so that they may be identified
// by the runtime.
- WasmCode* SetLazyBuiltin(Handle<Code> code);
+ void SetLazyBuiltin(Handle<Code> code);
// ExportedWrappers are WasmToWasmWrappers for functions placed on import
// tables. We construct them as-needed.
@@ -219,8 +224,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
- WasmCode* lazy_builtin() const { return lazy_builtin_; }
-
// We special-case lazy cloning because we currently rely on making copies
// of the lazy builtin, to be able to identify, in the runtime, which function
// the lazy builtin is a placeholder of. If we used trampolines, we would call
@@ -229,7 +232,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// builtin. The logic for seeking though frames would change, though.
// TODO(mtrofin): perhaps we can do exactly that - either before or after
// this change.
- WasmCode* CloneLazyBuiltinInto(uint32_t);
+ WasmCode* CloneLazyBuiltinInto(const WasmCode* code, uint32_t);
bool SetExecutable(bool executable);
@@ -239,24 +242,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LinkAll();
void Link(uint32_t index);
- // TODO(mtrofin): needed until we sort out exception handlers and
- // source positions, which are still on the GC-heap.
+ // TODO(mstarzinger): needed until we sort out source positions, which are
+ // still on the GC-heap.
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
- // Shorthand accessors to the specialization data content.
- std::vector<wasm::GlobalHandleAddress>& function_tables() {
- return specialization_data_.function_tables;
- }
-
- std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
- return specialization_data_.empty_function_tables;
- }
-
uint32_t num_imported_functions() const { return num_imported_functions_; }
- size_t num_function_tables() const {
- return specialization_data_.empty_function_tables.size();
- }
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
@@ -266,6 +257,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
+ friend class NativeModuleModificationScope;
struct WasmCodeUniquePtrComparer {
bool operator()(const std::unique_ptr<WasmCode>& a,
@@ -276,7 +268,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
};
- static base::AtomicNumber<uint32_t> next_id_;
+ static base::AtomicNumber<size_t> next_id_;
NativeModule(const NativeModule&) = delete;
NativeModule& operator=(const NativeModule&) = delete;
NativeModule(uint32_t num_functions, uint32_t num_imports,
@@ -295,11 +287,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
- void SetCodeTable(uint32_t, wasm::WasmCode*);
+ bool is_liftoff);
WasmCode* CloneCode(const WasmCode*);
- bool CloneTrampolinesAndStubs(const NativeModule* other);
+ void CloneTrampolinesAndStubs(const NativeModule* other);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
@@ -319,20 +311,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
DisjointAllocationPool allocated_memory_;
std::list<VirtualMemory> owned_memory_;
WasmCodeManager* wasm_code_manager_;
- wasm::WasmCode* lazy_builtin_ = nullptr;
base::Mutex allocation_mutex_;
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
bool is_executable_ = false;
-
- // Specialization data that needs to be serialized and cloned.
- // Keeping it groupped together because it makes cloning of all these
- // elements a 1 line copy.
- struct {
- std::vector<wasm::GlobalHandleAddress> function_tables;
- std::vector<wasm::GlobalHandleAddress> empty_function_tables;
- } specialization_data_;
+ int modification_scope_depth_ = 0;
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
@@ -356,10 +340,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
- // TODO(mtrofin): replace this API with an alternative that is Isolate-
- // independent.
- void FlushICache(Address start, size_t size);
-
private:
friend class NativeModule;
@@ -416,4 +396,5 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_CODE_MANAGER_H_
diff --git a/chromium/v8/src/wasm/wasm-code-specialization.cc b/chromium/v8/src/wasm/wasm-code-specialization.cc
index 416d1d600ab..f261f44991a 100644
--- a/chromium/v8/src/wasm/wasm-code-specialization.cc
+++ b/chromium/v8/src/wasm/wasm-code-specialization.cc
@@ -83,32 +83,33 @@ bool IsAtWasmDirectCallTarget(RelocIterator& it) {
} // namespace
-CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
- : isolate_(isolate) {}
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
DCHECK_NOT_NULL(new_context);
- DCHECK_NULL(new_wasm_context_address);
- new_wasm_context_address = new_context;
+ DCHECK_NULL(new_wasm_context_address_);
+ new_wasm_context_address_ = new_context;
}
void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
- DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
- old_function_table_size = old_size;
- new_function_table_size = new_size;
+ DCHECK(old_function_table_size_ == 0 && new_function_table_size_ == 0);
+ old_function_table_size_ = old_size;
+ new_function_table_size_ = new_size;
}
void CodeSpecialization::RelocateDirectCalls(
Handle<WasmInstanceObject> instance) {
- DCHECK(relocate_direct_calls_instance.is_null());
+ DCHECK(relocate_direct_calls_instance_.is_null());
DCHECK(!instance.is_null());
- relocate_direct_calls_instance = instance;
+ relocate_direct_calls_instance_ = instance;
}
void CodeSpecialization::RelocatePointer(Address old_ptr, Address new_ptr) {
- pointers_to_relocate.insert(std::make_pair(old_ptr, new_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(old_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(new_ptr));
+ pointers_to_relocate_.insert(std::make_pair(old_ptr, new_ptr));
}
bool CodeSpecialization::ApplyToWholeInstance(
@@ -147,14 +148,14 @@ bool CodeSpecialization::ApplyToWholeInstance(
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
// We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
- if (new_wasm_context_address) {
+ if (new_wasm_context_address_) {
reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
}
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_instance)
+ // calls, the instance registered for that (relocate_direct_calls_instance_)
// should match the instance we currently patch (instance).
- if (!relocate_direct_calls_instance.is_null()) {
- DCHECK_EQ(instance, *relocate_direct_calls_instance);
+ if (!relocate_direct_calls_instance_.is_null()) {
+ DCHECK_EQ(instance, *relocate_direct_calls_instance_);
reloc_mode |=
RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
: RelocInfo::CODE_TARGET);
@@ -170,24 +171,23 @@ bool CodeSpecialization::ApplyToWholeInstance(
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
case RelocInfo::WASM_CONTEXT_REFERENCE:
- it.rinfo()->set_wasm_context_reference(export_wrapper->GetIsolate(),
- new_wasm_context_address,
+ it.rinfo()->set_wasm_context_reference(new_wasm_context_address_,
icache_flush_mode);
break;
case RelocInfo::JS_TO_WASM_CALL: {
DCHECK(FLAG_wasm_jit_to_native);
const WasmCode* new_code = native_module->GetCode(exp.index);
- it.rinfo()->set_js_to_wasm_address(
- nullptr, new_code->instructions().start(), SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_js_to_wasm_address(new_code->instructions().start(),
+ SKIP_ICACHE_FLUSH);
} break;
case RelocInfo::CODE_TARGET: {
DCHECK(!FLAG_wasm_jit_to_native);
// Ignore calls to other builtins like ToNumber.
if (!IsAtWasmDirectCallTarget(it)) continue;
Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(
- new_code->GetIsolate(), new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} break;
default:
UNREACHABLE();
@@ -210,9 +210,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
}
- bool patch_table_size = old_function_table_size || new_function_table_size;
- bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
- bool reloc_pointers = pointers_to_relocate.size() > 0;
+ bool patch_table_size = old_function_table_size_ || new_function_table_size_;
+ bool reloc_direct_calls = !relocate_direct_calls_instance_.is_null();
+ bool reloc_pointers = pointers_to_relocate_.size() > 0;
int reloc_mode = 0;
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
@@ -253,7 +253,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
// bytes to find the new compiled function.
size_t offset = it.rinfo()->pc() - code.GetCode()->instruction_start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
*code.GetCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -262,10 +262,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
FixedArray* code_table =
- relocate_direct_calls_instance->compiled_module()->code_table();
+ relocate_direct_calls_instance_->compiled_module()->code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
- it.rinfo()->set_target_address(new_code->GetIsolate(),
- new_code->instruction_start(),
+ it.rinfo()->set_target_address(new_code->instruction_start(),
UPDATE_WRITE_BARRIER, icache_flush_mode);
changed = true;
} break;
@@ -280,7 +279,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
size_t offset =
it.rinfo()->pc() - code.GetWasmCode()->instructions().start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
code.GetWasmCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -289,23 +288,24 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
const WasmCode* new_code = native_module->GetCode(called_func_index);
- it.rinfo()->set_wasm_call_address(
- isolate_, new_code->instructions().start(), icache_flush_mode);
+ it.rinfo()->set_wasm_call_address(new_code->instructions().start(),
+ icache_flush_mode);
changed = true;
} break;
case RelocInfo::WASM_GLOBAL_HANDLE: {
DCHECK(reloc_pointers);
Address old_ptr = it.rinfo()->global_handle();
- if (pointers_to_relocate.count(old_ptr) == 1) {
- Address new_ptr = pointers_to_relocate[old_ptr];
- it.rinfo()->set_global_handle(isolate_, new_ptr, icache_flush_mode);
+ auto entry = pointers_to_relocate_.find(old_ptr);
+ if (entry != pointers_to_relocate_.end()) {
+ Address new_ptr = entry->second;
+ it.rinfo()->set_global_handle(new_ptr, icache_flush_mode);
changed = true;
}
} break;
case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
DCHECK(patch_table_size);
it.rinfo()->update_wasm_function_table_size_reference(
- isolate_, old_function_table_size, new_function_table_size,
+ old_function_table_size_, new_function_table_size_,
icache_flush_mode);
changed = true;
break;
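
Several sites above (RelocatePointer, the WASM_GLOBAL_HANDLE case) now use a single unordered_map::find() instead of count() followed by operator[], avoiding a second hash lookup. A small sketch of that lookup pattern:

#include <cstdint>
#include <unordered_map>

using Address = uintptr_t;

// One lookup instead of count() followed by operator[].
bool Relocate(const std::unordered_map<Address, Address>& pointers,
              Address old_ptr, Address* new_ptr) {
  auto entry = pointers.find(old_ptr);
  if (entry == pointers.end()) return false;  // Nothing to patch.
  *new_ptr = entry->second;
  return true;
}
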
diff --git a/chromium/v8/src/wasm/wasm-code-specialization.h b/chromium/v8/src/wasm/wasm-code-specialization.h
index 8f68677fbf8..bed565cf05d 100644
--- a/chromium/v8/src/wasm/wasm-code-specialization.h
+++ b/chromium/v8/src/wasm/wasm-code-specialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_SPECIALIZATION_H_
-#define V8_WASM_CODE_SPECIALIZATION_H_
+#ifndef V8_WASM_WASM_CODE_SPECIALIZATION_H_
+#define V8_WASM_WASM_CODE_SPECIALIZATION_H_
#include "src/assembler.h"
#include "src/identity-map.h"
@@ -47,19 +47,18 @@ class CodeSpecialization {
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Isolate* isolate_;
- Address new_wasm_context_address = 0;
+ Address new_wasm_context_address_ = 0;
- uint32_t old_function_table_size = 0;
- uint32_t new_function_table_size = 0;
+ uint32_t old_function_table_size_ = 0;
+ uint32_t new_function_table_size_ = 0;
- Handle<WasmInstanceObject> relocate_direct_calls_instance;
+ Handle<WasmInstanceObject> relocate_direct_calls_instance_;
- std::map<Address, Address> pointers_to_relocate;
+ std::unordered_map<Address, Address> pointers_to_relocate_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_SPECIALIZATION_H_
+#endif // V8_WASM_WASM_CODE_SPECIALIZATION_H_
diff --git a/chromium/v8/src/wasm/wasm-code-wrapper.cc b/chromium/v8/src/wasm/wasm-code-wrapper.cc
index 9256391543f..c9eee24f3d6 100644
--- a/chromium/v8/src/wasm/wasm-code-wrapper.cc
+++ b/chromium/v8/src/wasm/wasm-code-wrapper.cc
@@ -7,7 +7,7 @@
#include "src/objects-inl.h"
#include "src/objects/code.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -59,10 +59,17 @@ Vector<uint8_t> WasmCodeWrapper::instructions() const {
static_cast<size_t>(code->instruction_size())};
}
-Handle<WasmInstanceObject> WasmCodeWrapper::wasm_instance() const {
- return IsCodeObject()
- ? handle(WasmInstanceObject::GetOwningInstanceGC(*GetCode()))
- : handle(WasmInstanceObject::GetOwningInstance(GetWasmCode()));
+WasmInstanceObject* WasmCodeWrapper::wasm_instance() const {
+ if (IsCodeObject()) {
+ WeakCell* weak_instance =
+ WeakCell::cast(GetCode()->deoptimization_data()->get(0));
+ return WasmInstanceObject::cast(weak_instance->value());
+ }
+ return GetWasmCode()->owner()->compiled_module()->owning_instance();
+}
+
+WasmContext* WasmCodeWrapper::wasm_context() const {
+ return wasm_instance()->wasm_context()->get();
}
} // namespace internal
diff --git a/chromium/v8/src/wasm/wasm-code-wrapper.h b/chromium/v8/src/wasm/wasm-code-wrapper.h
index 7d978152f1c..d51bc085aae 100644
--- a/chromium/v8/src/wasm/wasm-code-wrapper.h
+++ b/chromium/v8/src/wasm/wasm-code-wrapper.h
@@ -1,8 +1,8 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_WRAPPER_H_
-#define V8_WASM_CODE_WRAPPER_H_
+#ifndef V8_WASM_WASM_CODE_WRAPPER_H_
+#define V8_WASM_WASM_CODE_WRAPPER_H_
#include "src/handles.h"
@@ -13,6 +13,7 @@ class WasmCode;
} // namespace wasm
class Code;
+struct WasmContext;
class WasmInstanceObject;
// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
@@ -30,7 +31,8 @@ class WasmCodeWrapper {
Vector<uint8_t> instructions() const;
- Handle<WasmInstanceObject> wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const;
+ WasmContext* wasm_context() const;
#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
@@ -45,4 +47,4 @@ class WasmCodeWrapper {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_WRAPPER_H_
+#endif // V8_WASM_WASM_CODE_WRAPPER_H_
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index 5e7ce1e4f58..932501d776a 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CONSTANTS_H_
-#define V8_WASM_CONSTANTS_H_
+#ifndef V8_WASM_WASM_CONSTANTS_H_
+#define V8_WASM_WASM_CONSTANTS_H_
namespace v8 {
namespace internal {
@@ -80,4 +80,4 @@ constexpr WasmCodePosition kNoCodePosition = -1;
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CONSTANTS_H_
+#endif // V8_WASM_WASM_CONSTANTS_H_
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index 87995df4e6d..08d436ffa44 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -438,7 +438,6 @@ class InterpreterHandle {
Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<JSObject> local_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
@@ -497,8 +496,6 @@ class InterpreterHandle {
Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
Handle<WasmDebugInfo> debug_info) {
auto frame = GetInterpretedFrame(frame_pointer, frame_index);
- Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<FixedArray> global_scope =
isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
@@ -591,8 +588,7 @@ void RedirectCallsitesInCodeGC(Code* code, CodeRelocationMapGC& map) {
Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
Handle<Code>* new_target = map.Find(target);
if (!new_target) continue;
- it.rinfo()->set_target_address(code->GetIsolate(),
- (*new_target)->instruction_start());
+ it.rinfo()->set_target_address((*new_target)->instruction_start());
}
}
@@ -606,7 +602,7 @@ void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
Address target = it.rinfo()->target_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_wasm_call_address(isolate, new_target->second);
+ it.rinfo()->set_wasm_call_address(new_target->second);
}
}
@@ -618,7 +614,7 @@ void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
Address target = it.rinfo()->js_to_wasm_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_js_to_wasm_address(isolate, new_target->second);
+ it.rinfo()->set_js_to_wasm_address(new_target->second);
}
}
@@ -685,7 +681,9 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
auto interp_handle =
Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
debug_info->set(kInterpreterHandleIndex, *interp_handle);
- return interp_handle->get()->interpreter();
+ auto ret = interp_handle->get()->interpreter();
+ ret->SetCallIndirectTestMode();
+ return ret;
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
@@ -850,12 +848,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Address context_address = reinterpret_cast<Address>(
- debug_info->wasm_instance()->has_memory_object()
- ? debug_info->wasm_instance()->wasm_context()
- : nullptr);
- Handle<Code> new_entry_code =
- compiler::CompileCWasmEntry(isolate, sig, context_address);
+ Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
Handle<SharedFunctionInfo> shared =
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 4c84b70dbd7..460742d15a4 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/wasm-engine.h"
+
#include "src/objects-inl.h"
#include "src/wasm/module-compiler.h"
@@ -18,6 +19,106 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
return result.ok();
}
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
+ CHECK(!result.failed());
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ asm_js_script, asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ Handle<Script>(), Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
+ memory);
+}
+
+void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmInstanceObject> instance = instance_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, instance);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+}
+
+void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared) {
+ if (!FLAG_wasm_async_compilation) {
+ // Asynchronous compilation disabled; fall back on synchronous compilation.
+ ErrorThrower thrower(isolate, "WasmCompile");
+ MaybeHandle<WasmModuleObject> module_object;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, OK to use them directly.
+ module_object = SyncCompile(isolate, &thrower, bytes);
+ }
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, module);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+
+ if (FLAG_wasm_test_streaming) {
+ std::shared_ptr<StreamingDecoder> streaming_decoder =
+ isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(isolate, handle(isolate->context()),
+ promise);
+ streaming_decoder->OnBytesReceived(bytes.module_bytes());
+ streaming_decoder->Finish();
+ return;
+ }
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
+}
+
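
AsyncCompile copies the wire bytes before handing them to the streaming decoder or background job whenever user code could still mutate the buffer. A simplified sketch of the copy step, with generic names rather than the engine API:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

// Take a private copy of the wire bytes so a background compilation job never
// observes concurrent modifications of the caller's (possibly shared) buffer.
std::unique_ptr<uint8_t[]> CopyWireBytes(const uint8_t* bytes,
                                         std::size_t length) {
  std::unique_ptr<uint8_t[]> copy(new uint8_t[length]);
  std::memcpy(copy.get(), bytes, length);
  return copy;
}
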
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h
index bf06b47ed7b..8a698c83b90 100644
--- a/chromium/v8/src/wasm/wasm-engine.h
+++ b/chromium/v8/src/wasm/wasm-engine.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef WASM_ENGINE_H_
-#define WASM_ENGINE_H_
+#ifndef V8_WASM_WASM_ENGINE_H_
+#define V8_WASM_WASM_ENGINE_H_
#include <memory>
@@ -14,8 +14,14 @@
namespace v8 {
namespace internal {
+class WasmModuleObject;
+class WasmInstanceObject;
+
namespace wasm {
+class ErrorThrower;
+struct ModuleWireBytes;
+
// The central data structure that represents an engine instance capable of
// loading, instantiating, and executing WASM code.
class V8_EXPORT_PRIVATE WasmEngine {
@@ -23,8 +29,44 @@ class V8_EXPORT_PRIVATE WasmEngine {
explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
: code_manager_(std::move(code_manager)) {}
+ // Synchronously validates the given bytes that represent an encoded WASM
+ // module.
bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+ // Synchronously compiles the given bytes that represent a translated
+ // asm.js module.
+ MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ // Synchronously compiles the given bytes that represent an encoded WASM
+ // module.
+ MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes);
+
+ // Synchronously instantiate the given WASM module with the given imports.
+ // If the module represents an asm.js module, then the supplied {memory}
+ // should be used as the memory of the instance.
+ MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+ // Begin an asynchronous compilation of the given bytes that represent an
+ // encoded WASM module, placing the result in the supplied {promise}.
+ // The {is_shared} flag indicates if the bytes backing the module could
+ // be shared across threads, i.e. could be concurrently modified.
+ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared);
+
+ // Begin an asynchronous instantiation of the given WASM module, placing the
+ // result in the supplied {promise}.
+ void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
CompilationManager* compilation_manager() { return &compilation_manager_; }
WasmCodeManager* code_manager() const { return code_manager_.get(); }
@@ -43,4 +85,4 @@ class V8_EXPORT_PRIVATE WasmEngine {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_ENGINE_H_
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index dea620338a0..d44f5b242f4 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -4,8 +4,8 @@
#include <stdint.h>
-#ifndef WASM_EXTERNAL_REFS_H
-#define WASM_EXTERNAL_REFS_H
+#ifndef V8_WASM_WASM_EXTERNAL_REFS_H_
+#define V8_WASM_WASM_EXTERNAL_REFS_H_
namespace v8 {
namespace internal {
@@ -77,4 +77,5 @@ void call_trap_callback_for_testing();
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_EXTERNAL_REFS_H_
diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc
index 2f8fb0bf4a6..3bcb1b5ef62 100644
--- a/chromium/v8/src/wasm/wasm-interpreter.cc
+++ b/chromium/v8/src/wasm/wasm-interpreter.cc
@@ -477,6 +477,17 @@ int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
int64_t output;
if (!float64_to_int64_wrapper(&a, &output)) {
@@ -485,6 +496,17 @@ int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
uint64_t output;
if (!float32_to_uint64_wrapper(&a, &output)) {
@@ -493,6 +515,17 @@ uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
uint64_t output;
if (!float64_to_uint64_wrapper(&a, &output)) {
@@ -501,6 +534,17 @@ uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64UConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
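
The four new saturating helpers all follow one rule: run the trapping conversion first, and if it would trap, return 0 for NaN, the type's minimum for negative overflow, and the maximum otherwise. A standalone sketch of that rule for float to int64_t, written directly against the limits instead of the TrapReason plumbing:

#include <cmath>
#include <cstdint>
#include <limits>

// Saturating float -> int64_t: NaN maps to 0, out-of-range values clamp to
// the representable limits, everything else converts normally.
int64_t SaturatingI64FromF32(float a) {
  if (std::isnan(a)) return 0;
  // INT64_MAX rounds up to 2^63 as a float; anything >= that would overflow.
  constexpr float kUpper =
      static_cast<float>(std::numeric_limits<int64_t>::max());
  constexpr float kLower =
      static_cast<float>(std::numeric_limits<int64_t>::min());
  if (a >= kUpper) return std::numeric_limits<int64_t>::max();
  if (a < kLower) return std::numeric_limits<int64_t>::min();
  return static_cast<int64_t>(a);
}
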
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<int64_t>(a);
}
@@ -924,6 +968,9 @@ class CodeMap {
// This handle is set and reset by the SetInstanceObject() /
// ClearInstanceObject() method, which is used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
+ // TODO(wasm): Remove this testing wart. It is needed because interpreter
+ // entry stubs are not generated when testing the interpreter in cctests.
+ bool call_indirect_through_module_ = false;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
@@ -942,6 +989,12 @@ class CodeMap {
}
}
+ bool call_indirect_through_module() { return call_indirect_through_module_; }
+
+ void set_call_indirect_through_module(bool val) {
+ call_indirect_through_module_ = val;
+ }
+
void SetInstanceObject(Handle<WasmInstanceObject> instance) {
DCHECK(instance_.is_null());
instance_ = instance;
@@ -987,12 +1040,34 @@ class CodeMap {
}
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
+ uint32_t saved_index;
+ USE(saved_index);
if (table_index >= module_->function_tables.size()) return nullptr;
+ // Mask table index for SSCA mitigation.
+ saved_index = table_index;
+ table_index &=
+ static_cast<int32_t>((table_index - module_->function_tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
+ DCHECK_EQ(table_index, saved_index);
const WasmIndirectFunctionTable* table =
&module_->function_tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
+ // Mask entry_index for SSCA mitigation.
+ saved_index = entry_index;
+ entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
+ ~static_cast<int32_t>(entry_index)) >>
+ 31;
+ DCHECK_EQ(entry_index, saved_index);
uint32_t index = table->values[entry_index];
if (index >= interpreter_code_.size()) return nullptr;
+ // Mask index for SSCA mitigation.
+ saved_index = index;
+ index &= static_cast<int32_t>((index - interpreter_code_.size()) &
+ ~static_cast<int32_t>(index)) >>
+ 31;
+ DCHECK_EQ(index, saved_index);
+
return GetCode(index);
}
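
Each bounds check in GetIndirectCode is now followed by a branchless mask that zeroes the index on any (possibly speculative) out-of-bounds path before it is used for a load. A small standalone sketch of the masking idiom; like the code above it assumes indices below 2^31 and relies on two's-complement conversion and arithmetic right shift:

#include <cstdint>

// Branchless Spectre-style mitigation: after a bounds check of {index}
// against {size}, clamp the index to zero on any mispredicted path.
uint32_t MaskIndexForLoad(uint32_t index, uint32_t size) {
  // All-ones if index < size, all-zeros otherwise (assumes index < 2^31).
  uint32_t mask = static_cast<uint32_t>(
      static_cast<int32_t>((index - size) & ~index) >> 31);
  return index & mask;
}

// Typical use: the branch still rejects bad indices architecturally, the mask
// neutralizes them micro-architecturally.
//   if (index >= size) return nullptr;
//   index = MaskIndexForLoad(index, size);
//   return table[index];
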
@@ -1543,9 +1618,21 @@ class ThreadImpl {
case kExprI32UConvertSatF64:
Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
return true;
+ case kExprI64SConvertSatF32:
+ Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64UConvertSatF32:
+ Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64SConvertSatF64:
+ Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
+ return true;
+ case kExprI64UConvertSatF64:
+ Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
+ return true;
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
return false;
@@ -1912,7 +1999,7 @@ class ThreadImpl {
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, operand.index);
+ CallIndirectFunction(0, entry_index, operand.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -2071,6 +2158,9 @@ class ThreadImpl {
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
Push(WasmValue(result));
len = 1 + operand.length;
+ // Treat one grow_memory instruction like 1000 other instructions,
+ // because it is a really expensive operation.
+ if (max > 0) max = std::max(0, max - 1000);
break;
}
case kExprMemorySize: {
@@ -2152,8 +2242,8 @@ class ThreadImpl {
#undef EXECUTE_UNOP
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
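
The interpreter decrements a per-call step budget as it executes; charging grow_memory an extra 1000 steps makes the loop yield back to the caller soon after such an expensive operation. A tiny sketch of that budget accounting with hypothetical opcode names:

#include <algorithm>

enum Opcode { kNop, kGrowMemory };

// Execute at most {max} budget units; expensive opcodes consume more budget
// so the loop yields soon after them. Returns the remaining budget.
int RunWithBudget(const Opcode* code, int count, int max) {
  for (int pc = 0; pc < count && max > 0; ++pc) {
    switch (code[pc]) {
      case kGrowMemory:
        // Treat one grow_memory like 1000 ordinary instructions.
        max = std::max(0, max - 1000);
        break;
      default:
        --max;
        break;
    }
  }
  return max;
}
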
@@ -2386,18 +2476,24 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer in a handle. As this is an aligned
- // pointer, to the GC it will look like a Smi.
+ // Wrap the arg_buffer data pointer and the WasmContext* in a handle. As
+ // this is an aligned pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
+ static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
+ "code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
+ WasmContext* context = code.wasm_context();
+ Handle<Object> context_obj(reinterpret_cast<Object*>(context), isolate);
+ DCHECK(!context_obj->IsHeapObject());
args[compiler::CWasmEntryParameters::kCodeObject] =
code.IsCodeObject()
? Handle<Object>::cast(code.GetCode())
: Handle<Object>::cast(isolate->factory()->NewForeign(
code.GetWasmCode()->instructions().start(), TENURED));
+ args[compiler::CWasmEntryParameters::kWasmContext] = context_obj;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -2466,13 +2562,19 @@ class ThreadImpl {
DCHECK(AllowHeapAllocation::IsAllowed());
if (code->kind() == wasm::WasmCode::kFunction) {
- DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
- codemap()->instance());
+ if (code->owner()->compiled_module()->owning_instance() !=
+ codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
+
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
+ }
+ if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
+ code->kind() == wasm::WasmCode::kInterpreterStub) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2501,23 +2603,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- bool no_func_tables = !codemap()->has_instance();
- if (FLAG_wasm_jit_to_native) {
- no_func_tables = no_func_tables || codemap()
- ->instance()
- ->compiled_module()
- ->GetNativeModule()
- ->function_tables()
- .empty();
- } else {
- no_func_tables =
- no_func_tables ||
- !codemap()->instance()->compiled_module()->has_function_tables();
- }
- if (no_func_tables) {
- // No instance. Rely on the information stored in the WasmModule.
- // TODO(wasm): This is only needed for testing. Refactor testing to use
- // the same paths as production.
+ if (codemap()->call_indirect_through_module()) {
+ // Rely on the information stored in the WasmModule.
InterpreterCode* code =
codemap()->GetIndirectCode(table_index, entry_index);
if (!code) return {ExternalCallResult::INVALID_FUNC};
@@ -2551,7 +2638,7 @@ class ThreadImpl {
DCHECK_EQ(canonical_sig_index,
module()->signature_map.Find(module()->signatures[sig_index]));
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
// Check signature.
FixedArray* fun_tables = compiled_module->function_tables();
if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
@@ -2578,33 +2665,23 @@ class ThreadImpl {
target_gc = Code::cast(fun_table->get(
compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
- // Check signature.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- if (table_index >= fun_tables.size()) {
+ // The function table is stored in the wasm context.
+ // TODO(wasm): the wasm interpreter currently supports only one table.
+ CHECK_EQ(0, table_index);
+ // Bounds check against table size.
+ if (entry_index >= wasm_context_->table_size) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to the function table, from the
- // address stored in the respective table of tables.
- FixedArray* fun_table =
- *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
- // Function tables store <smi, code> pairs.
- int num_funcs_in_table =
- fun_table->length() / compiler::kFunctionTableEntrySize;
- if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(fun_table->get(
- compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ // Signature check.
+ int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
+ if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
-
+ // Load the target address (first instruction of code).
+ Address first_instr = wasm_context_->table[entry_index].target;
+ // TODO(titzer): load the wasm context instead of relying on the
+ // target code being specialized to the target instance.
// Get code object.
- Address first_instr =
- Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
- static_cast<int>(entry_index))))
- ->foreign_address();
target =
isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
first_instr);
@@ -2897,6 +2974,10 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
+void WasmInterpreter::SetCallIndirectTestMode() {
+ internals_->codemap_.set_call_indirect_through_module(true);
+}
+
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation
diff --git a/chromium/v8/src/wasm/wasm-interpreter.h b/chromium/v8/src/wasm/wasm-interpreter.h
index b0c100b5a94..88d21c37d14 100644
--- a/chromium/v8/src/wasm/wasm-interpreter.h
+++ b/chromium/v8/src/wasm/wasm-interpreter.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_INTERPRETER_H_
-#define V8_WASM_INTERPRETER_H_
+#ifndef V8_WASM_WASM_INTERPRETER_H_
+#define V8_WASM_WASM_INTERPRETER_H_
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
@@ -215,6 +215,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Manually adds code to the interpreter for the given function.
void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
+ void SetCallIndirectTestMode();
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
@@ -230,4 +231,4 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_INTERPRETER_H_
+#endif // V8_WASM_WASM_INTERPRETER_H_
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index ce2bf42455b..915d4d9ead6 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -16,8 +16,6 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -40,6 +38,35 @@ namespace {
} \
} while (false)
+// Like an ErrorThrower, but turns all pending exceptions into scheduled
+// exceptions when going out of scope. Use this in API methods.
+// Note that pending exceptions are not necessarily created by the ErrorThrower,
+// but e.g. by the wasm start function. There might also be a scheduled
+// exception, created by another API call (e.g. v8::Object::Get). But there
+// should never be both pending and scheduled exceptions.
+class ScheduledErrorThrower : public ErrorThrower {
+ public:
+ ScheduledErrorThrower(i::Isolate* isolate, const char* context)
+ : ErrorThrower(isolate, context) {}
+
+ ~ScheduledErrorThrower();
+};
+
+ScheduledErrorThrower::~ScheduledErrorThrower() {
+ // There should never be both a pending and a scheduled exception.
+ DCHECK(!isolate()->has_scheduled_exception() ||
+ !isolate()->has_pending_exception());
+ // Don't throw another error if there is already a scheduled error.
+ if (isolate()->has_scheduled_exception()) {
+ Reset();
+ } else if (isolate()->has_pending_exception()) {
+ Reset();
+ isolate()->OptionalRescheduleException(false);
+ } else if (error()) {
+ isolate()->ScheduleThrow(*Reify());
+ }
+}
+
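
The destructor above resolves three possible states on scope exit: an already scheduled exception wins, a pending exception is rescheduled, and otherwise any collected error is thrown. A reduced sketch of the same policy against a stand-in Isolate interface (not the real V8 one):

#include <string>

// Minimal stand-in for the parts of Isolate the destructor cares about.
struct FakeIsolate {
  bool has_scheduled_exception = false;
  bool has_pending_exception = false;
  void OptionalRescheduleException() {
    has_pending_exception = false;
    has_scheduled_exception = true;
  }
  void ScheduleThrow(const std::string& /*error*/) {
    has_scheduled_exception = true;
  }
};

class ScopedErrorScheduler {
 public:
  explicit ScopedErrorScheduler(FakeIsolate* isolate) : isolate_(isolate) {}
  void Error(const std::string& message) { error_ = message; }
  ~ScopedErrorScheduler() {
    if (isolate_->has_scheduled_exception) {
      // Another API call already scheduled an exception; drop our error.
    } else if (isolate_->has_pending_exception) {
      // E.g. the wasm start function threw; turn it into a scheduled one.
      isolate_->OptionalRescheduleException();
    } else if (!error_.empty()) {
      isolate_->ScheduleThrow(error_);
    }
  }

 private:
  FakeIsolate* isolate_;
  std::string error_;
};
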
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
@@ -123,8 +150,7 @@ void WebAssemblyCompileStreaming(
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.compileStreaming()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
thrower.CompileError("Wasm code generation disallowed by embedder");
auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false),
@@ -144,7 +170,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
thrower.CompileError("Wasm code generation disallowed by embedder");
@@ -165,7 +191,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
// Asynchronous compilation handles copying wire bytes if necessary.
- i::wasm::AsyncCompile(i_isolate, promise, bytes, is_shared);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, promise, bytes, is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -173,7 +199,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
bool is_shared = false;
auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
@@ -209,7 +235,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i_isolate->wasm_module_callback()(args)) return;
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Module must be invoked with 'new'");
@@ -233,10 +259,12 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes_copy);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes);
}
if (module_obj.is_null()) return;
@@ -250,8 +278,7 @@ void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.imports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -264,8 +291,7 @@ void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -279,8 +305,8 @@ void WebAssemblyModuleCustomSections(
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.customSections()");
+ ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.Module.customSections()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -303,18 +329,23 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::MaybeHandle<i::Object> instance_object;
{
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly Instantiation");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
+
+ // TODO(ahaas): These checks on the module should not be necessary here. They
+ // are just a workaround for https://crbug.com/837417.
+ i::Handle<i::Object> module_obj = Utils::OpenHandle(*module);
+ if (!module_obj->IsWasmModuleObject()) {
+ thrower.TypeError("Argument 0 must be a WebAssembly.Module object");
+ return {};
+ }
+
i::MaybeHandle<i::JSReceiver> maybe_imports =
GetValueAsImports(ffi, &thrower);
if (thrower.error()) return {};
- i::Handle<i::WasmModuleObject> module_obj =
- i::Handle<i::WasmModuleObject>::cast(
- Utils::OpenHandle(Object::Cast(*module)));
- instance_object =
- i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
- i::MaybeHandle<i::JSArrayBuffer>());
+ instance_object = i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &thrower, i::Handle<i::WasmModuleObject>::cast(module_obj),
+ maybe_imports, i::MaybeHandle<i::JSArrayBuffer>());
}
DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
@@ -322,25 +353,7 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
return Utils::ToLocal(instance_object.ToHandleChecked());
}
-// Entered as internal implementation detail of sync and async instantiate.
-// args[0] *must* be a WebAssembly.Module.
-void WebAssemblyInstantiateImplCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- DCHECK_GE(args.Length(), 1);
- v8::Isolate* isolate = args.GetIsolate();
- MicrotasksScope does_not_run_microtasks(isolate,
- MicrotasksScope::kDoNotRunMicrotasks);
-
- HandleScope scope(args.GetIsolate());
- Local<Value> module = args[0];
- Local<Value> ffi = args.Data();
- Local<Value> instance;
- if (WebAssemblyInstantiateImpl(isolate, module, ffi).ToLocal(&instance)) {
- args.GetReturnValue().Set(instance);
- }
-}
-
-void WebAssemblyInstantiateToPairCallback(
+void WebAssemblyInstantiateCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
DCHECK_GE(args.Length(), 1);
Isolate* isolate = args.GetIsolate();
@@ -386,7 +399,7 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
return;
@@ -429,7 +442,7 @@ void WebAssemblyInstantiateStreaming(
DCHECK(!module_promise.IsEmpty());
Local<Value> data = args[1];
ASSIGN(Function, instantiate_impl,
- Function::New(context, WebAssemblyInstantiateToPairCallback, data));
+ Function::New(context, WebAssemblyInstantiateCallback, data));
ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl));
args.GetReturnValue().Set(result);
}
@@ -444,18 +457,19 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
- Local<Promise> module_promise = resolver->GetPromise();
- args.GetReturnValue().Set(module_promise);
+ Local<Promise> promise = resolver->GetPromise();
+ args.GetReturnValue().Set(promise);
Local<Value> first_arg_value = args[0];
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ Local<Value> ffi = args[1];
i::Handle<i::Object> first_arg = Utils::OpenHandle(*first_arg_value);
if (!first_arg->IsJSObject()) {
thrower.TypeError(
@@ -466,26 +480,35 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- FunctionCallback instantiator = nullptr;
if (first_arg->IsWasmModuleObject()) {
- module_promise = resolver->GetPromise();
- if (!resolver->Resolve(context, first_arg_value).IsJust()) return;
- instantiator = WebAssemblyInstantiateImplCallback;
- } else {
- ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile));
- ASSIGN(Value, async_compile_retval,
- async_compile->Call(context, args.Holder(), 1, &first_arg_value));
- module_promise = Local<Promise>::Cast(async_compile_retval);
- instantiator = WebAssemblyInstantiateToPairCallback;
+ i::Handle<i::WasmModuleObject> module_obj =
+ i::Handle<i::WasmModuleObject>::cast(first_arg);
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(ffi, &thrower);
+
+ if (thrower.error()) {
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
+ return;
+ }
+
+ i_isolate->wasm_engine()->AsyncInstantiate(
+ i_isolate, Utils::OpenHandle(*promise), module_obj, maybe_imports);
+ return;
}
- DCHECK(!module_promise.IsEmpty());
- DCHECK_NOT_NULL(instantiator);
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- // We'll check for that in WebAssemblyInstantiateImpl.
- Local<Value> data = args[1];
+
+  // We did not get a WasmModuleObject as input, so we first have to compile
+  // the input.
+ ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile));
+ ASSIGN(Value, async_compile_retval,
+ async_compile->Call(context, args.Holder(), 1, &first_arg_value));
+ promise = Local<Promise>::Cast(async_compile_retval);
+ DCHECK(!promise.IsEmpty());
ASSIGN(Function, instantiate_impl,
- Function::New(context, instantiator, data));
- ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl));
+ Function::New(context, WebAssemblyInstantiateCallback, ffi));
+ ASSIGN(Promise, result, promise->Then(context, instantiate_impl));
args.GetReturnValue().Set(result);
}
@@ -521,7 +544,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Table must be invoked with 'new'");
return;
@@ -578,7 +601,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Memory must be invoked with 'new'");
return;
@@ -672,8 +695,7 @@ void WebAssemblyInstanceGetExports(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Instance.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance.exports()");
EXTRACT_THIS(receiver, WasmInstanceObject);
i::Handle<i::JSObject> exports_object(receiver->exports_object());
args.GetReturnValue().Set(Utils::ToLocal(exports_object));
@@ -684,8 +706,7 @@ void WebAssemblyTableGetLength(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Table.length()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
EXTRACT_THIS(receiver, WasmTableObject);
args.GetReturnValue().Set(
v8::Number::New(isolate, receiver->current_length()));
@@ -696,7 +717,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -738,7 +759,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
@@ -759,7 +780,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -807,8 +828,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
@@ -826,7 +846,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
uint32_t old_size =
- old_buffer->byte_length()->Number() / i::wasm::kSpecMaxWasmMemoryPages;
+ old_buffer->byte_length()->Number() / i::wasm::kWasmPageSize;
int64_t new_size64 = old_size + delta_size;
if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
@@ -849,8 +869,7 @@ void WebAssemblyMemoryGetBuffer(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.buffer");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
EXTRACT_THIS(receiver, WasmMemoryObject);
i::Handle<i::Object> buffer_obj(receiver->array_buffer(), i_isolate);
@@ -931,7 +950,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
name, isolate->strict_function_map(), LanguageMode::kStrict);
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
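A note on the WebAssembly.Memory.grow() arithmetic corrected above: the current size in pages must come from dividing the buffer's byte length by the 64 KiB wasm page size, not by the page limit. Below is a minimal, self-contained sketch of the corrected computation; the constant and function names are illustrative stand-ins, not V8's declarations.

#include <cstddef>
#include <cstdint>

// Wasm pages are 64 KiB.
constexpr size_t kWasmPageSize = 64 * 1024;

// Current size of a wasm memory in whole pages, given its buffer byte length.
uint32_t CurrentPages(size_t byte_length) {
  return static_cast<uint32_t>(byte_length / kWasmPageSize);
}

For example, a 1 MiB buffer yields 16 pages, which is the value the subsequent range checks in grow() operate on.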
diff --git a/chromium/v8/src/wasm/wasm-js.h b/chromium/v8/src/wasm/wasm-js.h
index 926bd7647ac..bdcc1f061ec 100644
--- a/chromium/v8/src/wasm/wasm-js.h
+++ b/chromium/v8/src/wasm/wasm-js.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_JS_H_
-#define V8_WASM_JS_H_
+#ifndef V8_WASM_WASM_JS_H_
+#define V8_WASM_WASM_JS_H_
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -16,14 +15,9 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
-
- // WebAssembly.Table.
- static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
-
- // WebAssembly.Memory
- static bool IsWasmMemoryObject(Isolate* isolate, Handle<Object> value);
};
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_JS_H_
diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h
index 184b6329baf..c1011c3f89a 100644
--- a/chromium/v8/src/wasm/wasm-limits.h
+++ b/chromium/v8/src/wasm/wasm-limits.h
@@ -48,6 +48,8 @@ static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
+static_assert(kV8MaxWasmMemoryBytes <= std::numeric_limits<int32_t>::max(),
+ "max memory bytes should fit in int32_t");
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
diff --git a/chromium/v8/src/wasm/wasm-memory.cc b/chromium/v8/src/wasm/wasm-memory.cc
index fcbe60ae0e8..38cd8973a6b 100644
--- a/chromium/v8/src/wasm/wasm-memory.cc
+++ b/chromium/v8/src/wasm/wasm-memory.cc
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+#else
+ static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+#endif
size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
return true;
}
allocated_address_space_ -= num_bytes;
-#endif
return false;
}
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length) {
- // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (require_guard_regions) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
-
- WasmAllocationTracker* const allocation_tracker =
- isolate->wasm_engine()->allocation_tracker();
-
- // Let the WasmAllocationTracker know we are going to reserve a bunch of
- // address space.
- if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
- // If we are over the address space limit, fail.
- return nullptr;
- }
-
- // The Reserve makes the whole region inaccessible by default.
- *allocation_base =
- isolate->array_buffer_allocator()->Reserve(*allocation_length);
- if (*allocation_base == nullptr) {
- allocation_tracker->ReleaseAddressSpace(*allocation_length);
- return nullptr;
- }
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ *allocation_length = require_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ : base::bits::RoundUpToPowerOfTwo32(RoundUp(
+ static_cast<uint32_t>(size), kWasmPageSize));
+ DCHECK_GE(*allocation_length, size);
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
- void* memory = *allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- // TODO(titzer): use guard regions for minicage and merge with above code.
- CHECK_LE(size, kV8MaxWasmMemoryBytes);
- *allocation_length =
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
- void* memory =
- size == 0
- ? nullptr
- : isolate->array_buffer_allocator()->Allocate(*allocation_length);
- *allocation_base = memory;
- return memory;
+  // The AllocatePages call reserves the whole region as inaccessible.
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
+ return nullptr;
}
+
+ void* memory = *allocation_base;
+
+ // Make the part we care about accessible.
+ CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite));
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
#endif
constexpr bool is_external = false;
+ // All buffers have guard regions now, but sometimes they are small.
+ constexpr bool has_guard_region = true;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, require_guard_regions, shared);
+ size, is_external, has_guard_region, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
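For readers following the TryAllocateBackingStore rewrite above, the new allocation-length rule is: with guard regions, reserve the full addressable heap offset rounded up to the commit page size; without them, round the requested size up to whole wasm pages and then to a power of two. The sketch below uses assumed constants and a stand-in round-up helper, not V8's own.

#include <cstdint>

constexpr uint64_t kWasmPageSize = 64 * 1024;   // wasm page size
constexpr uint64_t kCommitPageSize = 4 * 1024;  // assumed OS commit page size
constexpr uint64_t kMaxHeapOffset = uint64_t{1} << 33;  // example value only

constexpr uint64_t RoundUpTo(uint64_t value, uint64_t align) {
  return (value + align - 1) / align * align;
}

uint64_t AllocationLength(uint64_t size, bool require_guard_regions) {
  if (require_guard_regions) return RoundUpTo(kMaxHeapOffset, kCommitPageSize);
  // Whole pages first, then the next power of two, as in the new code path.
  uint64_t rounded = RoundUpTo(size, kWasmPageSize);
  uint64_t power = 1;
  while (power < rounded) power <<= 1;
  return power;
}

Either way the result is at least the requested size, which is what the DCHECK_GE added in the diff asserts.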
diff --git a/chromium/v8/src/wasm/wasm-memory.h b/chromium/v8/src/wasm/wasm-memory.h
index c5d6ef51547..438014b4175 100644
--- a/chromium/v8/src/wasm/wasm-memory.h
+++ b/chromium/v8/src/wasm/wasm-memory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MEMORY_H_
-#define V8_WASM_MEMORY_H_
+#ifndef V8_WASM_WASM_MEMORY_H_
+#define V8_WASM_WASM_MEMORY_H_
#include "src/flags.h"
#include "src/handles.h"
@@ -49,4 +49,4 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MEMORY_H_
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index b6b9117ae55..909b62a16f3 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -18,7 +18,6 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
@@ -157,7 +156,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig) {
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig) {
Address new_context_address =
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
@@ -173,6 +172,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
wasm::WasmCode* exported_wrapper =
native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
if (exported_wrapper == nullptr) {
+ wasm::NativeModuleModificationScope native_modification_scope(
+ native_module);
Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
exported_wrapper = native_module->AddExportedWrapper(
@@ -181,10 +182,11 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
Address target = exported_wrapper->instructions().start();
return isolate->factory()->NewForeign(target, TENURED);
}
+ CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
Handle<Code> code = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
AttachWasmFunctionInfo(isolate, code, owning_instance,
- static_cast<int>(index));
+ static_cast<int>(func_index));
return code;
}
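The two scopes added above (NativeModuleModificationScope and CodeSpaceMemoryModificationScope) are RAII guards: they make the relevant code memory writable while the wasm-to-wasm wrapper is compiled and installed, and restore protection when they go out of scope. Below is a generic, POSIX-only sketch of that pattern, not V8's implementation.

#include <cstddef>
#include <sys/mman.h>  // mprotect; POSIX only, for illustration

// Makes a page-aligned code region writable for the lifetime of the scope,
// then returns it to read/execute. Error handling is omitted for brevity.
class CodeModificationScope {
 public:
  CodeModificationScope(void* start, size_t size) : start_(start), size_(size) {
    mprotect(start_, size_, PROT_READ | PROT_WRITE);
  }
  ~CodeModificationScope() { mprotect(start_, size_, PROT_READ | PROT_EXEC); }

 private:
  void* start_;
  size_t size_;
};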
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index 492c51487fd..405b5f3ff4b 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MODULE_H_
-#define V8_WASM_MODULE_H_
+#ifndef V8_WASM_WASM_MODULE_H_
+#define V8_WASM_WASM_MODULE_H_
#include <memory>
@@ -275,7 +275,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig);
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
@@ -323,4 +323,4 @@ class TruncatedUserString {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MODULE_H_
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index 0a858621741..4891ad671a1 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_INL_H_
-#define V8_WASM_OBJECTS_INL_H_
+#ifndef V8_WASM_WASM_OBJECTS_INL_H_
+#define V8_WASM_WASM_OBJECTS_INL_H_
#include "src/heap/heap-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -158,4 +158,4 @@ void WasmCompiledModule::ReplaceCodeTableForTesting(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_INL_H_
+#endif // V8_WASM_WASM_OBJECTS_INL_H_
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index c92a51716ab..f06f3240f08 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -248,11 +248,44 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ if (count == 0) return; // Degenerate case: nothing to do.
+
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
+ constexpr int kInvalidSigIndex = -1;
+
+ if (WASM_CONTEXT_TABLES) {
+ // If tables are stored in the WASM context, no code patching is
+ // necessary. We simply have to grow the raw tables in the WasmContext
+ // for each instance that has imported this table.
+
+ // TODO(titzer): replace the dispatch table with a weak list of all
+ // the instances that import a given table.
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // TODO(titzer): potentially racy update of WasmContext::table
+ WasmContext* wasm_context =
+ WasmInstanceObject::cast(dispatch_tables->get(i))
+ ->wasm_context()
+ ->get();
+ DCHECK_EQ(old_size, wasm_context->table_size);
+ uint32_t new_size = old_size + count;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ realloc(wasm_context->table,
+ new_size * sizeof(IndirectFunctionTableEntry)));
+ for (uint32_t j = old_size; j < new_size; j++) {
+ wasm_context->table[j].sig_id = kInvalidSigIndex;
+ wasm_context->table[j].context = nullptr;
+ wasm_context->table[j].target = nullptr;
+ }
+ wasm_context->table_size = new_size;
+ }
+ return;
+ }
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
for (int i = 0; i < dispatch_tables->length();
@@ -272,24 +305,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
*new_function_table);
// Patch the code of the respective instance.
- if (FLAG_wasm_jit_to_native) {
- DisallowHeapAllocation no_gc;
- wasm::CodeSpecialization code_specialization(isolate,
- &specialization_zone);
- WasmInstanceObject* instance =
- WasmInstanceObject::cast(dispatch_tables->get(i));
- WasmCompiledModule* compiled_module = instance->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- wasm::NativeModuleModificationScope native_module_modification_scope(
- native_module);
- GlobalHandleAddress old_function_table_addr =
- native_module->function_tables()[table_index];
- code_specialization.PatchTableSize(old_size, old_size + count);
- code_specialization.RelocatePointer(old_function_table_addr,
- new_function_table_addr);
- code_specialization.ApplyToWholeInstance(instance);
- native_module->function_tables()[table_index] = new_function_table_addr;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
&specialization_zone);
@@ -311,70 +327,104 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t index, Handle<JSFunction> function) {
+ int32_t table_index, Handle<JSFunction> function) {
Handle<FixedArray> array(table->functions(), isolate);
+ if (function.is_null()) {
+ ClearDispatchTables(table, table_index); // Degenerate case of null value.
+ array->set(table_index, isolate->heap()->null_value());
+ return;
+ }
+
+ // TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
+ auto exported_function = Handle<WasmExportedFunction>::cast(function);
+ auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ DCHECK_NOT_NULL(wasm_function);
+ DCHECK_NOT_NULL(wasm_function->sig);
+ WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
+ handle(exported_function->instance()), wasm_code,
+ exported_function->function_index());
+ array->set(table_index, *function);
+}
+
+void WasmTableObject::UpdateDispatchTables(
+ Isolate* isolate, Handle<WasmTableObject> table, int table_index,
+ wasm::FunctionSig* sig, Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index) {
+ if (WASM_CONTEXT_TABLES) {
+ // We simply need to update the WASM contexts for each instance
+ // that imports this table.
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-
- wasm::FunctionSig* sig = nullptr;
- Handle<Object> code = Handle<Object>::null();
- Handle<Object> value = isolate->factory()->null_value();
-
- if (!function.is_null()) {
- auto exported_function = Handle<WasmExportedFunction>::cast(function);
- auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
- // The verification that {function} is an export was done
- // by the caller.
- DCHECK(wasm_function != nullptr && wasm_function->sig != nullptr);
- sig = wasm_function->sig;
- value = function;
- // TODO(titzer): Make JSToWasm wrappers just call the WASM to WASM wrapper,
- // and then we can just reuse the WASM to WASM wrapper.
- WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
- wasm::NativeModule* native_module =
- wasm_code.IsCodeObject() ? nullptr : wasm_code.GetWasmCode()->owner();
- CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
- wasm::NativeModuleModificationScope native_modification_scope(
- native_module);
- code = wasm::GetOrCreateIndirectCallWrapper(
- isolate, handle(exported_function->instance()), wasm_code,
- exported_function->function_index(), sig);
- }
- UpdateDispatchTables(table, index, sig, code);
- array->set(index, *value);
-}
-
-void WasmTableObject::UpdateDispatchTables(Handle<WasmTableObject> table,
- int index, wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign) {
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+ auto& entry = to_instance->wasm_context()->get()->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = from_instance->wasm_context()->get();
+ entry.target = wasm_code.instructions().start();
+ }
+ } else {
+ // We may need to compile a new WASM->WASM wrapper for this.
+ Handle<Object> code_or_foreign = wasm::GetOrCreateIndirectCallWrapper(
+ isolate, from_instance, wasm_code, func_index, sig);
+
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+
+ function_table->set(compiler::FunctionTableSigOffset(table_index),
+ Smi::FromInt(sig_id));
+ function_table->set(compiler::FunctionTableCodeOffset(table_index),
+ *code_or_foreign);
+ }
+ }
+}
+
+void WasmTableObject::ClearDispatchTables(Handle<WasmTableObject> table,
+ int index) {
DisallowHeapAllocation no_gc;
FixedArray* dispatch_tables = table->dispatch_tables();
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- FixedArray* function_table = FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
- Smi* sig_smi = Smi::FromInt(-1);
- Object* code = Smi::kZero;
- if (sig) {
- DCHECK(code_or_foreign->IsCode() || code_or_foreign->IsForeign());
- WasmInstanceObject* instance = WasmInstanceObject::cast(
+ if (WASM_CONTEXT_TABLES) {
+ constexpr int kInvalidSigIndex = -1; // TODO(titzer): move to header.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset));
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_index = instance->module()->signature_map.Find(sig);
- sig_smi = Smi::FromInt(sig_index);
- code = *code_or_foreign;
+ DCHECK_LT(index, to_instance->wasm_context()->get()->table_size);
+ auto& entry = to_instance->wasm_context()->get()->table[index];
+ entry.sig_id = kInvalidSigIndex;
+ entry.context = nullptr;
+ entry.target = nullptr;
} else {
- DCHECK(code_or_foreign.is_null());
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+ function_table->set(compiler::FunctionTableSigOffset(index),
+ Smi::FromInt(-1));
+ function_table->set(compiler::FunctionTableCodeOffset(index), Smi::kZero);
}
- function_table->set(compiler::FunctionTableSigOffset(index), sig_smi);
- function_table->set(compiler::FunctionTableCodeOffset(index), code);
}
}
namespace {
-
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
uint32_t pages, uint32_t maximum_pages,
@@ -393,20 +443,22 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions =
- old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
new_size > kMaxInt) {
return Handle<JSArrayBuffer>::null();
}
- if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
+ // Reusing the backing store from externalized buffers causes problems with
+ // Blink's array buffers. The connection between the two is lost, which can
+ // lead to Blink not knowing about the other reference to the buffer and
+ // freeing it too early.
+ if (!old_buffer->is_external() && old_size != 0 &&
+ ((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
- isolate->array_buffer_allocator()->SetProtection(
- old_mem_start, new_size,
- v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ CHECK(i::SetPermissions(old_mem_start, new_size,
+ PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
@@ -426,23 +478,13 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
} else {
bool free_memory = false;
Handle<JSArrayBuffer> new_buffer;
- if (pages != 0) {
- // Allocate a new buffer and memcpy the old contents.
- free_memory = true;
- new_buffer =
- wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
- DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
- DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
- } else {
- // Reuse the prior backing store, but allocate a new array buffer.
- new_buffer = wasm::SetupArrayBuffer(
- isolate, old_buffer->allocation_base(),
- old_buffer->allocation_length(), old_buffer->backing_store(),
- new_size, old_buffer->is_external(), old_buffer->has_guard_region());
- }
+ // Allocate a new buffer and memcpy the old contents.
+ free_memory = true;
+ new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
@@ -667,6 +709,91 @@ void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
CHECK(compiled_module->weak_wasm_module()->cleared());
}
+namespace {
+void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ if (FLAG_wasm_jit_to_native) {
+ if (native_module) {
+ TRACE("Finalizing %zu {\n", native_module->instance_id);
+ } else {
+ TRACE("Finalized already cleaned up compiled module\n");
+ }
+ } else {
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+
+ if (compiled_module->use_trap_handler()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ DisallowHeapAllocation no_gc;
+ FixedArray* code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Code* code = Code::cast(code_table->get(i));
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(
+ Smi::FromInt(trap_handler::kInvalidIndex));
+ }
+ }
+ }
+ }
+ WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
+
+  // Since the order of finalizers is not guaranteed, it can be the case
+  // that {instance->compiled_module()->module()}, which is a
+  // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
+  // Weak references to this instance won't be cleared until
+  // the next GC cycle, so we need to manually break some links (such as
+  // the weak references from {WasmMemoryObject::instances}).
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
+ // weak_wasm_module may have been cleared, meaning the module object
+ // was GC-ed. We still want to maintain the links between instances, to
+ // release the WasmCompiledModule corresponding to the WasmModuleInstance
+ // being finalized here.
+ WasmModuleObject* wasm_module = nullptr;
+ if (!weak_wasm_module->cleared()) {
+ wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
+
+ DCHECK(!current_template->has_prev_instance());
+ if (current_template == compiled_module) {
+ if (!compiled_module->has_next_instance()) {
+ WasmCompiledModule::Reset(isolate, compiled_module);
+ } else {
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(compiled_module->next_instance());
+ }
+ }
+ }
+
+ compiled_module->RemoveFromChain();
+
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
+}
+
+} // namespace
+
+void WasmInstanceObject::InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ InstanceFinalizer, v8::WeakCallbackType::kFinalizer);
+}
+
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
Handle<JSFunction> js_function(JSFunction::cast(object));
@@ -721,8 +848,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_length(arity);
shared->set_internal_formal_parameter_count(arity);
NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, export_wrapper, isolate->sloppy_function_map());
+ name, export_wrapper, isolate->sloppy_function_without_prototype_map());
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
+ // According to the spec, exported functions should not have a [[Construct]]
+ // method.
+ DCHECK(!js_function->IsConstructor());
js_function->set_shared(*shared);
Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
@@ -917,7 +1047,7 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
int position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = shared->GetIsolate();
Handle<FixedArray> breakpoint_infos;
if (shared->has_breakpoint_infos()) {
@@ -937,7 +1067,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
position) {
Handle<BreakPointInfo> old_info(
BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
- BreakPointInfo::SetBreakPoint(old_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(old_info, break_point);
return;
}
@@ -964,7 +1094,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
// Generate new BreakpointInfo.
Handle<BreakPointInfo> breakpoint_info =
isolate->factory()->NewBreakPointInfo(position);
- BreakPointInfo::SetBreakPoint(breakpoint_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(breakpoint_info, break_point);
// Now insert new position at insert_pos.
new_breakpoint_infos->set(insert_pos, *breakpoint_info);
@@ -1005,6 +1135,7 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
Handle<WasmSharedModuleData> shared) {
if (shared->has_lazy_compilation_orchestrator()) return;
Isolate* isolate = shared->GetIsolate();
+ // TODO(titzer): remove dependency on module-compiler.h
auto orch_handle =
Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
shared->set_lazy_compilation_orchestrator(*orch_handle);
@@ -1235,9 +1366,8 @@ MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
if (breakpoint_info->source_position() != position) return {};
- Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
- isolate);
- return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+ Handle<Object> break_points(breakpoint_info->break_points(), isolate);
+ return isolate->debug()->GetHitBreakPoints(break_points);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
@@ -1303,13 +1433,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// has_code_table and pass undefined.
compiled_module->set_code_table(*code_table);
- native_module->function_tables() = function_tables;
- native_module->empty_function_tables() = function_tables;
-
int function_count = static_cast<int>(module->functions.size());
- Handle<FixedArray> handler_table =
- isolate->factory()->NewFixedArray(function_count, TENURED);
- compiled_module->set_handler_table(*handler_table);
Handle<FixedArray> source_positions =
isolate->factory()->NewFixedArray(function_count, TENURED);
compiled_module->set_source_positions(*source_positions);
@@ -1338,6 +1462,10 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
return ret;
}
+ Handle<FixedArray> export_copy = isolate->factory()->CopyFixedArray(
+ handle(module->export_wrappers(), isolate));
+ ret->set_export_wrappers(*export_copy);
+
std::unique_ptr<wasm::NativeModule> native_module =
module->GetNativeModule()->Clone();
// construct the wrapper in 2 steps, because its construction may trigger GC,
@@ -1387,65 +1515,6 @@ wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
return Managed<wasm::NativeModule>::cast(native_module())->get();
}
-void WasmCompiledModule::ResetGCModel(Isolate* isolate,
- WasmCompiledModule* compiled_module) {
- DisallowHeapAllocation no_gc;
- TRACE("Resetting %d\n", compiled_module->instance_id());
- Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->code_table();
- if (fct_obj != nullptr && fct_obj != undefined) {
- // Patch code to update memory references, global references, and function
- // table references.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- // Reset function tables.
- if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->function_tables();
- FixedArray* empty_function_tables =
- compiled_module->empty_function_tables();
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables->length(), empty_function_tables->length());
- for (int i = 0, e = function_tables->length(); i < e; ++i) {
- GlobalHandleAddress func_addr =
- WasmCompiledModule::GetTableValue(function_tables, i);
- code_specialization.RelocatePointer(
- func_addr,
- WasmCompiledModule::GetTableValue(empty_function_tables, i));
- }
- compiled_module->set_function_tables(empty_function_tables);
- }
- }
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = compiled_module->num_imported_functions(),
- end = functions->length();
- i < end; ++i) {
- Code* code = Code::cast(functions->get(i));
- // Skip lazy compile stubs.
- if (code->builtin_index() == Builtins::kWasmCompileLazy) continue;
- if (code->kind() != Code::WASM_FUNCTION) {
- // From here on, there should only be wrappers for exported functions.
- for (; i < end; ++i) {
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
- Code::cast(functions->get(i))->kind());
- }
- break;
- }
- bool changed = code_specialization.ApplyToWasmCode(
- WasmCodeWrapper(handle(code)), SKIP_ICACHE_FLUSH);
- // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
- // above.
- if (changed) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
- }
- }
-}
-
void WasmCompiledModule::InitId() {
#if DEBUG
static uint32_t instance_id_counter = 0;
@@ -1491,22 +1560,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
i, isolate->heap()->undefined_value());
}
}
- // Reset function tables.
- if (native_module->function_tables().size() > 0) {
- std::vector<GlobalHandleAddress>& function_tables =
- native_module->function_tables();
- std::vector<GlobalHandleAddress>& empty_function_tables =
- native_module->empty_function_tables();
-
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables.size(), empty_function_tables.size());
- for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
- code_specialization.RelocatePointer(function_tables[i],
- empty_function_tables[i]);
- }
- native_module->function_tables() = empty_function_tables;
- }
- }
for (uint32_t i = native_module->num_imported_functions(),
end = native_module->FunctionCount();
@@ -1519,7 +1572,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
// above.
if (changed) {
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
@@ -1646,30 +1699,23 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
}
size_t function_table_count =
compiled_module->shared()->module()->function_tables.size();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (function_table_count > 0) {
// The tables are of the right size, but contain bogus global handle
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as-if
// it were just compiled.
- Handle<FixedArray> function_tables;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
DCHECK(compiled_module->has_function_tables());
- function_tables =
- handle(compiled_module->empty_function_tables(), isolate);
- } else {
- DCHECK_GT(native_module->function_tables().size(), 0);
- }
- for (size_t i = 0; i < function_table_count; ++i) {
- Handle<Object> global_func_table_handle =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
- GlobalHandleAddress new_func_table = global_func_table_handle.address();
- if (!FLAG_wasm_jit_to_native) {
+ Handle<FixedArray> function_tables(
+ compiled_module->empty_function_tables(), isolate);
+ for (size_t i = 0; i < function_table_count; ++i) {
+ Handle<Object> global_func_table_handle =
+ isolate->global_handles()->Create(
+ isolate->heap()->undefined_value());
+ GlobalHandleAddress new_func_table = global_func_table_handle.address();
SetTableValue(isolate, function_tables, static_cast<int>(i),
new_func_table);
- } else {
- native_module->empty_function_tables()[i] = new_func_table;
}
}
}
@@ -1761,10 +1807,9 @@ bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
return true;
}
-
bool WasmCompiledModule::SetBreakPoint(
Handle<WasmCompiledModule> compiled_module, int* position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = compiled_module->GetIsolate();
Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
@@ -1779,7 +1824,7 @@ bool WasmCompiledModule::SetBreakPoint(
DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
// Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(shared, *position, break_point_object);
+ WasmSharedModuleData::AddBreakpoint(shared, *position, break_point);
// Iterate over all instances of this module and tell them to set this new
// breakpoint.
@@ -1793,6 +1838,28 @@ bool WasmCompiledModule::SetBreakPoint(
return true;
}
+void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
+ wasm::NativeModule* native_module = GetNativeModule();
+ if (native_module == nullptr) return;
+ const uint32_t number_of_codes = native_module->FunctionCount();
+ if (has_shared()) {
+ Handle<WasmSharedModuleData> shared_handle(shared(), isolate);
+ for (uint32_t i = 0; i < number_of_codes; i++) {
+ wasm::WasmCode* code = native_module->GetCode(i);
+ if (code == nullptr) continue;
+ int name_length;
+ Handle<String> name(
+ WasmSharedModuleData::GetFunctionName(isolate, shared_handle, i));
+ auto cname = name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL,
+ &name_length);
+ wasm::WasmName wasm_name(cname.get(), name_length);
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code,
+ wasm_name));
+ }
+ }
+}
+
void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
MaybeHandle<WeakCell> weak_instance,
int func_index) {
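To make the WASM_CONTEXT_TABLES path of WasmTableObject::Grow above easier to follow: the raw indirect-function table owned by the WasmContext is grown with realloc, and every new slot is marked with an invalid signature so it can never pass a call_indirect signature check until it is populated. The following is a stripped-down model with simplified stand-in types; the real entry type is IndirectFunctionTableEntry in wasm-objects.h.

#include <cstdint>
#include <cstdlib>

struct Entry {
  int32_t sig_id;
  void* context;
  void* target;
};

struct Table {
  Entry* entries = nullptr;
  uint32_t size = 0;
};

constexpr int32_t kInvalidSigIndex = -1;

// Grow the table by count entries. The diff above assigns the realloc result
// unchecked; this sketch guards against failure instead.
bool GrowTable(Table* table, uint32_t count) {
  uint32_t new_size = table->size + count;
  Entry* grown = static_cast<Entry*>(
      std::realloc(table->entries, new_size * sizeof(Entry)));
  if (grown == nullptr) return false;
  for (uint32_t i = table->size; i < new_size; i++) {
    grown[i] = {kInvalidSigIndex, nullptr, nullptr};
  }
  table->entries = grown;
  table->size = new_size;
  return true;
}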
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index cecc11f83f4..fe2ed419db3 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_H_
-#define V8_WASM_OBJECTS_H_
+#ifndef V8_WASM_WASM_OBJECTS_H_
+#define V8_WASM_WASM_OBJECTS_H_
#include "src/base/bits.h"
#include "src/debug/debug.h"
@@ -38,6 +38,8 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
+#define WASM_CONTEXT_TABLES FLAG_wasm_jit_to_native
+
#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
#define DECL_OOL_CAST(type) static type* cast(Object* object);
@@ -55,6 +57,15 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
+// An entry in an indirect dispatch table.
+struct IndirectFunctionTableEntry {
+ int32_t sig_id = 0;
+ WasmContext* context = nullptr;
+ Address target = nullptr;
+
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(IndirectFunctionTableEntry)
+};
+
// Wasm context used to store the mem_size and mem_start address of the linear
// memory. These variables can be accessed at C++ level at graph build time
// (e.g., initialized during instance building / changed at runtime by
@@ -67,14 +78,27 @@ struct WasmContext {
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
+ // TODO(wasm): pad these entries to a power of two.
+ IndirectFunctionTableEntry* table = nullptr;
+ uint32_t table_size = 0;
- inline void SetRawMemory(void* mem_start, size_t mem_size) {
+ void SetRawMemory(void* mem_start, size_t mem_size) {
DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
+
+ ~WasmContext() {
+ if (table) free(table);
+ mem_start = nullptr;
+ mem_size = 0;
+ mem_mask = 0;
+ globals_start = nullptr;
+ table = nullptr;
+ table_size = 0;
+ }
};
// Representation of a WebAssembly.Module JavaScript-level object.
@@ -137,9 +161,13 @@ class WasmTableObject : public JSObject {
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
- static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
- wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int table_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index);
+
+ static void ClearDispatchTables(Handle<WasmTableObject> table, int index);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -249,6 +277,9 @@ class WasmInstanceObject : public JSObject {
static void ValidateOrphanedInstanceForTesting(
Isolate* isolate, Handle<WasmInstanceObject> instance);
+
+ static void InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
};
// A WASM function that is wrapped and exported to JavaScript.
@@ -306,7 +337,7 @@ class WasmSharedModuleData : public FixedArray {
Handle<WasmSharedModuleData>);
static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
Handle<WasmInstanceObject>);
@@ -468,7 +499,6 @@ class WasmCompiledModule : public FixedArray {
MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
- MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
MACRO(OBJECT, FixedArray, lazy_compile_data) \
@@ -478,9 +508,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(OBJECT, FixedArray, signature_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
+ MACRO(CONST_OBJECT, FixedArray, empty_function_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
@@ -516,9 +544,6 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- // TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
- static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
-
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
@@ -543,7 +568,7 @@ class WasmCompiledModule : public FixedArray {
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
inline void ReplaceCodeTableForTesting(
std::vector<wasm::WasmCode*>&& testing_table);
@@ -556,6 +581,8 @@ class WasmCompiledModule : public FixedArray {
static Address GetTableValue(FixedArray* table, int index);
inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
+ void LogWasmCodes(Isolate* isolate);
+
private:
void InitId();
@@ -692,4 +719,4 @@ WasmFunctionInfo GetWasmFunctionInfo(Isolate*, Handle<Code>);
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_H_
+#endif // V8_WASM_WASM_OBJECTS_H_
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
index b503aa1a5e7..ac02b549a0c 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.cc
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -101,9 +101,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- // TODO(kschimpf): Add I64 versions of saturating conversions.
+ // TODO(kschimpf): Simplify after filling in other saturating operations.
CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
@@ -116,6 +118,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+ CASE_INT_OP(SExtendI8, "sign_extend8")
+ CASE_INT_OP(SExtendI16, "sign_extend16")
+ CASE_I64_OP(SExtendI32, "sign_extend32")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(Block, "block")
@@ -320,6 +325,19 @@ bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsSignExtensionOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprI32SExtendI8:
+ case kExprI32SExtendI16:
+ case kExprI64SExtendI8:
+ case kExprI64SExtendI16:
+ case kExprI64SExtendI32:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
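The opcode names added above come from the sign-extension proposal. Their semantics, written as plain C++ purely for illustration, reinterpret the low bits of the operand as a narrower signed integer and widen it again:

#include <cstdint>

// Equivalent C++ for the five sign-extension operators.
int32_t I32SExtendI8(int32_t x) { return static_cast<int8_t>(x); }
int32_t I32SExtendI16(int32_t x) { return static_cast<int16_t>(x); }
int64_t I64SExtendI8(int64_t x) { return static_cast<int8_t>(x); }
int64_t I64SExtendI16(int64_t x) { return static_cast<int16_t>(x); }
int64_t I64SExtendI32(int64_t x) { return static_cast<int32_t>(x); }

For example, I32SExtendI8(0xFF) evaluates to -1.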
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 9f8232c9022..c6b87f0556e 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OPCODES_H_
-#define V8_WASM_OPCODES_H_
+#ifndef V8_WASM_WASM_OPCODES_H_
+#define V8_WASM_WASM_OPCODES_H_
#include "src/globals.h"
#include "src/machine-type.h"
@@ -225,21 +225,26 @@ using WasmName = Vector<const char>;
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
- V(F64ReinterpretI64, 0xbf, d_l)
+ V(F64ReinterpretI64, 0xbf, d_l) \
+ V(I32SExtendI8, 0xc0, i_i) \
+ V(I32SExtendI16, 0xc1, i_i) \
+ V(I64SExtendI8, 0xc2, l_l) \
+ V(I64SExtendI16, 0xc3, l_l) \
+ V(I64SExtendI32, 0xc4, l_l)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc2, d_d) \
- V(F64Asin, 0xc3, d_d) \
- V(F64Atan, 0xc4, d_d) \
- V(F64Cos, 0xc5, d_d) \
- V(F64Sin, 0xc6, d_d) \
- V(F64Tan, 0xc7, d_d) \
- V(F64Exp, 0xc8, d_d) \
- V(F64Log, 0xc9, d_d) \
- V(F64Atan2, 0xca, d_dd) \
- V(F64Pow, 0xcb, d_dd) \
- V(F64Mod, 0xcc, d_dd) \
+ V(F64Acos, 0xc5, d_d) \
+ V(F64Asin, 0xc6, d_d) \
+ V(F64Atan, 0xc7, d_d) \
+ V(F64Cos, 0xc8, d_d) \
+ V(F64Sin, 0xc9, d_d) \
+ V(F64Tan, 0xca, d_d) \
+ V(F64Exp, 0xcb, d_d) \
+ V(F64Log, 0xcc, d_d) \
+ V(F64Atan2, 0xcd, d_dd) \
+ V(F64Pow, 0xce, d_dd) \
+ V(F64Mod, 0xcf, d_dd) \
V(I32AsmjsDivS, 0xd0, i_ii) \
V(I32AsmjsDivU, 0xd1, i_ii) \
V(I32AsmjsRemS, 0xd2, i_ii) \
@@ -403,8 +408,11 @@ using WasmName = Vector<const char>;
V(I32SConvertSatF32, 0xfc00, i_f) \
V(I32UConvertSatF32, 0xfc01, i_f) \
V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d)
-// TODO(kschimpf): Add remaining i64 numeric opcodes.
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
@@ -647,6 +655,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
+ static bool IsSignExtensionOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
@@ -793,4 +802,4 @@ struct WasmInitExpr {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OPCODES_H_
+#endif // V8_WASM_WASM_OPCODES_H_
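[editor's note, not part of the patch] The opcode table above adds the sign-extension entries at 0xc0..0xc4 (which pushes the asm.js-compat opcodes up to 0xc5..0xcf) and the four i64 saturating truncations at 0xfc04..0xfc07. Unlike the trapping conversions, the saturating forms clamp out-of-range inputs and map NaN to zero. A hedged C++ sketch of one of them, assuming standard IEEE double behaviour:

    // Illustration only: approximate semantics of I64SConvertSatF64.
    #include <cmath>
    #include <cstdint>
    #include <limits>

    int64_t I64SConvertSatF64(double x) {
      if (std::isnan(x)) return 0;  // NaN maps to zero instead of trapping
      if (x <= static_cast<double>(std::numeric_limits<int64_t>::min()))
        return std::numeric_limits<int64_t>::min();  // clamp below
      if (x >= static_cast<double>(std::numeric_limits<int64_t>::max()))
        return std::numeric_limits<int64_t>::max();  // clamp above
      return static_cast<int64_t>(x);  // in range: plain truncation
    }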
diff --git a/chromium/v8/src/wasm/wasm-result.h b/chromium/v8/src/wasm/wasm-result.h
index 7744b42923f..8250db90406 100644
--- a/chromium/v8/src/wasm/wasm-result.h
+++ b/chromium/v8/src/wasm/wasm-result.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_RESULT_H_
-#define V8_WASM_RESULT_H_
+#ifndef V8_WASM_WASM_RESULT_H_
+#define V8_WASM_WASM_RESULT_H_
#include <cstdarg>
#include <memory>
@@ -158,4 +158,4 @@ class V8_EXPORT_PRIVATE ErrorThrower {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_RESULT_H_
diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc
index 4466672f37d..240ffbca3d3 100644
--- a/chromium/v8/src/wasm/wasm-serialization.cc
+++ b/chromium/v8/src/wasm/wasm-serialization.cc
@@ -133,7 +133,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
static size_t GetCodeHeaderSize();
size_t MeasureCode(const WasmCode*) const;
size_t MeasureCopiedStubs() const;
- FixedArray* GetHandlerTable(const WasmCode*) const;
ByteArray* GetSourcePositions(const WasmCode*) const;
void BufferHeader();
@@ -187,7 +186,6 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
: isolate_(isolate), native_module_(module) {
DCHECK_NOT_NULL(isolate_);
DCHECK_NOT_NULL(native_module_);
- DCHECK_NULL(native_module_->lazy_builtin_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate_);
@@ -210,12 +208,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
size_t NativeModuleSerializer::MeasureHeader() const {
return sizeof(uint32_t) + // total wasm fct count
- sizeof(
- uint32_t) + // imported fcts - i.e. index of first wasm function
- sizeof(uint32_t) + // table count
- native_module_->specialization_data_.function_tables.size()
- // function table, containing pointers
- * sizeof(GlobalHandleAddress);
+ sizeof(uint32_t); // imported fcts - i.e. index of first wasm function
}
void NativeModuleSerializer::BufferHeader() {
@@ -225,37 +218,25 @@ void NativeModuleSerializer::BufferHeader() {
Writer writer(remaining_);
writer.Write(native_module_->FunctionCount());
writer.Write(native_module_->num_imported_functions());
- writer.Write(static_cast<uint32_t>(
- native_module_->specialization_data_.function_tables.size()));
- for (size_t i = 0,
- e = native_module_->specialization_data_.function_tables.size();
- i < e; ++i) {
- writer.Write(native_module_->specialization_data_.function_tables[i]);
- }
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
return sizeof(size_t) + // size of this section
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
+ sizeof(size_t) + // offset of handler table
sizeof(uint32_t) + // stack slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
- sizeof(uint32_t) + // handler size
sizeof(uint32_t) + // source positions size
sizeof(size_t) + // protected instructions size
sizeof(bool); // is_liftoff
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- FixedArray* handler_table = GetHandlerTable(code);
ByteArray* source_positions = GetSourcePositions(code);
return GetCodeHeaderSize() + code->instructions().size() + // code
code->reloc_info().size() + // reloc info
- (handler_table == nullptr
- ? 0
- : static_cast<uint32_t>(
- handler_table->length())) + // handler table
(source_positions == nullptr
? 0
: static_cast<uint32_t>(
@@ -325,21 +306,6 @@ void NativeModuleSerializer::BufferCopiedStubs() {
}
}
-FixedArray* NativeModuleSerializer::GetHandlerTable(
- const WasmCode* code) const {
- if (code->kind() != WasmCode::kFunction) return nullptr;
- uint32_t index = code->index();
- // We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- Object* handler_table_entry =
- native_module_->compiled_module()->handler_table()->get(
- static_cast<int>(index));
- if (handler_table_entry->IsFixedArray()) {
- return FixedArray::cast(handler_table_entry);
- }
- return nullptr;
-}
-
ByteArray* NativeModuleSerializer::GetSourcePositions(
const WasmCode* code) const {
if (code->kind() != WasmCode::kFunction) return nullptr;
@@ -364,15 +330,7 @@ void NativeModuleSerializer::BufferCurrentWasmCode() {
void NativeModuleSerializer::BufferCodeInAllocatedScratch(
const WasmCode* code) {
// We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- FixedArray* handler_table_entry = GetHandlerTable(code);
- uint32_t handler_table_size = 0;
- Address handler_table = nullptr;
- if (handler_table_entry != nullptr) {
- handler_table_size = static_cast<uint32_t>(handler_table_entry->length());
- handler_table = reinterpret_cast<Address>(
- handler_table_entry->GetFirstElementAddress());
- }
+ // by reloc info, followed by source positions.
ByteArray* source_positions_entry = GetSourcePositions(code);
Address source_positions = nullptr;
uint32_t source_positions_size = 0;
@@ -386,10 +344,10 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
writer.Write(MeasureCode(code));
writer.Write(code->constant_pool_offset());
writer.Write(code->safepoint_table_offset());
+ writer.Write(code->handler_table_offset());
writer.Write(code->stack_slots());
writer.Write(code->instructions().size());
writer.Write(code->reloc_info().size());
- writer.Write(handler_table_size);
writer.Write(source_positions_size);
writer.Write(code->protected_instructions().size());
writer.Write(code->is_liftoff());
@@ -398,7 +356,6 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
// write the code and everything else
writer.WriteVector(code->instructions());
writer.WriteVector(code->reloc_info());
- writer.WriteVector({handler_table, handler_table_size});
writer.WriteVector({source_positions, source_positions_size});
writer.WriteVector(
{reinterpret_cast<const byte*>(code->protected_instructions().data()),
@@ -555,16 +512,6 @@ bool NativeModuleDeserializer::ReadHeader() {
bool ok = functions == native_module_->FunctionCount() &&
imports == native_module_->num_imported_functions();
if (!ok) return false;
- size_t table_count = reader.Read<uint32_t>();
-
- std::vector<GlobalHandleAddress> funcs(table_count);
- for (size_t i = 0; i < table_count; ++i) {
- funcs[i] = reader.Read<GlobalHandleAddress>();
- }
- native_module_->function_tables() = funcs;
- // resize, so that from here on the native module can be
- // asked about num_function_tables().
- native_module_->empty_function_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
@@ -592,10 +539,10 @@ bool NativeModuleDeserializer::ReadCode() {
USE(code_section_size);
size_t constant_pool_offset = reader.Read<size_t>();
size_t safepoint_table_offset = reader.Read<size_t>();
+ size_t handler_table_offset = reader.Read<size_t>();
uint32_t stack_slot_count = reader.Read<uint32_t>();
size_t code_size = reader.Read<size_t>();
size_t reloc_size = reader.Read<size_t>();
- uint32_t handler_size = reader.Read<uint32_t>();
uint32_t source_position_size = reader.Read<uint32_t>();
size_t protected_instructions_size = reader.Read<size_t>();
bool is_liftoff = reader.Read<bool>();
@@ -612,9 +559,10 @@ bool NativeModuleDeserializer::ReadCode() {
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, Just(index_),
WasmCode::kFunction, constant_pool_offset, stack_slot_count,
- safepoint_table_offset, protected_instructions, is_liftoff);
+ safepoint_table_offset, handler_table_offset, protected_instructions,
+ is_liftoff);
if (ret == nullptr) return false;
- native_module_->SetCodeTable(index_, ret);
+ native_module_->code_table_[index_] = ret;
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -634,7 +582,7 @@ bool NativeModuleDeserializer::ReadCode() {
case RelocInfo::CODE_TARGET: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target = GetTrampolineOrStubFromTag(tag);
- iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
+ iter.rinfo()->set_target_address(target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
break;
}
@@ -643,23 +591,14 @@ bool NativeModuleDeserializer::ReadCode() {
reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
Address address =
ExternalReferenceTable::instance(isolate_)->address(orig_target);
- iter.rinfo()->set_target_runtime_entry(
- nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ iter.rinfo()->set_target_runtime_entry(address, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
break;
}
default:
break;
}
}
- if (handler_size > 0) {
- Handle<FixedArray> handler_table = isolate_->factory()->NewFixedArray(
- static_cast<int>(handler_size), TENURED);
- reader.ReadIntoVector(
- {reinterpret_cast<Address>(handler_table->GetFirstElementAddress()),
- handler_size});
- native_module_->compiled_module()->handler_table()->set(
- static_cast<int>(index_), *handler_table);
- }
if (source_position_size > 0) {
Handle<ByteArray> source_positions = isolate_->factory()->NewByteArray(
static_cast<int>(source_position_size), TENURED);
@@ -743,6 +682,10 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
+ // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
+ // requires unlocking the code space here. This should be moved into the
+ // allocator eventually.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
CompileJsToWasmWrappers(isolate, compiled_module, isolate->counters());
WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
compiled_module);
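[editor's note, a sketch inferred from the hunks above, not code in the patch] After this change the serialized module header carries only the function count and the import count, and each per-code header records a handler_table_offset instead of shipping a separate handler-table blob. The field order written by BufferCodeInAllocatedScratch and read back by ReadCode is, with illustrative names:

    // Illustration only: fields are written/read individually, not as a struct.
    #include <cstddef>
    #include <cstdint>

    struct SerializedCodeHeaderSketch {
      size_t   code_section_size;        // MeasureCode(code)
      size_t   constant_pool_offset;
      size_t   safepoint_table_offset;
      size_t   handler_table_offset;     // new; replaces the copied handler table
      uint32_t stack_slots;
      size_t   code_size;                // instructions().size()
      size_t   reloc_size;               // reloc_info().size()
      uint32_t source_positions_size;
      size_t   protected_instructions_size;
      bool     is_liftoff;
    };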
diff --git a/chromium/v8/src/wasm/wasm-serialization.h b/chromium/v8/src/wasm/wasm-serialization.h
index 9c0e9ce10ae..5bb49bfdce6 100644
--- a/chromium/v8/src/wasm/wasm-serialization.h
+++ b/chromium/v8/src/wasm/wasm-serialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_SERIALIZATION_H_
-#define V8_WASM_SERIALIZATION_H_
+#ifndef V8_WASM_WASM_SERIALIZATION_H_
+#define V8_WASM_WASM_SERIALIZATION_H_
#include "src/wasm/wasm-objects.h"
@@ -21,4 +21,4 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_SERIALIZATION_H_
diff --git a/chromium/v8/src/wasm/wasm-text.cc b/chromium/v8/src/wasm/wasm-text.cc
index 81c8e418134..1619241332d 100644
--- a/chromium/v8/src/wasm/wasm-text.cc
+++ b/chromium/v8/src/wasm/wasm-text.cc
@@ -134,7 +134,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
DCHECK_EQ(0, operand.table_index);
- os << "call_indirect " << operand.index;
+ os << "call_indirect " << operand.sig_index;
break;
}
case kExprCallFunction: {
@@ -208,6 +208,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
UNREACHABLE();
break;
}
+ break;
}
// This group is just printed by their internal opcode name, as they
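[editor's note, illustrative output, not from the patch] With the fix above, the text printer emits the signature index taken from CallIndirectOperand::sig_index, and the added break prevents fall-through into the next case. For a call through a hypothetical signature #2 the printed line would read:

    ;; illustration only; the index value depends on the module
    call_indirect 2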
diff --git a/chromium/v8/src/wasm/wasm-text.h b/chromium/v8/src/wasm/wasm-text.h
index 1608ea9a2dd..60957966abe 100644
--- a/chromium/v8/src/wasm/wasm-text.h
+++ b/chromium/v8/src/wasm/wasm-text.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_S_EXPR_H_
-#define V8_WASM_S_EXPR_H_
+#ifndef V8_WASM_WASM_TEXT_H_
+#define V8_WASM_WASM_TEXT_H_
#include <cstdint>
#include <ostream>
@@ -35,4 +35,4 @@ void PrintWasmText(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_S_EXPR_H_
+#endif // V8_WASM_WASM_TEXT_H_
diff --git a/chromium/v8/src/wasm/wasm-value.h b/chromium/v8/src/wasm/wasm-value.h
index a30657aee0d..22fd13c219b 100644
--- a/chromium/v8/src/wasm/wasm-value.h
+++ b/chromium/v8/src/wasm/wasm-value.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_VALUE_H_
-#define V8_WASM_VALUE_H_
+#ifndef V8_WASM_WASM_VALUE_H_
+#define V8_WASM_WASM_VALUE_H_
#include "src/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -84,4 +84,4 @@ FOREACH_WASMVAL_TYPE(DECLARE_CAST)
} // namespace internal
} // namespace v8
-#endif // V8_WASM_VALUE_H_
+#endif // V8_WASM_WASM_VALUE_H_