Diffstat (limited to 'deps/v8/src/wasm/wasm-interpreter.cc')
-rw-r--r--  deps/v8/src/wasm/wasm-interpreter.cc | 833
1 file changed, 531 insertions(+), 302 deletions(-)
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 4269e18c8f..80d56a05f8 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -2,15 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <atomic>
#include <type_traits>
#include "src/wasm/wasm-interpreter.h"
#include "src/assembler-inl.h"
+#include "src/boxed-float.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
#include "src/identity-map.h"
#include "src/objects-inl.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
@@ -122,7 +125,9 @@ namespace wasm {
V(I32AsmjsDivS, int32_t) \
V(I32AsmjsDivU, uint32_t) \
V(I32AsmjsRemS, int32_t) \
- V(I32AsmjsRemU, uint32_t)
+ V(I32AsmjsRemU, uint32_t) \
+ V(F32CopySign, Float32) \
+ V(F64CopySign, Float64)
#define FOREACH_OTHER_UNOP(V) \
V(I32Clz, uint32_t) \
@@ -133,14 +138,14 @@ namespace wasm {
V(I64Ctz, uint64_t) \
V(I64Popcnt, uint64_t) \
V(I64Eqz, uint64_t) \
- V(F32Abs, float) \
- V(F32Neg, float) \
+ V(F32Abs, Float32) \
+ V(F32Neg, Float32) \
V(F32Ceil, float) \
V(F32Floor, float) \
V(F32Trunc, float) \
V(F32NearestInt, float) \
- V(F64Abs, double) \
- V(F64Neg, double) \
+ V(F64Abs, Float64) \
+ V(F64Neg, Float64) \
V(F64Ceil, double) \
V(F64Floor, double) \
V(F64Trunc, double) \
@@ -177,26 +182,8 @@ namespace wasm {
namespace {
-// CachedInstanceInfo encapsulates globals and memory buffer runtime information
-// for a wasm instance. The interpreter caches that information when
-// constructed, copying it from the {WasmInstanceObject}. It expects to be
-// notified on changes to it, e.g. {GrowMemory}. We cache it because interpreter
-// perf is sensitive to accesses to this information.
-//
-// TODO(wasm): other runtime information, such as indirect function table, or
-// code table (incl. imports) is currently handled separately. Consider
-// unifying, if possible, with {ModuleEnv}.
-
-struct CachedInstanceInfo {
- CachedInstanceInfo(byte* globals, byte* mem, uint32_t size)
- : globals_start(globals), mem_start(mem), mem_size(size) {}
- // We do not expect the location of the globals buffer to
- // change for an instance.
- byte* const globals_start = nullptr;
- // The memory buffer may change because of GrowMemory
- byte* mem_start = nullptr;
- uint32_t mem_size = 0;
-};
+constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
+constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) {
@@ -324,8 +311,9 @@ inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
return JSMax(a, b);
}
-inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
- return copysignf(a, b);
+inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
+ return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
+ (b.get_bits() & kFloat32SignBitMask));
}
inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
@@ -336,8 +324,9 @@ inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
return JSMax(a, b);
}
-inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
- return copysign(a, b);
+inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
+ return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
+ (b.get_bits() & kFloat64SignBitMask));
}
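
The CopySign rewrite above drops the libm copysign()/copysignf() calls and splices the sign bit directly, which keeps NaN payload bits intact; the same masking underlies the Abs/Neg changes further down. A standalone sketch of the technique, in plain C++ with memcpy standing in for V8's Float32 boxed type (names here are illustrative, not V8 API):

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint32_t kSignBit = uint32_t{1} << 31;

static uint32_t BitsOf(float f) { uint32_t b; std::memcpy(&b, &f, 4); return b; }
static float FromBits(uint32_t b) { float f; std::memcpy(&f, &b, 4); return f; }

// Magnitude of a, sign of b, with no float arithmetic in between.
static float CopySignBits(float a, float b) {
  return FromBits((BitsOf(a) & ~kSignBit) | (BitsOf(b) & kSignBit));
}

int main() {
  std::printf("%f\n", CopySignBits(3.0f, -1.0f));  // -3.000000
  // A NaN's payload bits survive, which a float-typed round trip through
  // the FPU does not guarantee:
  std::printf("%08x\n", BitsOf(CopySignBits(FromBits(0x7fc01234u), -0.0f)));
  // prints ffc01234
}
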
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
@@ -381,11 +370,11 @@ inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
}
int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros32(val);
+ return base::bits::CountLeadingZeros(val);
}
uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros32(val);
+ return base::bits::CountTrailingZeros(val);
}
uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
@@ -397,11 +386,11 @@ inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
}
int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros64(val);
+ return base::bits::CountLeadingZeros(val);
}
inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros64(val);
+ return base::bits::CountTrailingZeros(val);
}
inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
@@ -412,12 +401,12 @@ inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
return val == 0 ? 1 : 0;
}
-inline float ExecuteF32Abs(float a, TrapReason* trap) {
- return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
+inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
+ return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
}
-inline float ExecuteF32Neg(float a, TrapReason* trap) {
- return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
+inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
+ return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
}
inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
@@ -435,12 +424,12 @@ inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
return result;
}
-inline double ExecuteF64Abs(double a, TrapReason* trap) {
- return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
+inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
+ return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
}
-inline double ExecuteF64Neg(double a, TrapReason* trap) {
- return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
+inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
+ return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
}
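
Abs and Neg get the same bit-level treatment: clearing or flipping the sign bit behaves identically for infinities, NaNs, and signed zeros, with no float arithmetic that could canonicalize a NaN. A minimal illustration (plain C++, example values made up):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kSignBit = uint32_t{1} << 31;
  uint32_t bits = 0xffc00001u;             // a negative quiet NaN with payload
  std::printf("%08x\n", bits & ~kSignBit); // 7fc00001 (Abs: sign cleared)
  std::printf("%08x\n", bits ^ kSignBit);  // 7fc00001 (Neg: sign flipped)
}
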
inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
@@ -578,8 +567,8 @@ inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
return static_cast<float>(a);
}
-inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
- return bit_cast<float>(a);
+inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
+ return Float32::FromBits(a);
}
inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
@@ -606,31 +595,16 @@ inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
return static_cast<double>(a);
}
-inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
- return bit_cast<double>(a);
+inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
+ return Float64::FromBits(a);
}
inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
- return a.to_unchecked<int32_t>();
+ return a.to_f32_boxed().get_bits();
}
inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
- return a.to_unchecked<int64_t>();
-}
-
-inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
- MaybeHandle<WasmInstanceObject> instance_obj,
- CachedInstanceInfo* mem_info) {
- Handle<WasmInstanceObject> instance = instance_obj.ToHandleChecked();
- Isolate* isolate = instance->GetIsolate();
- int32_t ret = WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
-
- // Ensure the effects of GrowMemory have been observed by the interpreter.
- // See {UpdateMemory}. In all cases, we are in agreement with the runtime
- // object's view.
- DCHECK_EQ(mem_info->mem_size, instance->wasm_context()->mem_size);
- DCHECK_EQ(mem_info->mem_start, instance->wasm_context()->mem_start);
- return ret;
+ return a.to_f64_boxed().get_bits();
}
enum InternalOpcode {
@@ -653,17 +627,29 @@ const char* OpcodeName(uint32_t val) {
// Unwrap a wasm to js wrapper, return the callable heap object.
// If the wrapper would throw a TypeError, return a null handle.
Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
- Handle<Code> js_wrapper) {
- DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, js_wrapper->kind());
- Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
- DCHECK_EQ(2, deopt_data->length());
- intptr_t js_imports_table_loc = static_cast<intptr_t>(
- HeapNumber::cast(deopt_data->get(0))->value_as_bits());
- Handle<FixedArray> js_imports_table(
- reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ WasmCodeWrapper wrapper) {
+ Handle<FixedArray> js_imports_table;
int index = 0;
- CHECK(deopt_data->get(1)->ToInt32(&index));
- DCHECK_GT(js_imports_table->length(), index);
+ if (wrapper.IsCodeObject()) {
+ Handle<Code> js_wrapper = wrapper.GetCode();
+ DCHECK(Code::WASM_TO_JS_FUNCTION == js_wrapper->kind());
+ Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
+ DCHECK_EQ(2, deopt_data->length());
+ intptr_t js_imports_table_loc = static_cast<intptr_t>(
+ HeapNumber::cast(deopt_data->get(0))->value_as_bits());
+ js_imports_table = Handle<FixedArray>(
+ reinterpret_cast<FixedArray**>(js_imports_table_loc));
+ CHECK(deopt_data->get(1)->ToInt32(&index));
+ DCHECK_GT(js_imports_table->length(), index);
+ } else {
+ const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
+ DCHECK_EQ(wasm::WasmCode::WasmToJsWrapper, wasm_code->kind());
+ js_imports_table = Handle<FixedArray>(wasm_code->owner()
+ ->compiled_module()
+ ->owning_instance()
+ ->js_imports_table());
+ index = 1 + 3 * static_cast<int>(wasm_code->index());
+ }
Handle<Object> obj(js_imports_table->get(index), isolate);
if (obj->IsCallable()) {
return Handle<HeapObject>::cast(obj);
@@ -806,6 +792,7 @@ class SideTable : public ZoneObject {
for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
bool unreachable = control_stack.back().unreachable;
if (unreachable) {
TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
@@ -826,25 +813,35 @@ class SideTable : public ZoneObject {
case kExprBlock:
case kExprLoop: {
bool is_loop = opcode == kExprLoop;
- BlockTypeOperand<false> operand(&i, i.pc());
- TRACE("control @%u: %s, arity %d\n", i.pc_offset(),
- is_loop ? "Loop" : "Block", operand.arity);
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ if (operand.type == kWasmVar) {
+ operand.sig = module->signatures[operand.sig_index];
+ }
+ TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
+ is_loop ? "Loop" : "Block",
+ operand.in_arity(), operand.out_arity());
CLabel* label = CLabel::New(&control_transfer_zone, stack_height,
- is_loop ? 0 : operand.arity);
- control_stack.emplace_back(i.pc(), label, operand.arity);
+ is_loop ? operand.in_arity()
+ : operand.out_arity());
+ control_stack.emplace_back(i.pc(), label, operand.out_arity());
copy_unreachable();
if (is_loop) label->Bind(i.pc());
break;
}
case kExprIf: {
- TRACE("control @%u: If\n", i.pc_offset());
- BlockTypeOperand<false> operand(&i, i.pc());
+ BlockTypeOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ if (operand.type == kWasmVar) {
+ operand.sig = module->signatures[operand.sig_index];
+ }
+ TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
+ operand.in_arity(), operand.out_arity());
CLabel* end_label =
- CLabel::New(&control_transfer_zone, stack_height, operand.arity);
+ CLabel::New(&control_transfer_zone, stack_height,
+ operand.out_arity());
CLabel* else_label =
CLabel::New(&control_transfer_zone, stack_height, 0);
control_stack.emplace_back(i.pc(), end_label, else_label,
- operand.arity);
+ operand.out_arity());
copy_unreachable();
if (!unreachable) else_label->Ref(i.pc(), stack_height);
break;
@@ -880,22 +877,22 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BreakDepthOperand<false> operand(&i, i.pc());
+ BreakDepthOperand<Decoder::kNoValidate> operand(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrTable: {
- BranchTableOperand<false> operand(&i, i.pc());
- BranchTableIterator<false> iterator(&i, operand);
+ BranchTableOperand<Decoder::kNoValidate> operand(&i, i.pc());
+ BranchTableIterator<Decoder::kNoValidate> iterator(&i, operand);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
operand.table_count);
if (!unreachable) {
@@ -997,7 +994,15 @@ class CodeMap {
: MaybeHandle<WasmInstanceObject>();
}
- Code* GetImportedFunction(uint32_t function_index) {
+ const wasm::WasmCode* GetImportedFunction(uint32_t function_index) {
+ DCHECK(has_instance());
+ DCHECK_GT(module_->num_imported_functions, function_index);
+ const wasm::NativeModule* native_module =
+ instance()->compiled_module()->GetNativeModule();
+ return native_module->GetCode(function_index);
+ }
+
+ Code* GetImportedFunctionGC(uint32_t function_index) {
DCHECK(has_instance());
DCHECK_GT(module_->num_imported_functions, function_index);
FixedArray* code_table = instance()->compiled_module()->ptr_to_code_table();
@@ -1080,12 +1085,14 @@ Handle<Object> WasmValueToNumber(Factory* factory, WasmValue val,
// Convert JS value to WebAssembly, spec here:
// https://github.com/WebAssembly/design/blob/master/JS.md#towebassemblyvalue
+// Return WasmValue() (i.e. of type kWasmStmt) on failure. In that case, an
+// exception will be pending on the isolate.
WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
wasm::ValueType type) {
switch (type) {
case kWasmI32: {
MaybeHandle<Object> maybe_i32 = Object::ToInt32(isolate, value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_i32.is_null()) return {};
int32_t value;
CHECK(maybe_i32.ToHandleChecked()->ToInt32(&value));
return WasmValue(value);
@@ -1095,13 +1102,13 @@ WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
UNREACHABLE();
case kWasmF32: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_number.is_null()) return {};
return WasmValue(
static_cast<float>(maybe_number.ToHandleChecked()->Number()));
}
case kWasmF64: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- // TODO(clemensh): Handle failure here (unwind).
+ if (maybe_number.is_null()) return {};
return WasmValue(maybe_number.ToHandleChecked()->Number());
}
default:
@@ -1111,6 +1118,42 @@ WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
}
}
+// Like a static_cast from src to dst, but specialized for boxed floats.
+template <typename dst, typename src>
+struct converter {
+ dst operator()(src val) const { return static_cast<dst>(val); }
+};
+template <>
+struct converter<Float64, uint64_t> {
+ Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
+};
+template <>
+struct converter<Float32, uint32_t> {
+ Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
+};
+template <>
+struct converter<uint64_t, Float64> {
+ uint64_t operator()(Float64 val) const { return val.get_bits(); }
+};
+template <>
+struct converter<uint32_t, Float32> {
+ uint32_t operator()(Float32 val) const { return val.get_bits(); }
+};
+
+template <typename T>
+V8_INLINE bool has_nondeterminism(T val) {
+ static_assert(!std::is_floating_point<T>::value, "missing specialization");
+ return false;
+}
+template <>
+V8_INLINE bool has_nondeterminism<float>(float val) {
+ return std::isnan(val);
+}
+template <>
+V8_INLINE bool has_nondeterminism<double>(double val) {
+ return std::isnan(val);
+}
+
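
The converter functor replaces the plain static_cast in the load/store paths so that Float32/Float64 values move as raw bits, and has_nondeterminism centralizes the NaN check that used to be open-coded at each opcode. A reduced, self-contained sketch of how the two compose (V8's boxed types replaced by their raw integer representation):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <type_traits>

template <typename Dst, typename Src>
struct converter {
  Dst operator()(Src val) const { return static_cast<Dst>(val); }
};

template <typename T>
bool has_nondeterminism(T) {
  static_assert(!std::is_floating_point<T>::value, "missing specialization");
  return false;
}
template <> bool has_nondeterminism<float>(float v) { return std::isnan(v); }
template <> bool has_nondeterminism<double>(double v) { return std::isnan(v); }

int main() {
  bool flag = false;
  // Integer results can never set the flag; the static_assert forces every
  // float-producing path through a specialization.
  flag |= has_nondeterminism(converter<int32_t, int64_t>{}(int64_t{-1}));
  // Any NaN result flags possible nondeterminism, since compiled wasm code
  // may produce different NaN bit patterns than the interpreter.
  flag |= has_nondeterminism(std::numeric_limits<double>::quiet_NaN());
  std::printf("%d\n", flag);  // 1
}
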
// Responsible for executing code directly.
class ThreadImpl {
struct Activation {
@@ -1120,10 +1163,9 @@ class ThreadImpl {
};
public:
- ThreadImpl(Zone* zone, CodeMap* codemap,
- CachedInstanceInfo* cached_instance_info)
+ ThreadImpl(Zone* zone, CodeMap* codemap, WasmContext* wasm_context)
: codemap_(codemap),
- cached_instance_info_(cached_instance_info),
+ wasm_context_(wasm_context),
zone_(zone),
frames_(zone),
activations_(zone) {}
@@ -1247,7 +1289,7 @@ class ThreadImpl {
WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
Isolate* isolate) {
DCHECK(isolate->has_pending_exception());
- // TODO(wasm): Add wasm exception handling (would return true).
+ // TODO(wasm): Add wasm exception handling (would return HANDLED).
USE(isolate->pending_exception());
TRACE("----- UNWIND -----\n");
DCHECK_LT(0, activations_.size());
@@ -1283,7 +1325,7 @@ class ThreadImpl {
friend class InterpretedFrameImpl;
CodeMap* codemap_;
- CachedInstanceInfo* const cached_instance_info_;
+ WasmContext* wasm_context_;
Zone* zone_;
WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
@@ -1372,11 +1414,13 @@ class ThreadImpl {
pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
switch (code->orig_start[pc]) {
case kExprCallFunction: {
- CallFunctionOperand<false> operand(decoder, code->at(pc));
+ CallFunctionOperand<Decoder::kNoValidate> operand(decoder,
+ code->at(pc));
return pc + 1 + operand.length;
}
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(decoder, code->at(pc));
+ CallIndirectOperand<Decoder::kNoValidate> operand(decoder,
+ code->at(pc));
return pc + 1 + operand.length;
}
default:
@@ -1432,28 +1476,35 @@ class ThreadImpl {
// ^ 0 ^ sp_
DCHECK_LE(dest, sp_);
DCHECK_LE(dest + arity, sp_);
- if (arity) memcpy(dest, sp_ - arity, arity * sizeof(*sp_));
+ if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
sp_ = dest + arity;
}
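
The memcpy-to-memmove change above matters because in DoStackTransfer the destination slots can overlap the sp_ - arity source range, and memcpy on overlapping ranges is undefined behavior. A minimal demonstration of the overlapping case:

#include <cstdio>
#include <cstring>

int main() {
  int stack[4] = {1, 2, 3, 4};
  // Shift the top three values down one slot; source and destination
  // overlap, so only memmove is well-defined here.
  std::memmove(&stack[0], &stack[1], 3 * sizeof(int));
  std::printf("%d %d %d\n", stack[0], stack[1], stack[2]);  // 2 3 4
}
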
template <typename mtype>
- inline bool BoundsCheck(uint32_t mem_size, uint32_t offset, uint32_t index) {
- return sizeof(mtype) <= mem_size && offset <= mem_size - sizeof(mtype) &&
- index <= mem_size - sizeof(mtype) - offset;
+ inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
+ uint32_t mem_size = wasm_context_->mem_size;
+ if (sizeof(mtype) > mem_size) return nullptr;
+ if (offset > (mem_size - sizeof(mtype))) return nullptr;
+ if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
+ // Compute the effective address of the access, making sure to condition
+ // the index even in the in-bounds case.
+ return wasm_context_->mem_start + offset +
+ (index & wasm_context_->mem_mask);
}
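
BoundsCheckMem keeps the three-comparison shape of the old BoundsCheck deliberately: each subtraction is guarded by the comparison before it, so the uint32_t arithmetic can never wrap, whereas a naive offset + index + sizeof(mtype) <= mem_size test can overflow and falsely pass. A standalone version of the check (a sketch; the mem_mask conditioning step is omitted):

#include <cstdint>
#include <cstdio>

bool InBounds(uint32_t mem_size, uint32_t offset, uint32_t index,
              uint32_t access_size) {
  if (access_size > mem_size) return false;
  if (offset > mem_size - access_size) return false;  // cannot wrap
  return index <= mem_size - access_size - offset;    // cannot wrap
}

int main() {
  // The naive sum 0xffffffff + 1 + 4 wraps to 4 and would "pass":
  std::printf("%d\n", InBounds(0x10000, 0xffffffffu, 1, 4));  // 0
  std::printf("%d\n", InBounds(0x10000, 0xfffc, 0, 4));       // 1
}
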
template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
MachineRepresentation rep) {
- MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
+ sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
- index)) {
+ byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+ if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- byte* addr = cached_instance_info_->mem_start + operand.offset + index;
- WasmValue result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
+ WasmValue result(
+ converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
Push(result);
len = 1 + operand.length;
@@ -1462,7 +1513,7 @@ class ThreadImpl {
tracing::TraceMemoryOperation(
tracing::kWasmInterpreted, false, rep, operand.offset + index,
code->function->func_index, static_cast<int>(pc),
- cached_instance_info_->mem_start);
+ wasm_context_->mem_start);
}
return true;
@@ -1471,35 +1522,114 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
MachineRepresentation rep) {
- MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
- WasmValue val = Pop();
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc),
+ sizeof(ctype));
+ ctype val = Pop().to<ctype>();
uint32_t index = Pop().to<uint32_t>();
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, operand.offset,
- index)) {
+ byte* addr = BoundsCheckMem<mtype>(operand.offset, index);
+ if (!addr) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- byte* addr = cached_instance_info_->mem_start + operand.offset + index;
- WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>()));
+ WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
len = 1 + operand.length;
- if (std::is_same<float, ctype>::value) {
- possible_nondeterminism_ |= std::isnan(val.to<float>());
- } else if (std::is_same<double, ctype>::value) {
- possible_nondeterminism_ |= std::isnan(val.to<double>());
- }
-
if (FLAG_wasm_trace_memory) {
tracing::TraceMemoryOperation(
tracing::kWasmInterpreted, true, rep, operand.offset + index,
code->function->func_index, static_cast<int>(pc),
- cached_instance_info_->mem_start);
+ wasm_context_->mem_start);
}
return true;
}
+ template <typename type>
+ bool ExtractAtomicBinOpParams(Decoder* decoder, InterpreterCode* code,
+ Address& address, pc_t pc, type& val,
+ int& len) {
+ MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc + 1),
+ sizeof(type));
+ val = Pop().to<uint32_t>();
+ uint32_t index = Pop().to<uint32_t>();
+ address = BoundsCheckMem<type>(operand.offset, index);
+ if (!address) {
+ DoTrap(kTrapMemOutOfBounds, pc);
+ return false;
+ }
+ len = 2 + operand.length;
+ return true;
+ }
+
+ bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
+ InterpreterCode* code, pc_t pc, int& len) {
+ WasmValue result;
+ switch (opcode) {
+// TODO(gdeepti): Remove this work-around once the bots are upgraded to a
+// more recent gcc version. The gcc bots (Android ARM, linux) currently use
+// gcc 4.8, whose atomics support is incomplete; see also GCC Bug 58016
+// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016).
+#if __GNUG__ && __GNUC__ < 5
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+ return false; \
+ } \
+ result = WasmValue( \
+ __##operation(reinterpret_cast<type*>(addr), val, __ATOMIC_SEQ_CST)); \
+ break; \
+ }
+#else
+#define ATOMIC_BINOP_CASE(name, type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<std::type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<std::" #type \
+ ">, and " #type); \
+ result = WasmValue( \
+ std::operation(reinterpret_cast<std::atomic<std::type>*>(addr), val)); \
+ break; \
+ }
+#endif
+ ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
+ ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
+ ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
+ ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
+ ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
+ ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
+#if __GNUG__ && __GNUC__ < 5
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange_n);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange_n);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange_n);
+#else
+ ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
+ ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+#endif
+#undef ATOMIC_BINOP_CASE
+ default:
+ return false;
+ }
+ Push(result);
+ return true;
+ }
+
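
On the non-gcc-4.8 path, each ATOMIC_BINOP_CASE reinterprets the bounds-checked address as a std::atomic of the access width and calls the matching std::atomic_fetch_* free function. A reduced sketch of that pattern (the reinterpret_cast relies on the same size assumption the static_assert in the macro spells out):

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  alignas(uint32_t) uint8_t memory[8] = {0};  // stand-in for wasm memory
  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
                "layout assumption from the macro above");
  auto* cell = reinterpret_cast<std::atomic<uint32_t>*>(memory);
  uint32_t old = std::atomic_fetch_add(cell, 5u);  // e.g. I32AtomicAdd
  std::printf("%u %u\n", old, cell->load());       // 0 5
}
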
// Check if our control stack (frames_) exceeds the limit. Trigger stack
// overflow if it does, and unwind the current frame.
// Returns true if execution can continue, false if the current activation was
@@ -1507,8 +1637,15 @@ class ThreadImpl {
// Do call this function immediately *after* pushing a new frame. The pc of
// the top frame will be reset to 0 if the stack check fails.
bool DoStackCheck() WARN_UNUSED_RESULT {
- // Sum up the size of all dynamically growing structures.
- if (V8_LIKELY(frames_.size() <= kV8MaxWasmInterpretedStackSize)) {
+ // The goal of this stack check is not to prevent actual stack overflows,
+ // but to simulate stack overflows during the execution of compiled code.
+ // That is why this function uses FLAG_stack_size, even though the value
+ // stack actually lies in zone memory.
+ const size_t stack_size_limit = FLAG_stack_size * KB;
+ // Sum up the value stack size and the control stack size.
+ const size_t current_stack_size =
+ (sp_ - stack_start_) + frames_.size() * sizeof(Frame);
+ if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
if (!codemap()->has_instance()) {
@@ -1551,16 +1688,22 @@ class ThreadImpl {
// Do first check for a breakpoint, in order to set hit_break correctly.
const char* skip = " ";
int len = 1;
- byte opcode = code->start[pc];
- byte orig = opcode;
- if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
+ byte orig = code->start[pc];
+ WasmOpcode opcode = static_cast<WasmOpcode>(orig);
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
+ }
+ if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
orig = code->orig_start[pc];
+ if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
+ opcode =
+ static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
+ }
if (SkipBreakpoint(code, pc)) {
// skip breakpoint by switching on original code.
skip = "[skip] ";
} else {
- TRACE("@%-3zu: [break] %-24s:", pc,
- WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
TraceValueStack();
TRACE("\n");
hit_break = true;
@@ -1573,8 +1716,7 @@ class ThreadImpl {
if (max > 0) --max;
USE(skip);
- TRACE("@%-3zu: %s%-24s:", pc, skip,
- WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
TraceValueStack();
TRACE("\n");
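
With the atomics proposal, opcodes are no longer always a single byte: a prefix byte is followed by a second opcode byte, and the loop above folds the pair into one WasmOpcode value with the prefix in the high byte. A sketch of that decoding scheme (0xfe is the wasm atomics prefix; the second byte here is made up for the example):

#include <cstdint>
#include <cstdio>

// Atomics prefix only, as a stand-in for WasmOpcodes::IsPrefixOpcode.
static bool IsPrefixOpcode(uint32_t b) { return b == 0xfe; }

int main() {
  const uint8_t code[] = {0xfe, 0x1e};  // hypothetical I32AtomicAdd encoding
  uint32_t opcode = code[0];
  if (IsPrefixOpcode(opcode)) opcode = opcode << 8 | code[1];
  std::printf("0x%04x\n", opcode);  // 0xfe1e, dispatched via kAtomicPrefix
}
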
@@ -1592,17 +1734,20 @@ class ThreadImpl {
case kExprNop:
break;
case kExprBlock: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = 1 + operand.length;
break;
}
case kExprLoop: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = 1 + operand.length;
break;
}
case kExprIf: {
- BlockTypeOperand<false> operand(&decoder, code->at(pc));
+ BlockTypeOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -1628,13 +1773,15 @@ class ThreadImpl {
break;
}
case kExprBr: {
- BreakDepthOperand<false> operand(&decoder, code->at(pc));
+ BreakDepthOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
len = DoBreak(code, pc, operand.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BreakDepthOperand<false> operand(&decoder, code->at(pc));
+ BreakDepthOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -1647,8 +1794,9 @@ class ThreadImpl {
break;
}
case kExprBrTable: {
- BranchTableOperand<false> operand(&decoder, code->at(pc));
- BranchTableIterator<false> iterator(&decoder, operand);
+ BranchTableOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
+ BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, operand);
uint32_t key = Pop().to<uint32_t>();
uint32_t depth = 0;
if (key >= operand.table_count) key = operand.table_count;
@@ -1673,44 +1821,47 @@ class ThreadImpl {
break;
}
case kExprI32Const: {
- ImmI32Operand<false> operand(&decoder, code->at(pc));
+ ImmI32Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand<false> operand(&decoder, code->at(pc));
+ ImmI64Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand<false> operand(&decoder, code->at(pc));
+ ImmF32Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand<false> operand(&decoder, code->at(pc));
+ ImmF64Operand<Decoder::kNoValidate> operand(&decoder, code->at(pc));
Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
Push(GetStackValue(frames_.back().sp + operand.index));
len = 1 + operand.length;
break;
}
case kExprSetLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
- LocalIndexOperand<false> operand(&decoder, code->at(pc));
+ LocalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
Push(val);
@@ -1722,7 +1873,8 @@ class ThreadImpl {
break;
}
case kExprCallFunction: {
- CallFunctionOperand<false> operand(&decoder, code->at(pc));
+ CallFunctionOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
InterpreterCode* target = codemap()->GetCode(operand.index);
if (target->function->imported) {
CommitPc(pc);
@@ -1754,7 +1906,8 @@ class ThreadImpl {
continue; // don't bump pc
} break;
case kExprCallIndirect: {
- CallIndirectOperand<false> operand(&decoder, code->at(pc));
+ CallIndirectOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
@@ -1781,9 +1934,10 @@ class ThreadImpl {
}
} break;
case kExprGetGlobal: {
- GlobalIndexOperand<false> operand(&decoder, code->at(pc));
+ GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = cached_instance_info_->globals_start + global->offset;
+ byte* ptr = wasm_context_->globals_start + global->offset;
WasmValue val;
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -1800,9 +1954,10 @@ class ThreadImpl {
break;
}
case kExprSetGlobal: {
- GlobalIndexOperand<false> operand(&decoder, code->at(pc));
+ GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = cached_instance_info_->globals_start + global->offset;
+ byte* ptr = wasm_context_->globals_start + global->offset;
WasmValue val = Pop();
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -1838,8 +1993,8 @@ class ThreadImpl {
LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
- LOAD_CASE(F32LoadMem, float, float, kFloat32);
- LOAD_CASE(F64LoadMem, double, double, kFloat64);
+ LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
+ LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
#undef LOAD_CASE
#define STORE_CASE(name, ctype, mtype, rep) \
@@ -1857,23 +2012,23 @@ class ThreadImpl {
STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
- STORE_CASE(F32StoreMem, float, float, kFloat32);
- STORE_CASE(F64StoreMem, double, double, kFloat64);
+ STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
+ STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
#undef STORE_CASE
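
Note the changed F32/F64 rows in LOAD_CASE and STORE_CASE: their memory type is now uint32_t/uint64_t, paired with the converter specializations above, so float values cross the memory boundary as raw bits. Copying through a float or double lvalue can silently quieten a signaling NaN on some targets; copying the bits cannot. An illustration:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const uint32_t signaling_nan = 0x7fa00000u;  // an sNaN bit pattern
  uint8_t memory[4];
  std::memcpy(memory, &signaling_nan, 4);      // "store" as raw bits
  uint32_t loaded;
  std::memcpy(&loaded, memory, 4);             // "load" as raw bits
  std::printf("%08x\n", loaded);               // 7fa00000, payload intact
}
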
-#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
- case kExpr##name: { \
- uint32_t index = Pop().to<uint32_t>(); \
- ctype result; \
- if (!BoundsCheck<mtype>(cached_instance_info_->mem_size, 0, index)) { \
- result = defval; \
- } else { \
- byte* addr = cached_instance_info_->mem_start + index; \
- /* TODO(titzer): alignment for asmjs load mem? */ \
- result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
- } \
- Push(WasmValue(result)); \
- break; \
+#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
+ case kExpr##name: { \
+ uint32_t index = Pop().to<uint32_t>(); \
+ ctype result; \
+ byte* addr = BoundsCheckMem<mtype>(0, index); \
+ if (!addr) { \
+ result = defval; \
+ } else { \
+ /* TODO(titzer): alignment for asmjs load mem? */ \
+ result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
+ } \
+ Push(WasmValue(result)); \
+ break; \
}
ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
@@ -1890,9 +2045,8 @@ class ThreadImpl {
case kExpr##name: { \
WasmValue val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
- if (BoundsCheck<mtype>(cached_instance_info_->mem_size, 0, index)) { \
- byte* addr = cached_instance_info_->mem_start + index; \
- /* TODO(titzer): alignment for asmjs store mem? */ \
+ byte* addr = BoundsCheckMem<mtype>(0, index); \
+ if (addr) { \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
} \
Push(val); \
@@ -1906,16 +2060,23 @@ class ThreadImpl {
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
case kExprGrowMemory: {
- MemoryIndexOperand<false> operand(&decoder, code->at(pc));
+ MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Push(WasmValue(ExecuteGrowMemory(
- delta_pages, codemap_->maybe_instance(), cached_instance_info_)));
+ Handle<WasmInstanceObject> instance =
+ codemap()->maybe_instance().ToHandleChecked();
+ DCHECK_EQ(wasm_context_, instance->wasm_context()->get());
+ Isolate* isolate = instance->GetIsolate();
+ int32_t result =
+ WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
+ Push(WasmValue(result));
len = 1 + operand.length;
break;
}
case kExprMemorySize: {
- MemoryIndexOperand<false> operand(&decoder, code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(cached_instance_info_->mem_size /
+ MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
+ code->at(pc));
+ Push(WasmValue(static_cast<uint32_t>(wasm_context_->mem_size /
WasmModule::kPageSize)));
len = 1 + operand.length;
break;
@@ -1926,69 +2087,53 @@ class ThreadImpl {
case kExprI32ReinterpretF32: {
WasmValue val = Pop();
Push(WasmValue(ExecuteI32ReinterpretF32(val)));
- possible_nondeterminism_ |= std::isnan(val.to<float>());
break;
}
case kExprI64ReinterpretF64: {
WasmValue val = Pop();
Push(WasmValue(ExecuteI64ReinterpretF64(val)));
- possible_nondeterminism_ |= std::isnan(val.to<double>());
break;
}
+ case kAtomicPrefix: {
+ if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+ break;
+ }
+
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
case kExpr##name: { \
WasmValue rval = Pop(); \
WasmValue lval = Pop(); \
- WasmValue result(lval.to<ctype>() op rval.to<ctype>()); \
- Push(result); \
+ auto result = lval.to<ctype>() op rval.to<ctype>(); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ Push(WasmValue(result)); \
break; \
}
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
-#define EXECUTE_OTHER_BINOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype rval = Pop().to<ctype>(); \
- volatile ctype lval = Pop().to<ctype>(); \
- WasmValue result(Execute##name(lval, rval, &trap)); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(result); \
- break; \
+#define EXECUTE_OTHER_BINOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ ctype rval = Pop().to<ctype>(); \
+ ctype lval = Pop().to<ctype>(); \
+ auto result = Execute##name(lval, rval, &trap); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(WasmValue(result)); \
+ break; \
}
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
- case kExprF32CopySign: {
- // Handle kExprF32CopySign separately because it may introduce
- // observable non-determinism.
- TrapReason trap = kTrapCount;
- volatile float rval = Pop().to<float>();
- volatile float lval = Pop().to<float>();
- WasmValue result(ExecuteF32CopySign(lval, rval, &trap));
- Push(result);
- possible_nondeterminism_ |= std::isnan(rval);
- break;
- }
- case kExprF64CopySign: {
- // Handle kExprF32CopySign separately because it may introduce
- // observable non-determinism.
- TrapReason trap = kTrapCount;
- volatile double rval = Pop().to<double>();
- volatile double lval = Pop().to<double>();
- WasmValue result(ExecuteF64CopySign(lval, rval, &trap));
- Push(result);
- possible_nondeterminism_ |= std::isnan(rval);
- break;
- }
-#define EXECUTE_OTHER_UNOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype val = Pop().to<ctype>(); \
- WasmValue result(Execute##name(val, &trap)); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(result); \
- break; \
+#define EXECUTE_OTHER_UNOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ ctype val = Pop().to<ctype>(); \
+ auto result = Execute##name(val, &trap); \
+ possible_nondeterminism_ |= has_nondeterminism(result); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(WasmValue(result)); \
+ break; \
}
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
@@ -2000,7 +2145,7 @@ class ThreadImpl {
}
#ifdef DEBUG
- if (!WasmOpcodes::IsControlOpcode(static_cast<WasmOpcode>(opcode))) {
+ if (!WasmOpcodes::IsControlOpcode(opcode)) {
DCHECK_EQ(expected_new_stack_height, StackHeight());
}
#endif
@@ -2118,9 +2263,8 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- // TODO(clemensh): Remove this, call JS via existing wasm-to-js wrapper, using
- // CallExternalWasmFunction.
- ExternalCallResult CallExternalJSFunction(Isolate* isolate, Handle<Code> code,
+ ExternalCallResult CallExternalJSFunction(Isolate* isolate,
+ WasmCodeWrapper code,
FunctionSig* signature) {
Handle<HeapObject> target = UnwrapWasmToJSWrapper(isolate, code);
@@ -2166,13 +2310,16 @@ class ThreadImpl {
if (signature->return_count() > 0) {
// TODO(wasm): Handle multiple returns.
DCHECK_EQ(1, signature->return_count());
- Push(ToWebAssemblyValue(isolate, retval, signature->GetReturn()));
+ WasmValue value =
+ ToWebAssemblyValue(isolate, retval, signature->GetReturn());
+ if (value.type() == kWasmStmt) return TryHandleException(isolate);
+ Push(value);
}
return {ExternalCallResult::EXTERNAL_RETURNED};
}
ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
- Handle<Code> code,
+ WasmCodeWrapper code,
FunctionSig* sig) {
Handle<WasmDebugInfo> debug_info(codemap()->instance()->debug_info(),
isolate);
@@ -2216,6 +2363,16 @@ class ThreadImpl {
offset += param_size;
}
+ // Ensure that there is enough space in the arg_buffer to hold the return
+ // value(s).
+ uint32_t return_size = 0;
+ for (ValueType t : sig->returns()) {
+ return_size += 1 << ElementSizeLog2Of(t);
+ }
+ if (arg_buffer.size() < return_size) {
+ arg_buffer.resize(return_size);
+ }
+
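
The new resize ensures the shared arg_buffer is large enough for whichever is larger, the packed parameters or the packed return values. 1 << ElementSizeLog2Of(t) is the byte width of a wasm value type; a stand-in computation under an assumed signature:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical sig->returns() of (i32, i64, f32, f64): log2 sizes 2,3,2,3.
  const int size_log2[] = {2, 3, 2, 3};
  uint32_t return_size = 0;
  for (int l : size_log2) return_size += uint32_t{1} << l;
  std::printf("%u\n", return_size);  // 24 bytes: 4 + 8 + 4 + 8
}
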
// Wrap the arg_buffer data pointer in a handle. As this is an aligned
// pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
@@ -2223,13 +2380,26 @@ class ThreadImpl {
DCHECK(!arg_buffer_obj->IsHeapObject());
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeObject] = code;
+ args[compiler::CWasmEntryParameters::kCodeObject] =
+ code.IsCodeObject()
+ ? Handle<Object>::cast(code.GetCode())
+ : Handle<Object>::cast(isolate->factory()->NewForeign(
+ code.GetWasmCode()->instructions().start(), TENURED));
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
+ trap_handler::SetThreadInWasm();
MaybeHandle<Object> maybe_retval =
Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
- if (maybe_retval.is_null()) return TryHandleException(isolate);
+ TRACE(" => External wasm function returned%s\n",
+ maybe_retval.is_null() ? " with exception" : "");
+
+ if (maybe_retval.is_null()) {
+ DCHECK(!trap_handler::IsThreadInWasm());
+ return TryHandleException(isolate);
+ }
+
+ trap_handler::ClearThreadInWasm();
// Pop arguments off the stack.
sp_ -= num_args;
@@ -2262,21 +2432,38 @@ class ThreadImpl {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
- if (code->kind() == Code::WASM_FUNCTION) {
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(2, deopt_data->length());
- WasmInstanceObject* target_instance =
- WasmInstanceObject::cast(WeakCell::cast(deopt_data->get(0))->value());
- if (target_instance != codemap()->instance()) {
- return CallExternalWasmFunction(isolate, code, signature);
+ if (code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_WASM_FUNCTION) {
+ auto func_info = GetWasmFunctionInfo(isolate, code);
+ if (*func_info.instance.ToHandleChecked() != codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
}
- int target_func_idx = Smi::ToInt(deopt_data->get(1));
- DCHECK_LE(0, target_func_idx);
+ DCHECK_LE(0, func_info.func_index);
return {ExternalCallResult::INTERNAL,
- codemap()->GetCode(target_func_idx)};
+ codemap()->GetCode(func_info.func_index)};
}
- return CallExternalJSFunction(isolate, code, signature);
+ return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
+ }
+
+ ExternalCallResult CallWasmCode(Isolate* isolate, const wasm::WasmCode* code,
+ FunctionSig* signature) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ if (code->kind() == wasm::WasmCode::Function) {
+ DCHECK_EQ(*code->owner()->compiled_module()->owning_instance(),
+ codemap()->instance());
+ return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
+ }
+ if (code->kind() == wasm::WasmCode::WasmToJsWrapper) {
+ return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
+ } else if (code->kind() == wasm::WasmCode::WasmToWasmWrapper) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
+ return {ExternalCallResult::INVALID_FUNC};
}
ExternalCallResult CallImportedFunction(uint32_t function_index) {
@@ -2285,17 +2472,36 @@ class ThreadImpl {
Isolate* isolate = codemap()->instance()->GetIsolate();
HandleScope handle_scope(isolate);
- Handle<Code> target(codemap()->GetImportedFunction(function_index),
- isolate);
- return CallCodeObject(isolate, target,
+ if (FLAG_wasm_jit_to_native) {
+ const wasm::WasmCode* target =
+ codemap()->GetImportedFunction(function_index);
+ return CallWasmCode(isolate, target,
codemap()->module()->functions[function_index].sig);
+ } else {
+ Handle<Code> target(codemap()->GetImportedFunctionGC(function_index),
+ isolate);
+ return CallCodeObject(isolate, target,
+ codemap()->module()->functions[function_index].sig);
+ }
}
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- if (!codemap()->has_instance() ||
- !codemap()->instance()->compiled_module()->has_function_tables()) {
+ bool no_func_tables = !codemap()->has_instance();
+ if (FLAG_wasm_jit_to_native) {
+ no_func_tables = no_func_tables || codemap()
+ ->instance()
+ ->compiled_module()
+ ->GetNativeModule()
+ ->function_tables()
+ .empty();
+ } else {
+ no_func_tables =
+ no_func_tables ||
+ !codemap()->instance()->compiled_module()->has_function_tables();
+ }
+ if (no_func_tables) {
// No instance. Rely on the information stored in the WasmModule.
// TODO(wasm): This is only needed for testing. Refactor testing to use
// the same paths as production.
@@ -2304,13 +2510,12 @@ class ThreadImpl {
if (!code) return {ExternalCallResult::INVALID_FUNC};
if (code->function->sig_index != sig_index) {
// If not an exact match, we have to do a canonical check.
- // TODO(titzer): make this faster with some kind of caching?
- const WasmIndirectFunctionTable* table =
- &module()->function_tables[table_index];
- int function_key = table->map.Find(code->function->sig);
- if (function_key < 0 ||
- (function_key !=
- table->map.Find(module()->signatures[sig_index]))) {
+ int function_canonical_id =
+ module()->signature_ids[code->function->sig_index];
+ int expected_canonical_id = module()->signature_ids[sig_index];
+ DCHECK_EQ(function_canonical_id,
+ module()->signature_map.Find(code->function->sig));
+ if (function_canonical_id != expected_canonical_id) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
}
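
This is the replacement for the old per-table table->map.Find(...) calls: signature indices are canonicalized once, at module decode time, into signature_ids, so an indirect-call signature check becomes two array loads and an integer compare. A sketch of the canonicalization idea (std::map and strings stand in for V8's SignatureMap and FunctionSig):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  // Three declared signatures; declarations 0 and 1 are structurally equal.
  std::vector<std::string> signatures = {"i32(i32,i32)", "i32(i32,i32)",
                                         "f64(f64)"};
  std::map<std::string, int> canonical;  // stand-in for SignatureMap
  std::vector<int> signature_ids;        // one canonical id per declaration
  for (const auto& sig : signatures) {
    auto it = canonical.emplace(sig, static_cast<int>(canonical.size())).first;
    signature_ids.push_back(it->second);
  }
  std::printf("%d %d %d\n", signature_ids[0], signature_ids[1],
              signature_ids[2]);  // 0 0 1 -- declarations 0 and 1 match
}
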
@@ -2321,53 +2526,92 @@ class ThreadImpl {
codemap()->instance()->compiled_module();
Isolate* isolate = compiled_module->GetIsolate();
- Code* target;
+ const wasm::WasmCode* target = nullptr;
+ Code* target_gc = nullptr;
{
DisallowHeapAllocation no_gc;
// Get function to be called directly from the live instance to see latest
// changes to the tables.
// Canonicalize signature index.
- // TODO(titzer): make this faster with some kind of caching?
- const WasmIndirectFunctionTable* table =
- &module()->function_tables[table_index];
- FunctionSig* sig = module()->signatures[sig_index];
- uint32_t canonical_sig_index = table->map.Find(sig);
-
- // Check signature.
- FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
- if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- // Reconstitute the global handle to sig_table, and, further below,
- // to the function table, from the address stored in the
- // respective table of tables.
- int table_index_as_int = static_cast<int>(table_index);
- Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
- if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
+ uint32_t canonical_sig_index = module()->signature_ids[sig_index];
+ DCHECK_EQ(canonical_sig_index,
+ module()->signature_map.Find(module()->signatures[sig_index]));
+
+ if (!FLAG_wasm_jit_to_native) {
+ // Check signature.
+ FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
+ if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ // Reconstitute the global handle to sig_table, and, further below,
+ // to the function table, from the address stored in the
+ // respective table of tables.
+ int table_index_as_int = static_cast<int>(table_index);
+ Handle<FixedArray> sig_table(reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(sig_tables, table_index_as_int)));
+ if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ int found_sig =
+ Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
+
+ // Get code object.
+ FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
+ DCHECK_EQ(sig_tables->length(), fun_tables->length());
+ Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
+ WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
+ DCHECK_EQ(sig_table->length(), fun_table->length());
+ target_gc = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ } else {
+ // Check signature.
+ std::vector<GlobalHandleAddress>& sig_tables =
+ compiled_module->GetNativeModule()->signature_tables();
+ if (table_index >= sig_tables.size()) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ // Reconstitute the global handle to sig_table, and, further below,
+ // to the function table, from the address stored in the
+ // respective table of tables.
+ int table_index_as_int = static_cast<int>(table_index);
+ Handle<FixedArray> sig_table(
+ reinterpret_cast<FixedArray**>(sig_tables[table_index_as_int]));
+ if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ int found_sig =
+ Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
+ if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
- // Get code object.
- FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
- DCHECK_EQ(sig_tables->length(), fun_tables->length());
- Handle<FixedArray> fun_table(reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int)));
- DCHECK_EQ(sig_table->length(), fun_table->length());
- target = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ // Get code object.
+ std::vector<GlobalHandleAddress>& fun_tables =
+ compiled_module->GetNativeModule()->function_tables();
+ DCHECK_EQ(sig_tables.size(), fun_tables.size());
+ Handle<FixedArray> fun_table(
+ reinterpret_cast<FixedArray**>(fun_tables[table_index_as_int]));
+ DCHECK_EQ(sig_table->length(), fun_table->length());
+ Address first_instr =
+ Foreign::cast(fun_table->get(static_cast<int>(entry_index)))
+ ->foreign_address();
+ target =
+ isolate->wasm_code_manager()->GetCodeFromStartAddress(first_instr);
+ }
}
// Call the code object. Use a new HandleScope to avoid leaking /
// accumulating handles in the outer scope.
HandleScope handle_scope(isolate);
- FunctionSig* signature =
- &codemap()->module()->signatures[table_index][sig_index];
- return CallCodeObject(isolate, handle(target, isolate), signature);
+ FunctionSig* signature = module()->signatures[sig_index];
+ if (FLAG_wasm_jit_to_native) {
+ return CallWasmCode(isolate, target, signature);
+ } else {
+ return CallCodeObject(isolate, handle(target_gc, isolate), signature);
+ }
}
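
Both branches above reconstitute a Handle<FixedArray> from a raw address stored in a "table of tables": the stored value is the address of a global-handle slot, i.e. a pointer to a pointer to the actual table. A contrived but self-contained sketch of that double indirection (plain ints stand in for FixedArray and GlobalHandleAddress):

#include <cstdint>
#include <cstdio>

int main() {
  int table = 42;             // the function table itself
  int* handle_slot = &table;  // the global handle slot pointing at it
  // What the table of tables stores: the address of the handle slot.
  uintptr_t stored = reinterpret_cast<uintptr_t>(&handle_slot);
  // What the lookup reconstitutes, analogous to Handle<FixedArray>(...):
  int** reconstituted = reinterpret_cast<int**>(stored);
  std::printf("%d\n", **reconstituted);  // 42
}
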
inline Activation current_activation() {
@@ -2559,9 +2803,6 @@ uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
//============================================================================
class WasmInterpreterInternals : public ZoneObject {
public:
- // We cache the memory information of the debugged instance here, and all
- // threads (currently, one) share it and update it in case of {GrowMemory}.
- CachedInstanceInfo cached_instance_info_;
// Create a copy of the module bytes for the interpreter, since the passed
// pointer might be invalidated after constructing the interpreter.
const ZoneVector<uint8_t> module_bytes_;
@@ -2571,13 +2812,11 @@ class WasmInterpreterInternals : public ZoneObject {
WasmInterpreterInternals(Isolate* isolate, Zone* zone,
const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- byte* globals_start, byte* mem_start,
- uint32_t mem_size)
- : cached_instance_info_(globals_start, mem_start, mem_size),
- module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
+ WasmContext* wasm_context)
+ : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
codemap_(isolate, module, module_bytes_.data(), zone),
threads_(zone) {
- threads_.emplace_back(zone, &codemap_, &cached_instance_info_);
+ threads_.emplace_back(zone, &codemap_, wasm_context);
}
};
@@ -2586,12 +2825,10 @@ class WasmInterpreterInternals : public ZoneObject {
//============================================================================
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- byte* globals_start, byte* mem_start,
- uint32_t mem_size)
+ WasmContext* wasm_context)
: zone_(isolate->allocator(), ZONE_NAME),
internals_(new (&zone_) WasmInterpreterInternals(
- isolate, &zone_, module, wire_bytes, globals_start, mem_start,
- mem_size)) {}
+ isolate, &zone_, module, wire_bytes, wasm_context)) {}
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
@@ -2643,14 +2880,6 @@ WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
return ToThread(&internals_->threads_[id]);
}
-void WasmInterpreter::UpdateMemory(byte* mem_start, uint32_t mem_size) {
- // We assume one thread. Things are likely to be more complicated than this
- // in a multi-threaded case.
- DCHECK_EQ(1, internals_->threads_.size());
- internals_->cached_instance_info_.mem_start = mem_start;
- internals_->cached_instance_info_.mem_size = mem_size;
-}
-
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
internals_->codemap_.AddFunction(function, nullptr, nullptr);
}