Diffstat (limited to 'deps/v8/test/cctest/wasm')
-rw-r--r--  deps/v8/test/cctest/wasm/test-jump-table-assembler.cc  | 105
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-64.cc           |   4
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc      |  48
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc    |  48
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-module.cc       |   4
-rw-r--r--  deps/v8/test/cctest/wasm/test-run-wasm-simd.cc         | 593
-rw-r--r--  deps/v8/test/cctest/wasm/test-streaming-compilation.cc |   2
-rw-r--r--  deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc    |  14
-rw-r--r--  deps/v8/test/cctest/wasm/wasm-run-utils.cc             |  21
9 files changed, 551 insertions, 288 deletions
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index dc02cfd14a..556d74daef 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -32,16 +32,18 @@ constexpr int kJumpTableSlotCount = 128;
constexpr uint32_t kJumpTableSize =
JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
+constexpr size_t kThunkBufferSize = AssemblerBase::kMinimalBufferSize;
+
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
constexpr uint32_t kAvailableBufferSlots =
- (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
+ (kMaxWasmCodeMemory - kJumpTableSize) / kThunkBufferSize;
constexpr uint32_t kBufferSlotStartOffset =
- RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
+ RoundUp<kThunkBufferSize>(kJumpTableSize);
#else
constexpr uint32_t kAvailableBufferSlots = 0;
#endif
-Address GenerateJumpTableThunk(
+Address AllocateJumpTableThunk(
Address jump_target, byte* thunk_slot_buffer,
std::bitset<kAvailableBufferSlots>* used_slots,
std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
@@ -62,20 +64,22 @@ Address GenerateJumpTableThunk(
buffer_index = rng->NextInt(kAvailableBufferSlots);
} while (used_slots->test(buffer_index));
used_slots->set(buffer_index);
- byte* buffer =
- thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;
+ return reinterpret_cast<Address>(thunk_slot_buffer +
+ buffer_index * kThunkBufferSize);
#else
USE(thunk_slot_buffer);
USE(used_slots);
- thunk_buffers->emplace_back(AllocateAssemblerBuffer(
- AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()));
- byte* buffer = thunk_buffers->back()->start();
+ thunk_buffers->emplace_back(
+ AllocateAssemblerBuffer(kThunkBufferSize, GetRandomMmapAddr()));
+ return reinterpret_cast<Address>(thunk_buffers->back()->start());
#endif
+}
- MacroAssembler masm(
- nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
- ExternalAssemblerBuffer(buffer, AssemblerBase::kMinimalBufferSize));
+void CompileJumpTableThunk(Address thunk, Address jump_target) {
+ MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(reinterpret_cast<void*>(thunk),
+ kThunkBufferSize));
Label exit;
Register scratch = kReturnRegister0;
@@ -132,9 +136,9 @@ Address GenerateJumpTableThunk(
__ bind(&exit);
__ Ret();
- CodeDesc desc;
- masm.GetCode(nullptr, &desc);
- return reinterpret_cast<Address>(buffer);
+ FlushInstructionCache(thunk, kThunkBufferSize);
+ CHECK(SetPermissions(GetPlatformPageAllocator(), thunk, kThunkBufferSize,
+ v8::PageAllocator::kReadExecute));
}
class JumpTableRunner : public v8::base::Thread {
@@ -159,29 +163,38 @@ class JumpTableRunner : public v8::base::Thread {
class JumpTablePatcher : public v8::base::Thread {
public:
JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
- Address thunk2)
+ Address thunk2, base::Mutex* jump_table_mutex)
: Thread(Options("JumpTablePatcher")),
slot_start_(slot_start),
slot_index_(slot_index),
- thunks_{thunk1, thunk2} {}
+ thunks_{thunk1, thunk2},
+ jump_table_mutex_(jump_table_mutex) {}
void Run() override {
- TRACE("Patcher is starting ...\n");
+ TRACE("Patcher %p is starting ...\n", this);
+ Address slot_address =
+ slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
+ // First, emit code to the two thunks.
+ for (Address thunk : thunks_) {
+ CompileJumpTableThunk(thunk, slot_address);
+ }
+ // Then, repeatedly patch the jump table to jump to one of the two thunks.
constexpr int kNumberOfPatchIterations = 64;
for (int i = 0; i < kNumberOfPatchIterations; ++i) {
- TRACE(" patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
- slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
- i % 2);
+ TRACE(" patcher %p patch slot " V8PRIxPTR_FMT " to thunk #%d\n", this,
+ slot_address, i % 2);
+ base::MutexGuard jump_table_guard(jump_table_mutex_);
JumpTableAssembler::PatchJumpTableSlot(
slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
}
- TRACE("Patcher is stopping ...\n");
+ TRACE("Patcher %p is stopping ...\n", this);
}
private:
Address slot_start_;
uint32_t slot_index_;
Address thunks_[2];
+ base::Mutex* jump_table_mutex_;
};
} // namespace
@@ -198,9 +211,10 @@ class JumpTablePatcher : public v8::base::Thread {
// one of the runners is currently executing the jump-table slot.
TEST(JumpTablePatchingStress) {
constexpr int kNumberOfRunnerThreads = 5;
+ constexpr int kNumberOfPatcherThreads = 3;
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
- // We need the branches (from GenerateJumpTableThunk) to be within near-call
+ // We need the branches (from CompileJumpTableThunk) to be within near-call
// range of the jump table slots. The address hint to AllocateAssemblerBuffer
// is not reliable enough to guarantee that we can always achieve this with
// separate allocations, so for Arm64 we generate all code in a single
@@ -226,29 +240,42 @@ TEST(JumpTablePatchingStress) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
- Address thunk1 =
- GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
- &used_thunk_slots, &thunk_buffers);
- Address thunk2 =
- GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
- &used_thunk_slots, &thunk_buffers);
- TRACE(" generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
- TRACE(" generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
- JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
- WasmCode::kFlushICache);
+ // Patch the jump table slot to jump to itself. This will later be patched
+ // by the patchers.
+ JumpTableAssembler::PatchJumpTableSlot(
+ slot_start, slot, slot_start + slot_offset, WasmCode::kFlushICache);
+ // For each patcher, generate two thunks where this patcher can emit code
+ // which finally jumps back to {slot} in the jump table.
+ std::vector<Address> patcher_thunks;
+ for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
+ Address thunk =
+ AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
+ &used_thunk_slots, &thunk_buffers);
+ ZapCode(thunk, kThunkBufferSize);
+ patcher_thunks.push_back(thunk);
+ TRACE(" generated jump thunk: " V8PRIxPTR_FMT "\n",
+ patcher_thunks.back());
+ }
- for (auto& buf : thunk_buffers) buf->MakeExecutable();
- // Start multiple runner threads and a patcher thread that hammer on the
- // same jump-table slot concurrently.
+ // Start multiple runner threads that execute the jump table slot
+ // concurrently.
std::list<JumpTableRunner> runners;
for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
runners.emplace_back(slot_start + slot_offset, runner);
}
- JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
+  // Start multiple patcher threads that concurrently generate code and insert
+  // jumps to that code into the jump table slot.
+ std::list<JumpTablePatcher> patchers;
+ // Only one patcher should modify the jump table at a time.
+ base::Mutex jump_table_mutex;
+ for (int i = 0; i < kNumberOfPatcherThreads; ++i) {
+ patchers.emplace_back(slot_start, slot, patcher_thunks[2 * i],
+ patcher_thunks[2 * i + 1], &jump_table_mutex);
+ }
global_stop_bit = 0; // Signal runners to keep going.
- for (auto& runner : runners) runner.Start();
- patcher.Start();
- patcher.Join();
+ for (auto& runner : runners) CHECK(runner.Start());
+ for (auto& patcher : patchers) CHECK(patcher.Start());
+ for (auto& patcher : patchers) patcher.Join();
global_stop_bit = -1; // Signal runners to stop.
for (auto& runner : runners) runner.Join();
}
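The change above splits thunk creation into AllocateJumpTableThunk (pick a buffer) and CompileJumpTableThunk (emit code into it later), and replaces the single patcher with several patcher threads that take a shared mutex before rewriting the slot while runner threads keep executing it. A minimal standalone sketch of that runner/patcher pattern, with an atomic function pointer standing in for a real jump-table slot (all names below are illustrative, not V8 APIs):

#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// A stand-in "jump table slot": runners call through it while patchers
// keep swapping the target, mirroring the test's runner/patcher split.
using ThunkFn = int (*)();
static int Thunk0() { return 0; }
static int Thunk1() { return 1; }

static std::atomic<ThunkFn> jump_slot{Thunk0};
static std::atomic<bool> stop{false};
static std::mutex patch_mutex;  // Only one patcher may rewrite the slot at a time.

static void Runner() {
  while (!stop.load(std::memory_order_acquire)) {
    jump_slot.load(std::memory_order_acquire)();  // "execute" the slot
  }
}

static void Patcher(int id) {
  for (int i = 0; i < 64; ++i) {
    std::lock_guard<std::mutex> guard(patch_mutex);
    jump_slot.store(i % 2 ? Thunk1 : Thunk0, std::memory_order_release);
  }
  std::printf("patcher %d done\n", id);
}

int main() {
  std::vector<std::thread> runners, patchers;
  for (int i = 0; i < 5; ++i) runners.emplace_back(Runner);
  for (int i = 0; i < 3; ++i) patchers.emplace_back(Patcher, i);
  for (auto& p : patchers) p.join();
  stop.store(true, std::memory_order_release);
  for (auto& r : runners) r.join();
  return 0;
}

As in the test, patching is mutually exclusive, but execution never takes the lock.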
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index c8dd901161..3f96f8720f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -618,7 +618,9 @@ WASM_EXEC_TEST(F32UConvertI64) {
{0x8000008000000000, 0x5F000000},
{0x8000008000000001, 0x5F000001},
{0x8000000000000400, 0x5F000000},
- {0x8000000000000401, 0x5F000000}};
+ {0x8000000000000401, 0x5F000000},
+ {0x20000020000001, 0x5a000001},
+ {0xFFFFFe8000000001, 0x5f7FFFFF}};
WasmRunner<float, uint64_t> r(execution_tier);
BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
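The two new table entries probe float32 rounding just above a tie: 0x20000020000001 is 2^53 + 2^29 + 1, one unit above the midpoint between 2^53 (bits 0x5A000000) and the next float32 (0x5A000001), and 0xFFFFFE8000000001 is one unit above the midpoint between the two largest float32 values below 2^64 (0x5F7FFFFE and 0x5F7FFFFF). A small host-side sketch to reproduce the expected bit patterns, assuming the host converts uint64 to float with round-to-nearest (typical, but not mandated by C++):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns the raw float32 bits produced by a uint64 -> float conversion.
static uint32_t F32BitsOfU64(uint64_t input) {
  float f = static_cast<float>(input);
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}

int main() {
  std::printf("%" PRIx32 "\n", F32BitsOfU64(0x20000020000001ULL));    // expect 5a000001
  std::printf("%" PRIx32 "\n", F32BitsOfU64(0xFFFFFE8000000001ULL));  // expect 5f7fffff
  return 0;
}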
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 60cda4adde..354ff436c0 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -317,6 +317,54 @@ WASM_EXEC_TEST(AtomicFence) {
CHECK_EQ(0, r.Call());
}
+WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_STORE_OP(kExprI32AtomicStore, WASM_ZERO, WASM_I32V_1(20),
+ MachineRepresentation::kWord32),
+ kExprI64Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
+void RunNoEffectTest(ExecutionTier execution_tier, WasmOpcode wasm_op) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I32V_1(20),
+ MachineRepresentation::kWord32),
+ WASM_DROP, kExprI64Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(AtomicAddNoConsideredEffectful) {
+ RunNoEffectTest(execution_tier, kExprI32AtomicAdd);
+}
+
+WASM_EXEC_TEST(AtomicExchangeNoConsideredEffectful) {
+ RunNoEffectTest(execution_tier, kExprI32AtomicExchange);
+}
+
+WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO),
+ WASM_ATOMICS_TERNARY_OP(kExprI32AtomicCompareExchange, WASM_ZERO,
+ WASM_ZERO, WASM_I32V_1(30),
+ MachineRepresentation::kWord32),
+ WASM_DROP, kExprI32Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
} // namespace test_run_wasm_atomics
} // namespace wasm
} // namespace internal
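The new tests above all follow one shape: a plain load of address 0 (the memory is zero-initialised), then an atomic operation that writes a non-zero value to the same address, then eqz applied to the value produced by the earlier load, so the call returns 1 only if that load is still evaluated before the atomic write. The names suggest the intent: even if the optimizer does not treat the atomic operation as effectful, the preceding load must not be scheduled past it. A scalar analogue of the observable check (illustrative only, not wasm semantics):

#include <cassert>
#include <cstdint>

int main() {
  int64_t memory[1] = {0};
  int64_t loaded = memory[0];  // WASM_LOAD_MEM(Int64, 0)
  memory[0] = 20;              // the atomic store / RMW in the test
  assert(loaded == 0);         // kExprI64Eqz is expected to yield 1
  return 0;
}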
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 2d5d6a945c..748adc4a67 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -646,6 +646,54 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32UFail) {
CHECK_EQ(initial, r.builder().ReadMemory(&memory[0]));
}
+WASM_EXEC_TEST(AtomicStoreNoConsideredEffectful) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_I64V(20),
+ MachineRepresentation::kWord64),
+ kExprI64Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
+void RunNoEffectTest(ExecutionTier execution_tier, WasmOpcode wasm_op) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_BINOP(wasm_op, WASM_ZERO, WASM_I64V(20),
+ MachineRepresentation::kWord64),
+ WASM_DROP, kExprI64Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_EXEC_TEST(AtomicAddNoConsideredEffectful) {
+ RunNoEffectTest(execution_tier, kExprI64AtomicAdd);
+}
+
+WASM_EXEC_TEST(AtomicExchangeNoConsideredEffectful) {
+ RunNoEffectTest(execution_tier, kExprI64AtomicExchange);
+}
+
+WASM_EXEC_TEST(AtomicCompareExchangeNoConsideredEffectful) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ FLAG_wasm_trap_handler = false; // To use {Load} instead of {ProtectedLoad}.
+ WasmRunner<uint32_t> r(execution_tier);
+ r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+ r.builder().SetHasSharedMemory();
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_ZERO),
+ WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange, WASM_ZERO,
+ WASM_I64V(0), WASM_I64V(30),
+ MachineRepresentation::kWord64),
+ WASM_DROP, kExprI64Eqz);
+ CHECK_EQ(1, r.Call());
+}
+
} // namespace test_run_wasm_atomics_64
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 526c5846a2..51d97650d4 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -583,7 +583,7 @@ TEST(TestInterruptLoop) {
int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store());
InterruptThread thread(isolate, memory_array);
- thread.Start();
+ CHECK(thread.Start());
testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
Address address = reinterpret_cast<Address>(
&memory_array[InterruptThread::interrupt_location_]);
@@ -910,6 +910,8 @@ TEST(EmptyMemoryEmptyDataSegment) {
TEST(MemoryWithOOBEmptyDataSegment) {
{
+ FlagScope<bool> no_bulk_memory(
+ &v8::internal::FLAG_experimental_wasm_bulk_memory, false);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index b1d95a12bb..b48321df40 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -20,6 +20,7 @@ namespace test_run_wasm_simd {
namespace {
using DoubleUnOp = double (*)(double);
+using DoubleBinOp = double (*)(double, double);
using DoubleCompareOp = int64_t (*)(double, double);
using FloatUnOp = float (*)(float);
using FloatBinOp = float (*)(float, float);
@@ -85,6 +86,13 @@ T Mul(T a, T b) {
return a * b;
}
+template <typename T, typename = typename std::enable_if<
+ std::is_floating_point<T>::value>::type>
+T Div(T a, T b) {
+ // Workaround C++ undefined behavior when b is 0.
+ return base::Divide(a, b);
+}
+
template <typename T>
T Minimum(T a, T b) {
return a <= b ? a : b;
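Div delegates to base::Divide because, strictly speaking, evaluating a / b with b == 0 is undefined behaviour in C++ even for floating-point operands, while the wasm lanes under test must produce the IEEE results (an infinity or NaN). A rough sketch of such a UB-free reference divide, as a hypothetical stand-in rather than V8's actual base::Divide:

#include <cmath>
#include <limits>
#include <type_traits>

// Handles the b == 0 cases explicitly; otherwise falls back to plain division.
template <typename T, typename = typename std::enable_if<
                          std::is_floating_point<T>::value>::type>
T SafeDivide(T a, T b) {
  if (b != T{0}) return a / b;
  if (std::isnan(a) || a == T{0}) return std::numeric_limits<T>::quiet_NaN();
  bool negative = std::signbit(a) != std::signbit(b);  // sign of the infinity
  return negative ? -std::numeric_limits<T>::infinity()
                  : std::numeric_limits<T>::infinity();
}

// e.g. SafeDivide(1.0, 0.0) == +inf, SafeDivide(-1.0, 0.0) == -inf,
//      SafeDivide(0.0, 0.0) and SafeDivide(NAN, 0.0) are NaN.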
@@ -271,7 +279,7 @@ T Sqrt(T a) {
return std::sqrt(a);
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
// only used for F64x2 tests below
int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
@@ -284,7 +292,7 @@ int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
int64_t Less(double a, double b) { return a < b ? -1 : 0; }
int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-#endif // V8_TARGET_ARCH_X64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
} // namespace
@@ -299,7 +307,7 @@ int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
-#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
+#define WASM_SIMD_SHIFT_OP(op, x, y) x, y, WASM_SIMD_OP(op)
#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
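The macro change switches SIMD shifts from an immediate byte encoded after the opcode to an i32 shift count passed as a second value operand; the shift tests later in this file are updated accordingly (splat the value, set a local to the shift count, pass both to the opcode). The expected lane values are still computed by scalar helpers; a sketch of what such reference shifts can look like when written to avoid signed-overflow UB (the helper names here are placeholders, not the ones defined in the test utilities):

#include <type_traits>

// Per-lane reference shifts, performed on the unsigned representation so that
// shifting a negative value left stays well-defined.
template <typename T>
T RefShiftLeft(T value, int shift) {
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(value) << shift);
}

template <typename T>
T RefShiftRightLogical(T value, int shift) {
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(value) >> shift);
}

template <typename T>
T RefShiftRightArithmetic(T value, int shift) {
  // Right shift of a negative value is implementation-defined before C++20,
  // but arithmetic on the compilers V8 targets.
  return value >> shift;
}

// The tests only use shift counts below the lane width, so no masking of the
// count is needed in these sketches.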
@@ -652,12 +660,13 @@ WASM_SIMD_TEST(F32x4Sub) {
WASM_SIMD_TEST(F32x4Mul) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
-// v8:8425 tracks this test being enabled in the interpreter.
-WASM_SIMD_COMPILED_TEST(F32x4Min) {
+WASM_SIMD_TEST(F32x4Div) {
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, Div);
+}
+WASM_SIMD_TEST(F32x4Min) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
}
-// v8:8425 tracks this test being enabled in the interpreter.
-WASM_SIMD_COMPILED_TEST(F32x4Max) {
+WASM_SIMD_TEST(F32x4Max) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
@@ -715,7 +724,201 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ // Set up a global to hold output vector.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte param1 = 0;
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = x;
+ for (int i = 0; i < 2; i++) {
+ int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
+ CHECK_EQ(actual, expected);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractLane) {
+ WasmRunner<int64_t> r(execution_tier, lower_simd);
+ r.AllocateLocal(kWasmI64);
+ r.AllocateLocal(kWasmS128);
+ BUILD(
+ r,
+ WASM_SET_LOCAL(0, WASM_SIMD_I64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
+ WASM_SET_LOCAL(1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
+ CHECK_EQ(0xFFFFFFFFFF, r.Call());
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ReplaceLane) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ // Set up a global to hold input/output vector.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build function to replace each lane with its index.
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
+ WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
+ WASM_ONE);
+
+ r.Call();
+ for (int64_t i = 0; i < 2; i++) {
+ CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+}
+
+void RunI64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64UnOp expected_op) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
+ RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
+ base::NegateWithWraparound);
+}
+
+void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64ShiftOp expected_op) {
+ for (int shift = 1; shift < 64; shift++) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte value = 0;
+ byte shift_index = r.AllocateLocal(kWasmI32);
+ byte simd1 = r.AllocateLocal(kWasmS128);
+ BUILD(r,
+ WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+ WASM_GET_LOCAL(shift_index))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x, shift);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Shl) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
+ LogicalShiftLeft);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrS) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
+ ArithmeticShiftRight);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
+ LogicalShiftRight);
+}
+
+void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64BinOp expected_op) {
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+ WASM_GET_LOCAL(temp2))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ FOR_INT64_INPUTS(y) {
+ r.Call(x, y);
+ int64_t expected = expected_op(x, y);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Add) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
+ base::AddWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
+ base::SubWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
+ UnsignedLessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
+ UnsignedGreaterEqual);
+}
+
WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -770,6 +973,16 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLane) {
}
}
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
+ WasmRunner<int64_t> r(execution_tier, lower_simd);
+ BUILD(r, WASM_IF_ELSE_L(
+ WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
+ WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
+ WASM_I64V(1), WASM_I64V(0)));
+ CHECK_EQ(1, r.Call());
+}
+
WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up a global to hold input/output vector.
@@ -789,58 +1002,12 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
}
}
-void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, DoubleCompareOp expected_op) {
- WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
- // Set up global to hold mask output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform compare op, and write the result.
- byte value1 = 0, value2 = 1;
- byte temp1 = r.AllocateLocal(kWasmS128);
- byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
- WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
- WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
- WASM_GET_LOCAL(temp2))),
- WASM_ONE);
-
- FOR_FLOAT64_INPUTS(x) {
- if (!PlatformCanRepresent(x)) continue;
- FOR_FLOAT64_INPUTS(y) {
- if (!PlatformCanRepresent(y)) continue;
- double diff = x - y; // Model comparison as subtraction.
- if (!PlatformCanRepresent(diff)) continue;
- r.Call(x, y);
- int64_t expected = expected_op(x, y);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
- }
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
- RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
+bool IsExtreme(double x) {
+ double abs_x = std::fabs(x);
+ const double kSmallFloatThreshold = 1.0e-298;
+ const double kLargeFloatThreshold = 1.0e298;
+ return abs_x != 0.0f && // 0 or -0 are fine.
+ (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
bool IsSameNan(double expected, double actual) {
@@ -855,7 +1022,7 @@ bool IsSameNan(double expected, double actual) {
bool IsCanonical(double actual) {
uint64_t actual_bits = bit_cast<uint64_t>(actual);
// Canonical NaN has quiet bit and no payload.
- return (actual_bits & 0xFF80000000000000) == actual_bits;
+ return (actual_bits & 0xFFF8000000000000) == actual_bits;
}
void CheckDoubleResult(double x, double y, double expected, double actual,
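The corrected mask keeps exactly the sign bit, the 11 exponent bits, and the quiet bit of a float64, so a canonical quiet NaN (0x7FF8000000000000, optionally with the sign bit set) passes and any payload bits make the check fail; the previous constant 0xFF80000000000000 only covered the top nine bits. A tiny standalone check of the new mask (platform NaN encodings assumed to be the usual IEEE ones):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Mirrors the fixed predicate: only sign/exponent/quiet-bit positions may be set.
static bool IsCanonicalNaNBits(uint64_t bits) {
  return (bits & 0xFFF8000000000000ULL) == bits;
}

int main() {
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof bits);
  assert(IsCanonicalNaNBits(bits));                    // 0x7FF8000000000000 on common targets
  assert(!IsCanonicalNaNBits(0x7FF8000000000001ULL));  // payload bit set -> not canonical
  return 0;
}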
@@ -948,7 +1115,6 @@ void RunF64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
}
-#undef FOR_FLOAT64_NAN_INPUTS
WASM_SIMD_TEST_NO_LOWERING(F64x2Abs) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
@@ -958,96 +1124,90 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Neg) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- // Set up a global to hold output vector.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- byte param1 = 0;
- BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+void RunF64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleBinOp expected_op) {
+ WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+ // Global to hold output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build fn to splat test value, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+ WASM_GET_LOCAL(temp2))),
WASM_ONE);
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = x;
- for (int i = 0; i < 2; i++) {
- int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
- CHECK_EQ(actual, expected);
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_INPUTS(y) {
+ if (!PlatformCanRepresent(x)) continue;
+ double expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, y, expected, actual, true /* exact */);
+ }
}
}
-}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
- WasmRunner<int64_t> r(execution_tier, lower_simd);
- BUILD(r, WASM_IF_ELSE_L(
- WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
- 0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
- WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
- WASM_I64V(1), WASM_I64V(0)));
- CHECK_EQ(1, r.Call());
+ FOR_FLOAT64_NAN_INPUTS(i) {
+ double x = bit_cast<double>(double_nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_NAN_INPUTS(j) {
+ double y = bit_cast<double>(double_nan_test_array[j]);
+ double expected = expected_op(x, y);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x, y);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, y, expected, actual, true /* exact */);
+ }
+ }
+ }
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ReplaceLane) {
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- // Set up a global to hold input/output vector.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build function to replace each lane with its index.
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
- WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
- 0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
- WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
- 1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
- WASM_ONE);
+#undef FOR_FLOAT64_NAN_INPUTS
- r.Call();
- for (int64_t i = 0; i < 2; i++) {
- CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
- }
+WASM_SIMD_TEST_NO_LOWERING(F64x2Add) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
}
-void RunI64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64UnOp expected_op) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test value, perform unop, and write the result.
- byte value = 0;
- byte temp1 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
- WASM_ONE);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Sub) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Sub, Sub);
+}
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
+WASM_SIMD_TEST_NO_LOWERING(F64x2Mul) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Mul, Mul);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
- RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
- base::NegateWithWraparound);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Div) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, Div);
}
-void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64BinOp expected_op) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
- // Global to hold output.
+void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleCompareOp expected_op) {
+ WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- // Build fn to splat test values, perform binop, and write the result.
+ // Build fn to splat test values, perform compare op, and write the result.
byte value1 = 0, value2 = 1;
byte temp1 = r.AllocateLocal(kWasmS128);
byte temp2 = r.AllocateLocal(kWasmS128);
- BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
- WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
WASM_GET_LOCAL(temp2))),
WASM_ONE);
- FOR_INT64_INPUTS(x) {
- FOR_INT64_INPUTS(y) {
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_INPUTS(y) {
+ if (!PlatformCanRepresent(y)) continue;
+ double diff = x - y; // Model comparison as subtraction.
+ if (!PlatformCanRepresent(diff)) continue;
r.Call(x, y);
int64_t expected = expected_op(x, y);
for (int i = 0; i < 2; i++) {
@@ -1057,101 +1217,63 @@ void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Add) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
- base::AddWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
- base::SubWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
- base::MulWithWraparound);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
- UnsignedLessEqual);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Min) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Min, JSMin);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
+WASM_SIMD_TEST_NO_LOWERING(F64x2Max) {
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Max, JSMax);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
- UnsignedGreaterEqual);
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
+ base::MulWithWraparound);
}
-void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
- WasmOpcode opcode, Int64ShiftOp expected_op) {
- for (int shift = 1; shift < 64; shift++) {
- WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
- int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
- byte value = 0;
- byte simd1 = r.AllocateLocal(kWasmS128);
- BUILD(r,
- WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(
- 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
- WASM_ONE);
-
- FOR_INT64_INPUTS(x) {
- r.Call(x);
- int64_t expected = expected_op(x, shift);
- for (int i = 0; i < 2; i++) {
- CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
- }
- }
- }
+WASM_SIMD_TEST_NO_LOWERING(I64x2MinS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinS, Minimum);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2Shl) {
- RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
- LogicalShiftLeft);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MaxS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxS, Maximum);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ShrS) {
- RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
- ArithmeticShiftRight);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MinU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinU,
+ UnsignedMinimum);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
- RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
- LogicalShiftRight);
+WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxU,
+ UnsignedMaximum);
}
#endif // V8_TARGET_ARCH_X64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1534,16 +1656,17 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
byte value = 0;
+ byte shift_index = r.AllocateLocal(kWasmI32);
byte simd1 = r.AllocateLocal(kWasmS128);
- BUILD(r,
+ BUILD(r, WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
WASM_SET_LOCAL(simd1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(
- 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+ WASM_GET_LOCAL(shift_index))),
WASM_ONE);
FOR_INT32_INPUTS(x) {
r.Call(x);
- float expected = expected_op(x, shift);
+ int32_t expected = expected_op(x, shift);
for (int i = 0; i < 4; i++) {
CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
}
@@ -1551,17 +1674,17 @@ void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST(I32x4Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4Shl) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST(I32x4ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4ShrS) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST(I32x4ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I32x4ShrU) {
RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
LogicalShiftRight);
}
@@ -1784,10 +1907,12 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
byte value = 0;
byte simd1 = r.AllocateLocal(kWasmS128);
+ byte shift_index = r.AllocateLocal(kWasmI32);
BUILD(r,
WASM_SET_LOCAL(simd1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(
- 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+ WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+ WASM_GET_LOCAL(shift_index))),
WASM_ONE);
FOR_INT16_INPUTS(x) {
@@ -1800,17 +1925,17 @@ void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST(I16x8Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8Shl) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST(I16x8ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8ShrS) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST(I16x8ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I16x8ShrU) {
RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
LogicalShiftRight);
}
@@ -1998,15 +2123,17 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
byte value = 0;
byte simd1 = r.AllocateLocal(kWasmS128);
+ byte shift_index = r.AllocateLocal(kWasmI32);
BUILD(r,
WASM_SET_LOCAL(simd1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
- WASM_SET_GLOBAL(
- 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+ WASM_SET_LOCAL(shift_index, WASM_I32V(shift)),
+ WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd1),
+ WASM_GET_LOCAL(shift_index))),
WASM_ONE);
FOR_INT8_INPUTS(x) {
r.Call(x);
- float expected = expected_op(x, shift);
+ int8_t expected = expected_op(x, shift);
for (int i = 0; i < 16; i++) {
CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
}
@@ -2014,17 +2141,17 @@ void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST(I8x16Shl) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16Shl) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
LogicalShiftLeft);
}
-WASM_SIMD_TEST(I8x16ShrS) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16ShrS) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
ArithmeticShiftRight);
}
-WASM_SIMD_TEST(I8x16ShrU) {
+WASM_SIMD_TEST_NO_LOWERING(I8x16ShrU) {
RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
LogicalShiftRight);
}
@@ -2432,13 +2559,14 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
// test inputs. Test inputs with all true, all false, one true, and one false.
-#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
+#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes, int_type) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
WasmRunner<int32_t> r(execution_tier, lower_simd); \
+ if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte zero = r.AllocateLocal(kWasmS128); \
byte one_one = r.AllocateLocal(kWasmS128); \
byte reduced = r.AllocateLocal(kWasmI32); \
- BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(WASM_ZERO)), \
+ BUILD(r, WASM_SET_LOCAL(zero, WASM_SIMD_I##format##_SPLAT(int_type(0))), \
WASM_SET_LOCAL( \
reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
@@ -2469,7 +2597,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
WASM_RETURN1(WASM_ZERO)), \
WASM_SET_LOCAL(one_one, \
WASM_SIMD_I##format##_REPLACE_LANE( \
- lanes - 1, WASM_GET_LOCAL(zero), WASM_ONE)), \
+ lanes - 1, WASM_GET_LOCAL(zero), int_type(1))), \
WASM_SET_LOCAL( \
reduced, WASM_SIMD_UNOP(kExprS1x##lanes##AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
@@ -2502,9 +2630,12 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
CHECK_EQ(1, r.Call()); \
}
-WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4)
-WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
-WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4, WASM_I32V)
+WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8, WASM_I32V)
+WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
@@ -2758,7 +2889,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
+WASM_SIMD_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -2776,11 +2907,10 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
-// V8:8665 - Tracking bug to enable reduction tests in the interpreter,
-// and for SIMD lowering.
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
- WASM_SIMD_TEST_NO_LOWERING(S##format##AnyTrue) { \
+ WASM_SIMD_TEST(S##format##AnyTrue) { \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
+ if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
@@ -2790,16 +2920,17 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
DCHECK_EQ(1, r.Call(5)); \
DCHECK_EQ(0, r.Call(0)); \
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ANYTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif // V8_TARGET_ARCH_X64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
- WASM_SIMD_TEST_NO_LOWERING(S##format##AllTrue) { \
+ WASM_SIMD_TEST(S##format##AllTrue) { \
WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
+ if (lanes == 2 && lower_simd == kLowerSimd) return; \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
@@ -2809,9 +2940,9 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
DCHECK_EQ(1, r.Call(0x1)); \
DCHECK_EQ(0, r.Call(0)); \
}
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif // V8_TARGET_ARCH_X64
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 93ae92d697..795fa30e72 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -267,7 +267,7 @@ size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
kAllWasmFeatures, buffer, buffer + size, false, ModuleOrigin::kWasmOrigin,
isolate->counters(), isolate->wasm_engine()->allocator());
CHECK(result.ok());
- const WasmFunction* func = &result.value()->functions[1];
+ const WasmFunction* func = &result.value()->functions[index];
return func->code.offset();
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 855e44aba2..b5bacf57d4 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -273,8 +273,8 @@ TEST(SharedEngineRunThreadedBuildingSync) {
Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
CHECK_EQ(42, isolate.Run(instance));
});
- thread1.Start();
- thread2.Start();
+ CHECK(thread1.Start());
+ CHECK(thread2.Start());
thread1.Join();
thread2.Join();
}
@@ -295,8 +295,8 @@ TEST(SharedEngineRunThreadedBuildingAsync) {
CompileAndInstantiateAsync(isolate, buffer);
CHECK_EQ(42, isolate.Run(instance));
});
- thread1.Start();
- thread2.Start();
+ CHECK(thread1.Start());
+ CHECK(thread2.Start());
thread1.Join();
thread2.Join();
}
@@ -321,8 +321,8 @@ TEST(SharedEngineRunThreadedExecution) {
Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
CHECK_EQ(23, isolate.Run(instance));
});
- thread1.Start();
- thread2.Start();
+ CHECK(thread1.Start());
+ CHECK(thread2.Start());
thread1.Join();
thread2.Join();
}
@@ -358,7 +358,7 @@ TEST(SharedEngineRunThreadedTierUp) {
&module->module()->functions[0], ExecutionTier::kTurbofan);
CHECK_EQ(23, isolate.Run(instance));
});
- for (auto& thread : threads) thread.Start();
+ for (auto& thread : threads) CHECK(thread.Start());
for (auto& thread : threads) thread.Join();
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 6a17b81c56..528d71f53c 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -47,8 +47,8 @@ TestingModuleBuilder::TestingModuleBuilder(
if (maybe_import) {
// Manually compile an import wrapper and insert it into the instance.
CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- auto resolved = compiler::ResolveWasmImportCall(maybe_import->js_function,
- maybe_import->sig, false);
+ auto resolved = compiler::ResolveWasmImportCall(
+ maybe_import->js_function, maybe_import->sig, enabled_features_);
compiler::WasmImportCallKind kind = resolved.first;
Handle<JSReceiver> callable = resolved.second;
WasmImportWrapperCache::ModificationScope cache_scope(
@@ -159,7 +159,7 @@ void TestingModuleBuilder::FreezeSignatureMapAndInitializeWrapperCache() {
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
FreezeSignatureMapAndInitializeWrapperCache();
SetExecutable();
- return WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ return WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate_, instance_object(), index);
}
@@ -324,9 +324,14 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
Handle<Script> script =
isolate_->factory()->NewScript(isolate_->factory()->empty_string());
script->set_type(Script::TYPE_WASM);
+
+ auto native_module = isolate_->wasm_engine()->NewNativeModule(
+ isolate_, enabled_features_, test_module_);
+ native_module->SetWireBytes(OwnedVector<const uint8_t>());
+ native_module->SetRuntimeStubs(isolate_);
+
Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate_, enabled_features_, test_module_, {},
- script, Handle<ByteArray>::null());
+ WasmModuleObject::New(isolate_, std::move(native_module), script);
// This method is called when we initialize TestEnvironment. We don't
// have a memory yet, so we won't create it here. We'll update the
// interpreter when we get a memory. We do have globals, though.
@@ -360,7 +365,7 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
FATAL("Verification failed; pc = +%x, msg = %s", result.error().offset(),
result.error().message().c_str());
}
- builder->LowerInt64();
+ builder->LowerInt64(compiler::WasmGraphBuilder::kCalledFromWasm);
if (!CpuFeatures::SupportsWasmSimd128()) {
builder->SimdScalarLoweringForTesting();
}
@@ -453,8 +458,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
if (!code_.ToHandle(&code)) {
Isolate* isolate = CcTest::InitIsolateOnce();
- auto call_descriptor =
- compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
+ auto call_descriptor = compiler::Linkage::GetSimplifiedCDescriptor(
+ zone(), signature_, CallDescriptor::kInitializeRootRegister);
if (kSystemPointerSize == 4) {
size_t num_params = signature_->parameter_count();