author      Michaël Zasso <targos@protonmail.com>    2019-03-12 09:01:49 +0100
committer   Michaël Zasso <targos@protonmail.com>    2019-03-14 18:49:21 +0100
commit      7b48713334469818661fe276cf571de9c7899f2d (patch)
tree        4dbda49ac88db76ce09dc330a0cb587e68e139ba /deps/v8/test/cctest/compiler
parent      8549ac09b256666cf5275224ec58fab9939ff32e (diff)
download    node-new-7b48713334469818661fe276cf571de9c7899f2d.tar.gz
deps: update V8 to 7.3.492.25
PR-URL: https://github.com/nodejs/node/pull/25852
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/test/cctest/compiler')
29 files changed, 495 insertions, 604 deletions
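
Note: many hunks below replace plain signed arithmetic in the test expectations with V8's wraparound helpers (base::AddWithWraparound, base::MulWithWraparound, base::NegateWithWraparound, base::ShlWithWraparound from src/base/overflowing-math.h) so expected values are computed without signed-overflow undefined behavior. The following is a minimal standalone sketch of that idea — compute on the unsigned representation and cast back — and is an illustrative assumption, not V8's actual implementation:

    // wraparound_sketch.cc — illustrative only; names mirror, but do not
    // reproduce, V8's base::*WithWraparound helpers.
    #include <cassert>
    #include <cstdint>

    namespace sketch {

    // Signed overflow is UB in C++, so do the arithmetic on uint32_t,
    // which wraps modulo 2^32, then convert back to int32_t.
    inline int32_t AddWithWraparound(int32_t a, int32_t b) {
      return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                  static_cast<uint32_t>(b));
    }

    inline int32_t MulWithWraparound(int32_t a, int32_t b) {
      return static_cast<int32_t>(static_cast<uint32_t>(a) *
                                  static_cast<uint32_t>(b));
    }

    inline int32_t NegateWithWraparound(int32_t a) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(a));
    }

    }  // namespace sketch

    int main() {
      // INT32_MAX + 1 and -INT32_MIN would be UB with plain operators;
      // the unsigned round-trip yields two's-complement wraparound instead.
      assert(sketch::AddWithWraparound(INT32_MAX, 1) == INT32_MIN);
      assert(sketch::NegateWithWraparound(INT32_MIN) == INT32_MIN);
      return 0;
    }

(The conversion back to int32_t is implementation-defined before C++20 but behaves as two's-complement wrap on the platforms these tests target.)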
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h index 0aea6e938b..f7d08ec899 100644 --- a/deps/v8/test/cctest/compiler/c-signature.h +++ b/deps/v8/test/cctest/compiler/c-signature.h @@ -29,8 +29,8 @@ namespace compiler { template <typename T> inline constexpr MachineType MachineTypeForC() { - static_assert(std::is_convertible<T, Object*>::value, - "all non-specialized types must be convertible to Object*"); + static_assert(std::is_convertible<T, Object>::value, + "all non-specialized types must be convertible to Object"); return MachineType::AnyTagged(); } @@ -114,7 +114,7 @@ typedef CSignatureOf<int32_t, int32_t, int32_t> CSignature_i_ii; typedef CSignatureOf<uint32_t, uint32_t, uint32_t> CSignature_u_uu; typedef CSignatureOf<float, float, float> CSignature_f_ff; typedef CSignatureOf<double, double, double> CSignature_d_dd; -typedef CSignatureOf<Object*, Object*, Object*> CSignature_o_oo; +typedef CSignatureOf<Object, Object, Object> CSignature_o_oo; } // namespace compiler } // namespace internal diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h index 4bf06a9ba3..4bca79625c 100644 --- a/deps/v8/test/cctest/compiler/call-tester.h +++ b/deps/v8/test/cctest/compiler/call-tester.h @@ -40,6 +40,15 @@ class CallHelper { Isolate* isolate_; }; +template <> +template <typename... Params> +Object CallHelper<Object>::Call(Params... args) { + CSignature::VerifyParams<Params...>(csig_); + Address entry = Generate(); + auto fn = GeneratedCode<Address, Params...>::FromAddress(isolate_, entry); + return Object(fn.Call(args...)); +} + // A call helper that calls the given code object assuming C calling convention. template <typename T> class CodeRunner : public CallHelper<T> { diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h index 7c88998f8a..6707e2ba13 100644 --- a/deps/v8/test/cctest/compiler/code-assembler-tester.h +++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h @@ -46,7 +46,7 @@ class CodeAssemblerTester { : zone_(isolate->allocator(), ZONE_NAME), scope_(isolate), state_(isolate, &zone_, call_descriptor, Code::STUB, name, - PoisoningMitigationLevel::kDontPoison, 0, -1) {} + PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {} CodeAssemblerState* state() { return &state_; } @@ -56,11 +56,13 @@ class CodeAssemblerTester { } Handle<Code> GenerateCode() { - return CodeAssembler::GenerateCode( - &state_, AssemblerOptions::Default(scope_.isolate())); + return GenerateCode(AssemblerOptions::Default(scope_.isolate())); } Handle<Code> GenerateCode(const AssemblerOptions& options) { + if (state_.InsideBlock()) { + CodeAssembler(&state_).Unreachable(); + } return CodeAssembler::GenerateCode(&state_, options); } diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc index f66385a92e..0aff318211 100644 --- a/deps/v8/test/cctest/compiler/codegen-tester.cc +++ b/deps/v8/test/cctest/compiler/codegen-tester.cc @@ -3,6 +3,8 @@ // found in the LICENSE file. 
#include "test/cctest/compiler/codegen-tester.h" + +#include "src/base/overflowing-math.h" #include "src/objects-inl.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/value-helper.h" @@ -381,7 +383,7 @@ void RunSmiConstant(int32_t v) { // TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister #if !V8_TARGET_ARCH_X64 if (Smi::IsValid(v)) { - RawMachineAssemblerTester<Object*> m; + RawMachineAssemblerTester<Object> m; m.Return(m.NumberConstant(v)); CHECK_EQ(Smi::FromInt(v), m.Call()); } @@ -390,14 +392,14 @@ void RunSmiConstant(int32_t v) { void RunNumberConstant(double v) { - RawMachineAssemblerTester<Object*> m; + RawMachineAssemblerTester<Object> m; #if V8_TARGET_ARCH_X64 // TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister Handle<Object> number = m.isolate()->factory()->NewNumber(v); if (number->IsSmi()) return; #endif m.Return(m.NumberConstant(v)); - Object* result = m.Call(); + Object result = m.Call(); m.CheckNumber(v, result); } @@ -419,11 +421,12 @@ TEST(RunInt32Constants) { TEST(RunSmiConstants) { - for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) { + for (int32_t i = 1; i < Smi::kMaxValue && i != 0; + i = base::ShlWithWraparound(i, 1)) { RunSmiConstant(i); - RunSmiConstant(3 * i); - RunSmiConstant(5 * i); - RunSmiConstant(-i); + RunSmiConstant(base::MulWithWraparound(3, i)); + RunSmiConstant(base::MulWithWraparound(5, i)); + RunSmiConstant(base::NegateWithWraparound(i)); RunSmiConstant(i | 1); RunSmiConstant(i | 3); } @@ -444,9 +447,10 @@ TEST(RunNumberConstants) { FOR_INT32_INPUTS(i) { RunNumberConstant(*i); } } - for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) { + for (int32_t i = 1; i < Smi::kMaxValue && i != 0; + i = base::ShlWithWraparound(i, 1)) { RunNumberConstant(i); - RunNumberConstant(-i); + RunNumberConstant(base::NegateWithWraparound(i)); RunNumberConstant(i | 1); RunNumberConstant(i | 3); } @@ -458,24 +462,25 @@ TEST(RunNumberConstants) { TEST(RunEmptyString) { - RawMachineAssemblerTester<Object*> m; + RawMachineAssemblerTester<Object> m; m.Return(m.StringConstant("empty")); m.CheckString("empty", m.Call()); } TEST(RunHeapConstant) { - RawMachineAssemblerTester<Object*> m; + RawMachineAssemblerTester<Object> m; m.Return(m.StringConstant("empty")); m.CheckString("empty", m.Call()); } TEST(RunHeapNumberConstant) { - RawMachineAssemblerTester<HeapObject*> m; + RawMachineAssemblerTester<void*> m; Handle<HeapObject> number = m.isolate()->factory()->NewHeapNumber(100.5); m.Return(m.HeapConstant(number)); - HeapObject* result = m.Call(); + HeapObject result = + HeapObject::cast(Object(reinterpret_cast<Address>(m.Call()))); CHECK_EQ(result, *number); } @@ -575,6 +580,20 @@ TEST(RunBinopTester) { #if V8_TARGET_ARCH_64_BIT // TODO(ahaas): run int64 tests on all platforms when supported. + +namespace { + +int64_t Add4(int64_t a, int64_t b, int64_t c, int64_t d) { + // Operate on uint64_t values to avoid undefined behavior. 
+ return static_cast<int64_t>( + static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + + static_cast<uint64_t>(c) + static_cast<uint64_t>(d)); +} + +int64_t Add3(int64_t a, int64_t b, int64_t c) { return Add4(a, b, c, 0); } + +} // namespace + TEST(RunBufferedRawMachineAssemblerTesterTester) { { BufferedRawMachineAssemblerTester<int64_t> m; @@ -592,8 +611,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1))); FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { - CHECK_EQ(*i + *j, m.Call(*i, *j)); - CHECK_EQ(*j + *i, m.Call(*j, *i)); + CHECK_EQ(base::AddWithWraparound(*i, *j), m.Call(*i, *j)); + CHECK_EQ(base::AddWithWraparound(*j, *i), m.Call(*j, *i)); } } } @@ -604,9 +623,9 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2))); FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { - CHECK_EQ(*i + *i + *j, m.Call(*i, *i, *j)); - CHECK_EQ(*i + *j + *i, m.Call(*i, *j, *i)); - CHECK_EQ(*j + *i + *i, m.Call(*j, *i, *i)); + CHECK_EQ(Add3(*i, *i, *j), m.Call(*i, *i, *j)); + CHECK_EQ(Add3(*i, *j, *i), m.Call(*i, *j, *i)); + CHECK_EQ(Add3(*j, *i, *i), m.Call(*j, *i, *i)); } } } @@ -619,10 +638,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { m.Parameter(3))); FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { - CHECK_EQ(*i + *i + *i + *j, m.Call(*i, *i, *i, *j)); - CHECK_EQ(*i + *i + *j + *i, m.Call(*i, *i, *j, *i)); - CHECK_EQ(*i + *j + *i + *i, m.Call(*i, *j, *i, *i)); - CHECK_EQ(*j + *i + *i + *i, m.Call(*j, *i, *i, *i)); + CHECK_EQ(Add4(*i, *i, *i, *j), m.Call(*i, *i, *i, *j)); + CHECK_EQ(Add4(*i, *i, *j, *i), m.Call(*i, *i, *j, *i)); + CHECK_EQ(Add4(*i, *j, *i, *i), m.Call(*i, *j, *i, *i)); + CHECK_EQ(Add4(*j, *i, *i, *i), m.Call(*j, *i, *i, *i)); } } } @@ -658,10 +677,10 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { m.Call(*i, *j); - CHECK_EQ(*i + *j, result); + CHECK_EQ(base::AddWithWraparound(*i, *j), result); m.Call(*j, *i); - CHECK_EQ(*j + *i, result); + CHECK_EQ(base::AddWithWraparound(*j, *i), result); } } } @@ -677,13 +696,13 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { m.Call(*i, *i, *j); - CHECK_EQ(*i + *i + *j, result); + CHECK_EQ(Add3(*i, *i, *j), result); m.Call(*i, *j, *i); - CHECK_EQ(*i + *j + *i, result); + CHECK_EQ(Add3(*i, *j, *i), result); m.Call(*j, *i, *i); - CHECK_EQ(*j + *i + *i, result); + CHECK_EQ(Add3(*j, *i, *i), result); } } } @@ -702,16 +721,16 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) { FOR_INT64_INPUTS(i) { FOR_INT64_INPUTS(j) { m.Call(*i, *i, *i, *j); - CHECK_EQ(*i + *i + *i + *j, result); + CHECK_EQ(Add4(*i, *i, *i, *j), result); m.Call(*i, *i, *j, *i); - CHECK_EQ(*i + *i + *j + *i, result); + CHECK_EQ(Add4(*i, *i, *j, *i), result); m.Call(*i, *j, *i, *i); - CHECK_EQ(*i + *j + *i + *i, result); + CHECK_EQ(Add4(*i, *j, *i, *i), result); m.Call(*j, *i, *i, *i); - CHECK_EQ(*j + *i + *i + *i, result); + CHECK_EQ(Add4(*j, *i, *i, *i), result); } } } diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h index f9fbd4af3a..dc35a6b928 100644 --- a/deps/v8/test/cctest/compiler/codegen-tester.h +++ b/deps/v8/test/cctest/compiler/codegen-tester.h @@ -5,7 +5,7 @@ #ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_ #define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_ -#include "src/compiler/instruction-selector.h" +#include "src/compiler/backend/instruction-selector.h" #include "src/compiler/pipeline.h" #include 
"src/compiler/raw-machine-assembler.h" #include "src/optimized-compilation-info.h" @@ -59,11 +59,11 @@ class RawMachineAssemblerTester : public HandleAndZoneScope, ~RawMachineAssemblerTester() override = default; - void CheckNumber(double expected, Object* number) { + void CheckNumber(double expected, Object number) { CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number)); } - void CheckString(const char* expected, Object* string) { + void CheckString(const char* expected, Object string) { CHECK( this->isolate()->factory()->InternalizeUtf8String(expected)->SameValue( string)); diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc index 86678606d4..bb23d0644a 100644 --- a/deps/v8/test/cctest/compiler/function-tester.cc +++ b/deps/v8/test/cctest/compiler/function-tester.cc @@ -5,6 +5,7 @@ #include "test/cctest/compiler/function-tester.h" #include "src/api-inl.h" +#include "src/assembler.h" #include "src/compiler.h" #include "src/compiler/linkage.h" #include "src/compiler/pipeline.h" @@ -131,7 +132,7 @@ Handle<Object> FunctionTester::false_value() { Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph, int param_count) { - JSFunction* p = nullptr; + JSFunction p; { // because of the implicit handle scope of FunctionTester. FunctionTester f(graph, param_count); p = *f.function; @@ -142,6 +143,11 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph, Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) { Handle<SharedFunctionInfo> shared(function->shared(), isolate); + IsCompiledScope is_compiled_scope(shared->is_compiled_scope()); + CHECK(is_compiled_scope.is_compiled() || + Compiler::Compile(function, Compiler::CLEAR_EXCEPTION, + &is_compiled_scope)); + Zone zone(isolate->allocator(), ZONE_NAME); OptimizedCompilationInfo info(&zone, isolate, shared, function); @@ -149,14 +155,12 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) { info.MarkAsInliningEnabled(); } - CHECK(function->is_compiled() || - Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)); CHECK(info.shared_info()->HasBytecodeArray()); JSFunction::EnsureFeedbackVector(function); Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, isolate).ToHandleChecked(); - info.context()->native_context()->AddOptimizedCode(*code); + info.native_context()->AddOptimizedCode(*code); function->set_code(*code); return function; } diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h index e0045979d4..4fe0fc9292 100644 --- a/deps/v8/test/cctest/compiler/graph-builder-tester.h +++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h @@ -5,8 +5,9 @@ #ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_ #define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_ +#include "src/assembler.h" +#include "src/compiler/backend/instruction-selector.h" #include "src/compiler/common-operator.h" -#include "src/compiler/instruction-selector.h" #include "src/compiler/linkage.h" #include "src/compiler/machine-operator.h" #include "src/compiler/operator-properties.h" @@ -96,8 +97,9 @@ class GraphBuilderTester : public HandleAndZoneScope, Node* PointerConstant(void* value) { intptr_t intptr_value = reinterpret_cast<intptr_t>(value); - return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value)) - : Int32Constant(static_cast<int>(intptr_value)); + return kSystemPointerSize == 8 + ? 
NewNode(common()->Int64Constant(intptr_value)) + : Int32Constant(static_cast<int>(intptr_value)); } Node* Int32Constant(int32_t value) { return NewNode(common()->Int32Constant(value)); @@ -222,7 +224,7 @@ class GraphBuilderTester : public HandleAndZoneScope, if (has_control) ++input_count_with_deps; if (has_effect) ++input_count_with_deps; Node** buffer = zone()->template NewArray<Node*>(input_count_with_deps); - memcpy(buffer, value_inputs, kPointerSize * value_input_count); + memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count); Node** current_input = buffer + value_input_count; if (has_effect) { *current_input++ = effect_; diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc index f961021913..0414532002 100644 --- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc +++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc @@ -48,13 +48,13 @@ TEST(ProfileDiamond) { m.GenerateCode(); { - uint32_t expected[] = {0, 0, 0, 0}; + uint32_t expected[] = {0, 0, 0, 0, 0, 0}; m.Expect(arraysize(expected), expected); } m.Call(0); { - uint32_t expected[] = {1, 1, 0, 1}; + uint32_t expected[] = {1, 1, 1, 0, 0, 1}; m.Expect(arraysize(expected), expected); } @@ -62,13 +62,13 @@ TEST(ProfileDiamond) { m.Call(1); { - uint32_t expected[] = {1, 0, 1, 1}; + uint32_t expected[] = {1, 0, 0, 1, 1, 1}; m.Expect(arraysize(expected), expected); } m.Call(0); { - uint32_t expected[] = {2, 1, 1, 2}; + uint32_t expected[] = {2, 1, 1, 1, 1, 2}; m.Expect(arraysize(expected), expected); } } @@ -94,7 +94,7 @@ TEST(ProfileLoop) { m.GenerateCode(); { - uint32_t expected[] = {0, 0, 0, 0}; + uint32_t expected[] = {0, 0, 0, 0, 0, 0}; m.Expect(arraysize(expected), expected); } @@ -102,7 +102,7 @@ TEST(ProfileLoop) { for (size_t i = 0; i < arraysize(runs); i++) { m.ResetCounts(); CHECK_EQ(1, m.Call(static_cast<int>(runs[i]))); - uint32_t expected[] = {1, runs[i] + 1, runs[i], 1}; + uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1}; m.Expect(arraysize(expected), expected); } } diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc index 090a0f23cd..46240aa9b1 100644 --- a/deps/v8/test/cctest/compiler/test-branch-combine.cc +++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/base/overflowing-math.h" #include "src/objects-inl.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/codegen-tester.h" @@ -501,7 +502,8 @@ TEST(BranchCombineInt32AddLessThanZero) { FOR_INT32_INPUTS(j) { int32_t a = *i; int32_t b = *j; - int32_t expect = (a + b < 0) ? t_constant : f_constant; + int32_t expect = + (base::AddWithWraparound(a, b) < 0) ? t_constant : f_constant; CHECK_EQ(expect, m.Call(a, b)); } } @@ -529,7 +531,8 @@ TEST(BranchCombineInt32AddGreaterThanOrEqualZero) { FOR_INT32_INPUTS(j) { int32_t a = *i; int32_t b = *j; - int32_t expect = (a + b >= 0) ? t_constant : f_constant; + int32_t expect = + (base::AddWithWraparound(a, b) >= 0) ? t_constant : f_constant; CHECK_EQ(expect, m.Call(a, b)); } } @@ -557,7 +560,8 @@ TEST(BranchCombineInt32ZeroGreaterThanAdd) { FOR_INT32_INPUTS(j) { int32_t a = *i; int32_t b = *j; - int32_t expect = (0 > a + b) ? t_constant : f_constant; + int32_t expect = + (0 > base::AddWithWraparound(a, b)) ? 
t_constant : f_constant; CHECK_EQ(expect, m.Call(a, b)); } } @@ -585,7 +589,8 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) { FOR_INT32_INPUTS(j) { int32_t a = *i; int32_t b = *j; - int32_t expect = (0 <= a + b) ? t_constant : f_constant; + int32_t expect = + (0 <= base::AddWithWraparound(a, b)) ? t_constant : f_constant; CHECK_EQ(expect, m.Call(a, b)); } } @@ -609,8 +614,8 @@ TEST(BranchCombineUint32AddLessThanOrEqualZero) { m.Bind(&blockb); m.Return(m.Int32Constant(f_constant)); - FOR_INT32_INPUTS(i) { - FOR_INT32_INPUTS(j) { + FOR_UINT32_INPUTS(i) { + FOR_UINT32_INPUTS(j) { uint32_t a = *i; uint32_t b = *j; int32_t expect = (a + b <= 0) ? t_constant : f_constant; @@ -637,8 +642,8 @@ TEST(BranchCombineUint32AddGreaterThanZero) { m.Bind(&blockb); m.Return(m.Int32Constant(f_constant)); - FOR_INT32_INPUTS(i) { - FOR_INT32_INPUTS(j) { + FOR_UINT32_INPUTS(i) { + FOR_UINT32_INPUTS(j) { uint32_t a = *i; uint32_t b = *j; int32_t expect = (a + b > 0) ? t_constant : f_constant; @@ -665,8 +670,8 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) { m.Bind(&blockb); m.Return(m.Int32Constant(f_constant)); - FOR_INT32_INPUTS(i) { - FOR_INT32_INPUTS(j) { + FOR_UINT32_INPUTS(i) { + FOR_UINT32_INPUTS(j) { uint32_t a = *i; uint32_t b = *j; int32_t expect = (0 >= a + b) ? t_constant : f_constant; @@ -693,8 +698,8 @@ TEST(BranchCombineUint32ZeroLessThanAdd) { m.Bind(&blockb); m.Return(m.Int32Constant(f_constant)); - FOR_INT32_INPUTS(i) { - FOR_INT32_INPUTS(j) { + FOR_UINT32_INPUTS(i) { + FOR_UINT32_INPUTS(j) { uint32_t a = *i; uint32_t b = *j; int32_t expect = (0 < a + b) ? t_constant : f_constant; diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc index a2243e6edd..3b83d422d2 100644 --- a/deps/v8/test/cctest/compiler/test-code-assembler.cc +++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc @@ -8,6 +8,7 @@ #include "src/compiler/opcodes.h" #include "src/isolate.h" #include "src/objects-inl.h" +#include "src/objects/heap-number-inl.h" #include "test/cctest/compiler/code-assembler-tester.h" #include "test/cctest/compiler/function-tester.h" @@ -68,8 +69,7 @@ TEST(SimpleIntPtrReturn) { m.IntPtrConstant(reinterpret_cast<intptr_t>(&test)))); FunctionTester ft(asm_tester.GenerateCode()); MaybeHandle<Object> result = ft.Call(); - CHECK_EQ(reinterpret_cast<intptr_t>(&test), - reinterpret_cast<intptr_t>(*result.ToHandleChecked())); + CHECK_EQ(reinterpret_cast<Address>(&test), result.ToHandleChecked()->ptr()); } TEST(SimpleDoubleReturn) { @@ -560,6 +560,50 @@ TEST(GotoIfExceptionMultiple) { CHECK(constructor->SameValue(*isolate->type_error_function())); } +TEST(ExceptionHandler) { + Isolate* isolate(CcTest::InitIsolateOnce()); + const int kNumParams = 0; + CodeAssemblerTester asm_tester(isolate, kNumParams); + CodeAssembler m(asm_tester.state()); + + CodeAssembler::TVariable<Object> var(m.SmiConstant(0), &m); + Label exception(&m, {&var}, Label::kDeferred); + { + CodeAssemblerScopedExceptionHandler handler(&m, &exception, &var); + Node* context = m.HeapConstant(Handle<Context>(isolate->native_context())); + m.CallRuntime(Runtime::kThrow, context, m.SmiConstant(2)); + } + m.Return(m.SmiConstant(1)); + + m.Bind(&exception); + m.Return(var.value()); + + FunctionTester ft(asm_tester.GenerateCode(), kNumParams); + CHECK_EQ(2, ft.CallChecked<Smi>()->value()); +} + +TEST(TestCodeAssemblerCodeComment) { + i::FLAG_code_comments = true; + Isolate* isolate(CcTest::InitIsolateOnce()); + const int kNumParams = 0; + CodeAssemblerTester 
asm_tester(isolate, kNumParams); + CodeAssembler m(asm_tester.state()); + + m.Comment("Comment1"); + m.Return(m.SmiConstant(1)); + + Handle<Code> code = asm_tester.GenerateCode(); + CHECK_NE(code->code_comments(), kNullAddress); + CodeCommentsIterator it(code->code_comments()); + CHECK(it.HasCurrent()); + bool found_comment = false; + while (it.HasCurrent()) { + if (strcmp(it.GetComment(), "Comment1") == 0) found_comment = true; + it.Next(); + } + CHECK(found_comment); +} + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc index 8bf29dca69..6125ef4bdb 100644 --- a/deps/v8/test/cctest/compiler/test-code-generator.cc +++ b/deps/v8/test/cctest/compiler/test-code-generator.cc @@ -5,12 +5,13 @@ #include "src/assembler-inl.h" #include "src/base/utils/random-number-generator.h" #include "src/code-stub-assembler.h" -#include "src/codegen.h" -#include "src/compiler/code-generator.h" -#include "src/compiler/instruction.h" +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/instruction.h" #include "src/compiler/linkage.h" #include "src/isolate.h" #include "src/objects-inl.h" +#include "src/objects/heap-number-inl.h" +#include "src/objects/smi.h" #include "src/optimized-compilation-info.h" #include "test/cctest/cctest.h" @@ -129,7 +130,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, __ Int32Constant(0)); for (int lane = 0; lane < 4; lane++) { TNode<Int32T> lane_value = __ LoadAndUntagToWord32FixedArrayElement( - element, __ IntPtrConstant(lane)); + __ CAST(element), __ IntPtrConstant(lane)); vector = tester.raw_assembler_for_testing()->AddNode( tester.raw_assembler_for_testing()->machine()->I32x4ReplaceLane( lane), @@ -156,11 +157,11 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, // ~~~ // FixedArray teardown(CodeObject* /* unused */, FixedArray result, // // Tagged registers. -// Object* r0, Object* r1, ..., +// Object r0, Object r1, ..., // // FP registers. // Float32 s0, Float64 d1, ..., // // Mixed stack slots. -// Float64 mem0, Object* mem1, Float32 mem2, ...) { +// Float64 mem0, Object mem1, Float32 mem2, ...) { // result[0] = r0; // result[1] = r1; // ... @@ -256,7 +257,7 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value, os << value->Number(); break; case MachineRepresentation::kSimd128: { - FixedArray* vector = FixedArray::cast(*value); + FixedArray vector = FixedArray::cast(*value); os << "["; for (int lane = 0; lane < 4; lane++) { os << Smi::cast(*vector->GetValueChecked<Smi>(isolate, lane))->value(); @@ -361,7 +362,7 @@ class TestEnvironment : public HandleAndZoneScope { public: // These constants may be tuned to experiment with different environments. -#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS) +#ifdef V8_TARGET_ARCH_IA32 static constexpr int kGeneralRegisterCount = 3; #else static constexpr int kGeneralRegisterCount = 4; @@ -380,19 +381,12 @@ class TestEnvironment : public HandleAndZoneScope { static constexpr int kDoubleConstantCount = 4; TestEnvironment() - : blocks_(1, main_zone()), + : blocks_(1, NewBlock(main_zone(), RpoNumber::FromInt(0)), main_zone()), code_(main_isolate(), main_zone(), &blocks_), rng_(CcTest::random_number_generator()), supported_reps_({MachineRepresentation::kTagged, MachineRepresentation::kFloat32, MachineRepresentation::kFloat64}) { - // Create and initialize a single empty block in blocks_. 
- InstructionBlock* block = new (main_zone()) InstructionBlock( - main_zone(), RpoNumber::FromInt(0), RpoNumber::Invalid(), - RpoNumber::Invalid(), false, false); - block->set_ao_number(RpoNumber::FromInt(0)); - blocks_[0] = block; - stack_slot_count_ = kTaggedSlotCount + kFloat32SlotCount + kFloat64SlotCount; if (TestSimd128Moves()) { @@ -404,11 +398,11 @@ class TestEnvironment : public HandleAndZoneScope { // ~~~ // FixedArray f(CodeObject* teardown, FixedArray preallocated_result, // // Tagged registers. - // Object*, Object*, ..., + // Object, Object, ..., // // FP registers. // Float32, Float64, Simd128, ..., // // Mixed stack slots. - // Float64, Object*, Float32, Simd128, ...); + // Float64, Object, Float32, Simd128, ...); // ~~~ LocationSignature::Builder test_signature( main_zone(), 1, @@ -539,8 +533,8 @@ class TestEnvironment : public HandleAndZoneScope { // differentiate between a pointer to a HeapNumber and a integer. For this // reason, we make sure all integers are Smis, including constants. for (int i = 0; i < kSmiConstantCount; i++) { - intptr_t smi_value = reinterpret_cast<intptr_t>( - Smi::FromInt(rng_->NextInt(Smi::kMaxValue))); + intptr_t smi_value = static_cast<intptr_t>( + Smi::FromInt(rng_->NextInt(Smi::kMaxValue)).ptr()); Constant constant = kPointerSize == 8 ? Constant(static_cast<int64_t>(smi_value)) : Constant(static_cast<int32_t>(smi_value)); @@ -733,15 +727,13 @@ class TestEnvironment : public HandleAndZoneScope { switch (constant.type()) { case Constant::kInt32: constant_value = - Handle<Smi>(reinterpret_cast<Smi*>( - static_cast<intptr_t>(constant.ToInt32())), + Handle<Smi>(Smi(static_cast<Address>( + static_cast<intptr_t>(constant.ToInt32()))), main_isolate()); break; case Constant::kInt64: - constant_value = - Handle<Smi>(reinterpret_cast<Smi*>( - static_cast<intptr_t>(constant.ToInt64())), - main_isolate()); + constant_value = Handle<Smi>( + Smi(static_cast<Address>(constant.ToInt64())), main_isolate()); break; case Constant::kFloat32: constant_value = main_isolate()->factory()->NewHeapNumber( @@ -824,7 +816,7 @@ class TestEnvironment : public HandleAndZoneScope { Handle<Smi> expected_lane = FixedArray::cast(*expected)->GetValueChecked<Smi>(main_isolate(), lane); - if (!actual_lane->StrictEquals(*expected_lane)) { + if (*actual_lane != *expected_lane) { return false; } } @@ -926,6 +918,11 @@ class TestEnvironment : public HandleAndZoneScope { return allocated_constants_[rep][index]; } + static InstructionBlock* NewBlock(Zone* zone, RpoNumber rpo) { + return new (zone) InstructionBlock(zone, rpo, RpoNumber::Invalid(), + RpoNumber::Invalid(), false, false); + } + v8::base::RandomNumberGenerator* rng() const { return rng_; } InstructionSequence* code() { return &code_; } CallDescriptor* test_descriptor() { return test_descriptor_; } diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc index 504c01cb09..85dd389287 100644 --- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc +++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/compiler/gap-resolver.h" +#include "src/compiler/backend/gap-resolver.h" #include "src/base/utils/random-number-generator.h" #include "test/cctest/cctest.h" @@ -165,14 +165,12 @@ class InterpreterState { friend std::ostream& operator<<(std::ostream& os, const InterpreterState& is) { - for (OperandMap::const_iterator it = is.values_.begin(); - it != is.values_.end(); ++it) { - if (it != is.values_.begin()) os << " "; - InstructionOperand source = FromKey(it->second); - InstructionOperand destination = FromKey(it->first); - MoveOperands mo(source, destination); - PrintableMoveOperands pmo = {GetRegConfig(), &mo}; - os << pmo; + const char* space = ""; + for (auto& value : is.values_) { + InstructionOperand source = FromKey(value.second); + InstructionOperand destination = FromKey(value.first); + os << space << MoveOperands{source, destination}; + space = " "; } return os; } @@ -314,9 +312,9 @@ class ParallelMoveCreator : public HandleAndZoneScope { UNREACHABLE(); } - // min(num_alloctable_general_registers for each arch) == 6 from + // min(num_alloctable_general_registers for each arch) == 5 from // assembler-ia32.h - const int kMaxIndex = 6; + const int kMaxIndex = 5; const int kMaxIndices = kMaxIndex + 1; // Non-FP slots shouldn't overlap FP slots. diff --git a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc index 468961c010..f80718e05e 100644 --- a/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc +++ b/deps/v8/test/cctest/compiler/test-instruction-scheduler.cc @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/compiler/instruction-scheduler.h" -#include "src/compiler/instruction-selector-impl.h" -#include "src/compiler/instruction.h" +#include "src/compiler/backend/instruction-scheduler.h" +#include "src/compiler/backend/instruction-selector-impl.h" +#include "src/compiler/backend/instruction.h" #include "test/cctest/cctest.h" @@ -14,13 +14,11 @@ namespace compiler { // Create InstructionBlocks with a single block. InstructionBlocks* CreateSingleBlock(Zone* zone) { - InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1); - new (blocks) InstructionBlocks(1, nullptr, zone); InstructionBlock* block = new (zone) InstructionBlock(zone, RpoNumber::FromInt(0), RpoNumber::Invalid(), RpoNumber::Invalid(), false, false); - block->set_ao_number(RpoNumber::FromInt(0)); - (*blocks)[0] = block; + InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1); + new (blocks) InstructionBlocks(1, block, zone); return blocks; } diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc index 1140ef9113..a806cd857f 100644 --- a/deps/v8/test/cctest/compiler/test-instruction.cc +++ b/deps/v8/test/cctest/compiler/test-instruction.cc @@ -2,10 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/compiler/code-generator.h" +#include "src/compiler/backend/code-generator.h" +#include "src/compiler/backend/instruction.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" -#include "src/compiler/instruction.h" #include "src/compiler/linkage.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node.h" diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc index 7938c50069..5e6e3b3cc2 100644 --- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc +++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc @@ -82,7 +82,7 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges( Node* new_context = NodeProperties::GetContextInput(r.replacement()); CHECK_EQ(IrOpcode::kHeapConstant, new_context->opcode()); HeapObjectMatcher match(new_context); - CHECK_EQ(*match.Value(), *expected_new_context_object); + CHECK_EQ(Context::cast(*match.Value()), *expected_new_context_object); ContextAccess new_access = ContextAccessOf(r.replacement()->op()); CHECK_EQ(new_access.depth(), expected_new_depth); @@ -108,11 +108,6 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges( static const int slot_index = Context::NATIVE_CONTEXT_INDEX; TEST(ReduceJSLoadContext0) { - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. - bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - ContextSpecializationTester t(Nothing<OuterContext>()); Node* start = t.graph()->NewNode(t.common()->Start(0)); @@ -158,7 +153,7 @@ TEST(ReduceJSLoadContext0) { Node* new_context_input = NodeProperties::GetContextInput(r.replacement()); CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode()); HeapObjectMatcher match(new_context_input); - CHECK_EQ(*native, *match.Value()); + CHECK_EQ(*native, Context::cast(*match.Value())); ContextAccess access = ContextAccessOf(r.replacement()->op()); CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index())); CHECK_EQ(0, static_cast<int>(access.depth())); @@ -177,8 +172,6 @@ TEST(ReduceJSLoadContext0) { CHECK(match.HasValue()); CHECK_EQ(*expected, *match.Value()); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(ReduceJSLoadContext1) { @@ -256,11 +249,6 @@ TEST(ReduceJSLoadContext2) { // context2 <-- context1 <-- context0 (= HeapConstant(context_object1)) // context_object1 <~~ context_object0 - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. - bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - ContextSpecializationTester t(Nothing<OuterContext>()); Node* start = t.graph()->NewNode(t.common()->Start(0)); @@ -331,8 +319,6 @@ TEST(ReduceJSLoadContext2) { t.javascript()->LoadContext(3, slot_index, true), context2, start); t.CheckChangesToValue(load, slot_value0); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(ReduceJSLoadContext3) { @@ -342,11 +328,6 @@ TEST(ReduceJSLoadContext3) { // context_object2 from ReduceJSLoadContext2 for this, so almost all test // expectations are the same as in ReduceJSLoadContext2. - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. 
- bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - HandleAndZoneScope handle_zone_scope; auto factory = handle_zone_scope.main_isolate()->factory(); @@ -421,16 +402,9 @@ TEST(ReduceJSLoadContext3) { t.javascript()->LoadContext(3, slot_index, true), context2, start); t.CheckChangesToValue(load, slot_value0); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(ReduceJSStoreContext0) { - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. - bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - ContextSpecializationTester t(Nothing<OuterContext>()); Node* start = t.graph()->NewNode(t.common()->Start(0)); @@ -484,14 +458,12 @@ TEST(ReduceJSStoreContext0) { Node* new_context_input = NodeProperties::GetContextInput(r.replacement()); CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode()); HeapObjectMatcher match(new_context_input); - CHECK_EQ(*native, *match.Value()); + CHECK_EQ(*native, Context::cast(*match.Value())); ContextAccess access = ContextAccessOf(r.replacement()->op()); CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index())); CHECK_EQ(0, static_cast<int>(access.depth())); CHECK_EQ(false, access.immutable()); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(ReduceJSStoreContext1) { @@ -539,11 +511,6 @@ TEST(ReduceJSStoreContext1) { } TEST(ReduceJSStoreContext2) { - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. - bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - ContextSpecializationTester t(Nothing<OuterContext>()); Node* start = t.graph()->NewNode(t.common()->Start(0)); @@ -594,16 +561,9 @@ TEST(ReduceJSStoreContext2) { context2, context2, start, start); t.CheckContextInputAndDepthChanges(store, context_object0, 0); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(ReduceJSStoreContext3) { - // TODO(neis): The native context below does not have all the fields - // initialized that the heap broker wants to serialize. - bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend; - FLAG_concurrent_compiler_frontend = false; - HandleAndZoneScope handle_zone_scope; auto factory = handle_zone_scope.main_isolate()->factory(); @@ -658,8 +618,6 @@ TEST(ReduceJSStoreContext3) { context2, context2, start, start); t.CheckContextInputAndDepthChanges(store, context_object0, 0); } - - FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend; } TEST(SpecializeJSFunction_ToConstant1) { diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc index 52309f41e0..994fea0868 100644 --- a/deps/v8/test/cctest/compiler/test-jump-threading.cc +++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/compiler/instruction-codes.h" -#include "src/compiler/instruction.h" -#include "src/compiler/jump-threading.h" +#include "src/compiler/backend/instruction-codes.h" +#include "src/compiler/backend/instruction.h" +#include "src/compiler/backend/jump-threading.h" #include "src/source-position.h" #include "test/cctest/cctest.h" @@ -613,6 +613,7 @@ TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); } void ApplyForwarding(TestCode& code, int size, int* forward) { + code.sequence_.RecomputeAssemblyOrderForTesting(); ZoneVector<RpoNumber> vector(code.main_zone()); for (int i = 0; i < size; i++) { vector.push_back(RpoNumber::FromInt(forward[i])); diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc index 38c5d17b6b..b8e9479675 100644 --- a/deps/v8/test/cctest/compiler/test-linkage.cc +++ b/deps/v8/test/cctest/compiler/test-linkage.cc @@ -4,7 +4,6 @@ #include "src/api-inl.h" #include "src/code-factory.h" -#include "src/code-stubs.h" #include "src/compiler.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc index df18062acf..073891a52b 100644 --- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc +++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/base/overflowing-math.h" #include "src/base/utils/random-number-generator.h" -#include "src/codegen.h" #include "src/compiler/js-graph.h" #include "src/compiler/machine-operator-reducer.h" #include "src/compiler/operator-properties.h" @@ -85,7 +85,8 @@ class ReducerTester : public HandleAndZoneScope { graph(main_zone()), javascript(main_zone()), jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine), - maxuint32(Constant<int32_t>(kMaxUInt32)) { + maxuint32(Constant<int32_t>(kMaxUInt32)), + graph_reducer(main_zone(), &graph, jsgraph.Dead()) { Node* s = graph.NewNode(common.Start(num_parameters)); graph.SetStart(s); } @@ -99,6 +100,7 @@ class ReducerTester : public HandleAndZoneScope { JSOperatorBuilder javascript; JSGraph jsgraph; Node* maxuint32; + GraphReducer graph_reducer; template <typename T> Node* Constant(volatile T value) { @@ -123,7 +125,7 @@ class ReducerTester : public HandleAndZoneScope { void CheckFoldBinop(volatile T expect, Node* a, Node* b) { CHECK(binop); Node* n = CreateBinopNode(a, b); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(reduction.Changed()); CHECK_NE(n, reduction.replacement()); @@ -143,7 +145,7 @@ class ReducerTester : public HandleAndZoneScope { void CheckBinop(Node* expect, Node* a, Node* b) { CHECK(binop); Node* n = CreateBinopNode(a, b); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(reduction.Changed()); CHECK_EQ(expect, reduction.replacement()); @@ -155,7 +157,7 @@ class ReducerTester : public HandleAndZoneScope { Node* right) { CHECK(binop); Node* n = CreateBinopNode(left, right); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(reduction.Changed()); CHECK_EQ(binop, reduction.replacement()->op()); 
@@ -170,7 +172,7 @@ class ReducerTester : public HandleAndZoneScope { Node* right_expect, Node* left, Node* right) { CHECK(binop); Node* n = CreateBinopNode(left, right); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction r = reducer.Reduce(n); CHECK(r.Changed()); CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode()); @@ -185,7 +187,7 @@ class ReducerTester : public HandleAndZoneScope { volatile T right_expect, Node* left, Node* right) { CHECK(binop); Node* n = CreateBinopNode(left, right); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction r = reducer.Reduce(n); CHECK(r.Changed()); CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode()); @@ -204,7 +206,7 @@ class ReducerTester : public HandleAndZoneScope { Node* k = Constant<T>(constant); { Node* n = CreateBinopNode(k, p); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(!reduction.Changed() || reduction.replacement() == n); CHECK_EQ(p, n->InputAt(0)); @@ -212,7 +214,7 @@ class ReducerTester : public HandleAndZoneScope { } { Node* n = CreateBinopNode(p, k); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(!reduction.Changed()); CHECK_EQ(p, n->InputAt(0)); @@ -228,7 +230,7 @@ class ReducerTester : public HandleAndZoneScope { Node* p = Parameter(); Node* k = Constant<T>(constant); Node* n = CreateBinopNode(k, p); - MachineOperatorReducer reducer(&jsgraph); + MachineOperatorReducer reducer(&graph_reducer, &jsgraph); Reduction reduction = reducer.Reduce(n); CHECK(!reduction.Changed()); CHECK_EQ(k, n->InputAt(0)); @@ -503,7 +505,7 @@ TEST(ReduceInt32Add) { FOR_INT32_INPUTS(pl) { FOR_INT32_INPUTS(pr) { int32_t x = *pl, y = *pr; - R.CheckFoldBinop<int32_t>(x + y, x, y); // TODO(titzer): signed overflow + R.CheckFoldBinop<int32_t>(base::AddWithWraparound(x, y), x, y); } } @@ -524,7 +526,7 @@ TEST(ReduceInt64Add) { FOR_INT64_INPUTS(pl) { FOR_INT64_INPUTS(pr) { int64_t x = *pl, y = *pr; - R.CheckFoldBinop<int64_t>(x + y, x, y); + R.CheckFoldBinop<int64_t>(base::AddWithWraparound(x, y), x, y); } } @@ -543,7 +545,7 @@ TEST(ReduceInt32Sub) { FOR_INT32_INPUTS(pl) { FOR_INT32_INPUTS(pr) { int32_t x = *pl, y = *pr; - R.CheckFoldBinop<int32_t>(x - y, x, y); + R.CheckFoldBinop<int32_t>(base::SubWithWraparound(x, y), x, y); } } @@ -562,7 +564,7 @@ TEST(ReduceInt64Sub) { FOR_INT64_INPUTS(pl) { FOR_INT64_INPUTS(pr) { int64_t x = *pl, y = *pr; - R.CheckFoldBinop<int64_t>(x - y, x, y); + R.CheckFoldBinop<int64_t>(base::SubWithWraparound(x, y), x, y); } } @@ -587,7 +589,7 @@ TEST(ReduceInt32Mul) { FOR_INT32_INPUTS(pl) { FOR_INT32_INPUTS(pr) { int32_t x = *pl, y = *pr; - R.CheckFoldBinop<int32_t>(x * y, x, y); // TODO(titzer): signed overflow + R.CheckFoldBinop<int32_t>(base::MulWithWraparound(x, y), x, y); } } @@ -626,7 +628,8 @@ TEST(ReduceInt32Div) { FOR_INT32_INPUTS(pr) { int32_t x = *pl, y = *pr; if (y == 0) continue; // TODO(titzer): test / 0 - int32_t r = y == -1 ? -x : x / y; // INT_MIN / -1 may explode in C + int32_t r = y == -1 ? 
base::NegateWithWraparound(x) + : x / y; // INT_MIN / -1 may explode in C R.CheckFoldBinop<int32_t>(r, x, y); } } @@ -823,7 +826,7 @@ TEST(ReduceLoadStore) { index, R.graph.start(), R.graph.start()); { - MachineOperatorReducer reducer(&R.jsgraph); + MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph); Reduction reduction = reducer.Reduce(load); CHECK(!reduction.Changed()); // loads should not be reduced. } @@ -833,7 +836,7 @@ TEST(ReduceLoadStore) { R.graph.NewNode(R.machine.Store(StoreRepresentation( MachineRepresentation::kWord32, kNoWriteBarrier)), base, index, load, load, R.graph.start()); - MachineOperatorReducer reducer(&R.jsgraph); + MachineOperatorReducer reducer(&R.graph_reducer, &R.jsgraph); Reduction reduction = reducer.Reduce(store); CHECK(!reduction.Changed()); // stores should not be reduced. } diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc index dccdbd9b92..bf5e829509 100644 --- a/deps/v8/test/cctest/compiler/test-multiple-return.cc +++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc @@ -9,7 +9,6 @@ #include "src/assembler.h" #include "src/base/bits.h" -#include "src/codegen.h" #include "src/compiler.h" #include "src/compiler/linkage.h" #include "src/compiler/wasm-compiler.h" @@ -123,15 +122,11 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate, size_t code_size) { std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule()); module->num_declared_functions = 1; - wasm::ModuleEnv env( - module.get(), wasm::UseTrapHandler::kNoTrapHandler, - wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport); // We have to add the code object to a NativeModule, because the // WasmCallDescriptor assumes that code is on the native heap and not // within a code object. 
return isolate->wasm_engine()->code_manager()->NewNativeModule( - isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module), - env); + isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module)); } void TestReturnMultipleValues(MachineType type) { diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc index c334ecb383..f4218467f7 100644 --- a/deps/v8/test/cctest/compiler/test-representation-change.cc +++ b/deps/v8/test/cctest/compiler/test-representation-change.cc @@ -71,7 +71,7 @@ class RepresentationChangerTester : public HandleAndZoneScope, CHECK_FLOAT_EQ(expected, fval); } - void CheckHeapConstant(Node* n, HeapObject* expected) { + void CheckHeapConstant(Node* n, HeapObject expected) { HeapObjectMatcher m(n); CHECK(m.HasValue()); CHECK_EQ(expected, *m.Value()); @@ -204,6 +204,15 @@ TEST(ToFloat64_constant) { UseInfo(MachineRepresentation::kFloat64, Truncation::None())); r.CheckFloat64Constant(c, i); } + + { + Node* n = r.jsgraph()->Constant(0); + Node* use = r.Return(n); + Node* c = r.changer()->GetRepresentationFor( + n, MachineRepresentation::kWord64, Type::Range(0, 0, r.zone()), use, + UseInfo(MachineRepresentation::kFloat64, Truncation::None())); + r.CheckFloat64Constant(c, 0); + } } @@ -280,7 +289,7 @@ TEST(ToInt64_constant) { Node* n = r.jsgraph()->Constant(*i); Node* use = r.Return(n); Node* c = r.changer()->GetRepresentationFor( - n, MachineRepresentation::kTagged, TypeCache::Get().kSafeInteger, use, + n, MachineRepresentation::kTagged, TypeCache::Get()->kSafeInteger, use, UseInfo(MachineRepresentation::kWord64, Truncation::None())); r.CheckInt64Constant(c, *i); } @@ -299,7 +308,8 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from, CHECK_EQ(expected, c->opcode()); CHECK_EQ(n, c->InputAt(0)); - if (expected == IrOpcode::kCheckedFloat64ToInt32) { + if (expected == IrOpcode::kCheckedFloat64ToInt32 || + expected == IrOpcode::kCheckedFloat64ToInt64) { CheckForMinusZeroMode mode = from_type.Maybe(Type::MinusZero()) ? 
use_info.minus_zero_check() @@ -316,13 +326,13 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from, static void CheckTwoChanges(IrOpcode::Value expected2, IrOpcode::Value expected1, MachineRepresentation from, Type from_type, - MachineRepresentation to) { + MachineRepresentation to, UseInfo use_info) { RepresentationChangerTester r; Node* n = r.Parameter(); Node* use = r.Return(n); - Node* c1 = r.changer()->GetRepresentationFor(n, from, from_type, use, - UseInfo(to, Truncation::None())); + Node* c1 = + r.changer()->GetRepresentationFor(n, from, from_type, use, use_info); CHECK_NE(c1, n); CHECK_EQ(expected1, c1->opcode()); @@ -332,6 +342,14 @@ static void CheckTwoChanges(IrOpcode::Value expected2, CHECK_EQ(n, c2->InputAt(0)); } +static void CheckTwoChanges(IrOpcode::Value expected2, + IrOpcode::Value expected1, + MachineRepresentation from, Type from_type, + MachineRepresentation to) { + CheckTwoChanges(expected2, expected1, from, from_type, to, + UseInfo(to, Truncation::None())); +} + static void CheckChange(IrOpcode::Value expected, MachineRepresentation from, Type from_type, MachineRepresentation to, UseInfo use_info) { @@ -349,13 +367,13 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from, TEST(Word64) { CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord8, - TypeCache::Get().kInt8, MachineRepresentation::kWord64); + TypeCache::Get()->kInt8, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord8, - TypeCache::Get().kUint8, MachineRepresentation::kWord64); + TypeCache::Get()->kUint8, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord16, - TypeCache::Get().kInt16, MachineRepresentation::kWord64); + TypeCache::Get()->kInt16, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord16, - TypeCache::Get().kUint16, MachineRepresentation::kWord64); + TypeCache::Get()->kUint16, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32, Type::Signed32(), MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32, @@ -366,15 +384,15 @@ TEST(Word64) { CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64, Type::Unsigned32(), MachineRepresentation::kWord32); CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64, - TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32, + TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord32, UseInfo::TruncatingWord32()); CheckChange( IrOpcode::kCheckedInt64ToInt32, MachineRepresentation::kWord64, - TypeCache::Get().kSafeInteger, MachineRepresentation::kWord32, + TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord32, UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair())); CheckChange( IrOpcode::kCheckedUint64ToInt32, MachineRepresentation::kWord64, - TypeCache::Get().kPositiveSafeInteger, MachineRepresentation::kWord32, + TypeCache::Get()->kPositiveSafeInteger, MachineRepresentation::kWord32, UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair())); CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64, @@ -382,18 +400,22 @@ TEST(Word64) { CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64, Type::Unsigned32(), MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeFloat64ToInt64, 
MachineRepresentation::kFloat64, - TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64); + TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64, - TypeCache::Get().kInt64, MachineRepresentation::kWord64); + TypeCache::Get()->kInt64, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeFloat64ToUint64, MachineRepresentation::kFloat64, - TypeCache::Get().kUint64, MachineRepresentation::kWord64); + TypeCache::Get()->kUint64, MachineRepresentation::kWord64); + CheckChange( + IrOpcode::kCheckedFloat64ToInt64, MachineRepresentation::kFloat64, + Type::Number(), MachineRepresentation::kWord64, + UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair())); CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64, Type::Signed32(), MachineRepresentation::kFloat64); CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64, Type::Unsigned32(), MachineRepresentation::kFloat64); CheckChange(IrOpcode::kChangeInt64ToFloat64, MachineRepresentation::kWord64, - TypeCache::Get().kSafeInteger, MachineRepresentation::kFloat64); + TypeCache::Get()->kSafeInteger, MachineRepresentation::kFloat64); CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kChangeFloat64ToInt64, @@ -405,12 +427,17 @@ TEST(Word64) { MachineRepresentation::kWord64); CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kChangeFloat64ToInt64, - MachineRepresentation::kFloat32, TypeCache::Get().kInt64, + MachineRepresentation::kFloat32, TypeCache::Get()->kInt64, MachineRepresentation::kWord64); CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kChangeFloat64ToUint64, - MachineRepresentation::kFloat32, TypeCache::Get().kUint64, + MachineRepresentation::kFloat32, TypeCache::Get()->kUint64, MachineRepresentation::kWord64); + CheckTwoChanges( + IrOpcode::kChangeFloat32ToFloat64, IrOpcode::kCheckedFloat64ToInt64, + MachineRepresentation::kFloat32, Type::Number(), + MachineRepresentation::kWord64, + UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair())); CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64, IrOpcode::kTruncateFloat64ToFloat32, @@ -422,12 +449,20 @@ TEST(Word64) { CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged, Type::Unsigned32(), MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged, - TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64); + TypeCache::Get()->kSafeInteger, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged, - TypeCache::Get().kInt64, MachineRepresentation::kWord64); + TypeCache::Get()->kInt64, MachineRepresentation::kWord64); CheckChange(IrOpcode::kChangeTaggedSignedToInt64, MachineRepresentation::kTaggedSigned, Type::SignedSmall(), MachineRepresentation::kWord64); + CheckChange( + IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTagged, + Type::Number(), MachineRepresentation::kWord64, + UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair())); + CheckChange( + IrOpcode::kCheckedTaggedToInt64, MachineRepresentation::kTaggedPointer, + Type::Number(), MachineRepresentation::kWord64, + UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, VectorSlotPair())); CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32, IrOpcode::kChangeInt31ToTaggedSigned, @@ -442,9 +477,9 @@ TEST(Word64) { MachineRepresentation::kWord64, Type::Unsigned32(), MachineRepresentation::kTagged); 
CheckChange(IrOpcode::kChangeInt64ToTagged, MachineRepresentation::kWord64, - TypeCache::Get().kSafeInteger, MachineRepresentation::kTagged); + TypeCache::Get()->kSafeInteger, MachineRepresentation::kTagged); CheckChange(IrOpcode::kChangeUint64ToTagged, MachineRepresentation::kWord64, - TypeCache::Get().kPositiveSafeInteger, + TypeCache::Get()->kPositiveSafeInteger, MachineRepresentation::kTagged); CheckTwoChanges(IrOpcode::kTruncateInt64ToInt32, @@ -458,19 +493,19 @@ TEST(Word64) { MachineRepresentation::kTaggedSigned); } CheckChange(IrOpcode::kCheckedInt64ToTaggedSigned, - MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger, + MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger, MachineRepresentation::kTaggedSigned, UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair())); CheckChange(IrOpcode::kCheckedUint64ToTaggedSigned, MachineRepresentation::kWord64, - TypeCache::Get().kPositiveSafeInteger, + TypeCache::Get()->kPositiveSafeInteger, MachineRepresentation::kTaggedSigned, UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair())); - CheckTwoChanges(IrOpcode::kChangeInt64ToFloat64, - IrOpcode::kChangeFloat64ToTaggedPointer, - MachineRepresentation::kWord64, TypeCache::Get().kSafeInteger, - MachineRepresentation::kTaggedPointer); + CheckTwoChanges( + IrOpcode::kChangeInt64ToFloat64, IrOpcode::kChangeFloat64ToTaggedPointer, + MachineRepresentation::kWord64, TypeCache::Get()->kSafeInteger, + MachineRepresentation::kTaggedPointer); } TEST(SingleChanges) { @@ -589,6 +624,11 @@ TEST(SignednessInWord32) { IrOpcode::kTruncateFloat64ToWord32, MachineRepresentation::kFloat32, Type::Number(), MachineRepresentation::kWord32); + + CheckChange( + IrOpcode::kCheckedUint32ToInt32, MachineRepresentation::kWord32, + Type::Unsigned32(), + UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, VectorSlotPair())); } static void TestMinusZeroCheck(IrOpcode::Value expected, Type from_type) { diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc index 681669f334..775ffadfd4 100644 --- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc +++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc @@ -2703,37 +2703,6 @@ void TestJumpWithConstantsAndWideConstants(size_t shard) { SHARD_TEST_BY_4(JumpWithConstantsAndWideConstants) -TEST(BytecodeGraphBuilderDoExpressions) { - bool old_flag = FLAG_harmony_do_expressions; - FLAG_harmony_do_expressions = true; - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Factory* factory = isolate->factory(); - ExpectedSnippet<0> snippets[] = { - {"var a = do {}; return a;", {factory->undefined_value()}}, - {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}}, - {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}}, - {"var a = do { var x = 100; x++; }; return a;", - {handle(Smi::FromInt(100), isolate)}}, - {"var i = 0; for (; i < 5;) { i = do { if (i == 3) { break; }; i + 1; }};" - "return i;", - {handle(Smi::FromInt(3), isolate)}}, - }; - - for (size_t i = 0; i < arraysize(snippets); i++) { - ScopedVector<char> script(1024); - SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName, - snippets[i].code_snippet, kFunctionName); - - BytecodeGraphTester tester(isolate, script.start()); - auto callable = tester.GetCallable<>(); - Handle<Object> return_value = callable().ToHandleChecked(); - CHECK(return_value->SameValue(*snippets[i].return_value())); - } - - FLAG_harmony_do_expressions 
= old_flag; -} - TEST(BytecodeGraphBuilderWithStatement) { HandleAndZoneScope scope; Isolate* isolate = scope.main_isolate(); diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc index a9c7ed0587..ffee5310d2 100644 --- a/deps/v8/test/cctest/compiler/test-run-load-store.cc +++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc @@ -7,8 +7,8 @@ #include <limits> #include "src/base/bits.h" +#include "src/base/overflowing-math.h" #include "src/base/utils/random-number-generator.h" -#include "src/codegen.h" #include "src/objects-inl.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/codegen-tester.h" @@ -25,6 +25,14 @@ enum TestAlignment { kUnaligned, }; +#if V8_TARGET_LITTLE_ENDIAN +#define LSB(addr, bytes) addr +#elif V8_TARGET_BIG_ENDIAN +#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes) +#else +#error "Unknown Architecture" +#endif + // This is a America! #define A_BILLION 1000000000ULL #define A_GIG (1024ULL * 1024ULL * 1024ULL) @@ -82,7 +90,8 @@ void RunLoadStoreFloat32Offset(TestAlignment t) { float p2 = 0.0f; // and stores directly into this location. FOR_INT32_INPUTS(i) { - int32_t magic = 0x2342AABB + *i * 3; + int32_t magic = + base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3)); RawMachineAssemblerTester<int32_t> m; int32_t offset = *i; byte* from = reinterpret_cast<byte*>(&p1) - offset; @@ -119,7 +128,8 @@ void RunLoadStoreFloat64Offset(TestAlignment t) { double p2 = 0; // and stores directly into this location. FOR_INT32_INPUTS(i) { - int32_t magic = 0x2342AABB + *i * 3; + int32_t magic = + base::AddWithWraparound(0x2342AABB, base::MulWithWraparound(*i, 3)); RawMachineAssemblerTester<int32_t> m; int32_t offset = *i; byte* from = reinterpret_cast<byte*>(&p1) - offset; @@ -178,22 +188,61 @@ TEST(RunUnalignedLoadStoreFloat64Offset) { } namespace { -template <typename Type> -void RunLoadImmIndex(MachineType rep, TestAlignment t) { - const int kNumElems = 3; - Type buffer[kNumElems]; - // initialize the buffer with some raw data. - byte* raw = reinterpret_cast<byte*>(buffer); - for (size_t i = 0; i < sizeof(buffer); i++) { - raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); +// Initializes the buffer with some raw data respecting requested representation +// of the values. +template <typename CType> +void InitBuffer(CType* buffer, size_t length, MachineType rep) { + const size_t kBufferSize = sizeof(CType) * length; + if (!rep.IsTagged()) { + byte* raw = reinterpret_cast<byte*>(buffer); + for (size_t i = 0; i < kBufferSize; i++) { + raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA); + } + return; + } + + // Tagged field loads require values to be properly tagged because of + // pointer decompression that may be happenning during load. + Isolate* isolate = CcTest::InitIsolateOnce(); + Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]); + if (rep.IsTaggedSigned()) { + for (size_t i = 0; i < length; i++) { + smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0); + } + } else { + memcpy(&buffer[0], &isolate->roots_table(), kBufferSize); + if (!rep.IsTaggedPointer()) { + // Also add some Smis if we are checking AnyTagged case. 
+ for (size_t i = 0; i < length / 2; i++) { + smi_view[i] = + Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0); + } + } } +} + +template <typename CType> +void RunLoadImmIndex(MachineType rep, TestAlignment t) { + const int kNumElems = 16; + CType buffer[kNumElems]; + + InitBuffer(buffer, kNumElems, rep); // Test with various large and small offsets. for (int offset = -1; offset <= 200000; offset *= -5) { for (int i = 0; i < kNumElems; i++) { - BufferedRawMachineAssemblerTester<Type> m; - Node* base = m.PointerConstant(buffer - offset); + BufferedRawMachineAssemblerTester<CType> m; + void* base_pointer = &buffer[0] - offset; +#ifdef V8_COMPRESS_POINTERS + if (rep.IsTagged()) { + // When pointer compression is enabled then we need to access only + // the lower 32-bit of the tagged value while the buffer contains + // full 64-bit values. + base_pointer = LSB(base_pointer, kPointerSize / 2); + } +#endif + Node* base = m.PointerConstant(base_pointer); Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0])); if (t == TestAlignment::kAligned) { m.Return(m.Load(rep, base, index)); @@ -203,82 +252,91 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) { UNREACHABLE(); } - volatile Type expected = buffer[i]; - volatile Type actual = m.Call(); - CHECK_EQ(expected, actual); + CHECK_EQ(buffer[i], m.Call()); } } } template <typename CType> +CType NullValue() { + return CType{0}; +} + +template <> +HeapObject NullValue<HeapObject>() { + return HeapObject(); +} + +template <typename CType> void RunLoadStore(MachineType rep, TestAlignment t) { - const int kNumElems = 4; - CType buffer[kNumElems]; + const int kNumElems = 16; + CType in_buffer[kNumElems]; + CType out_buffer[kNumElems]; + + InitBuffer(in_buffer, kNumElems, rep); for (int32_t x = 0; x < kNumElems; x++) { int32_t y = kNumElems - x - 1; - // initialize the buffer with raw data. 
- byte* raw = reinterpret_cast<byte*>(buffer); - for (size_t i = 0; i < sizeof(buffer); i++) { - raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); - } RawMachineAssemblerTester<int32_t> m; int32_t OK = 0x29000 + x; - Node* base = m.PointerConstant(buffer); - Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0])); - Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0])); + Node* in_base = m.PointerConstant(in_buffer); + Node* in_index = m.IntPtrConstant(x * sizeof(CType)); + Node* out_base = m.PointerConstant(out_buffer); + Node* out_index = m.IntPtrConstant(y * sizeof(CType)); if (t == TestAlignment::kAligned) { - Node* load = m.Load(rep, base, index0); - m.Store(rep.representation(), base, index1, load, kNoWriteBarrier); + Node* load = m.Load(rep, in_base, in_index); + m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier); } else if (t == TestAlignment::kUnaligned) { - Node* load = m.UnalignedLoad(rep, base, index0); - m.UnalignedStore(rep.representation(), base, index1, load); + Node* load = m.UnalignedLoad(rep, in_base, in_index); + m.UnalignedStore(rep.representation(), out_base, out_index, load); } m.Return(m.Int32Constant(OK)); - CHECK(buffer[x] != buffer[y]); + memset(out_buffer, 0, sizeof(out_buffer)); + CHECK_NE(in_buffer[x], out_buffer[y]); CHECK_EQ(OK, m.Call()); - CHECK(buffer[x] == buffer[y]); + CHECK_EQ(in_buffer[x], out_buffer[y]); + for (int32_t z = 0; z < kNumElems; z++) { + if (z != y) CHECK_EQ(NullValue<CType>(), out_buffer[z]); + } } } template <typename CType> void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) { CType in, out; - CType in_buffer[2]; - CType out_buffer[2]; - byte* raw; - - for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) { - int y = sizeof(CType) - x; + byte in_buffer[2 * sizeof(CType)]; + byte out_buffer[2 * sizeof(CType)]; - raw = reinterpret_cast<byte*>(&in); - for (size_t i = 0; i < sizeof(CType); i++) { - raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA); - } + InitBuffer(&in, 1, rep); - raw = reinterpret_cast<byte*>(in_buffer); - MemCopy(raw + x, &in, sizeof(CType)); + for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) { + // Direct write to &in_buffer[x] may cause unaligned access in C++ code so + // we use MemCopy() to handle that. + MemCopy(&in_buffer[x], &in, sizeof(CType)); - RawMachineAssemblerTester<int32_t> m; - int32_t OK = 0x29000 + x; + for (int y = 0; y < static_cast<int>(sizeof(CType)); y++) { + RawMachineAssemblerTester<int32_t> m; + int32_t OK = 0x29000 + x; - Node* base0 = m.PointerConstant(in_buffer); - Node* base1 = m.PointerConstant(out_buffer); - Node* index0 = m.IntPtrConstant(x); - Node* index1 = m.IntPtrConstant(y); - Node* load = m.UnalignedLoad(rep, base0, index0); - m.UnalignedStore(rep.representation(), base1, index1, load); + Node* in_base = m.PointerConstant(in_buffer); + Node* in_index = m.IntPtrConstant(x); + Node* load = m.UnalignedLoad(rep, in_base, in_index); - m.Return(m.Int32Constant(OK)); + Node* out_base = m.PointerConstant(out_buffer); + Node* out_index = m.IntPtrConstant(y); + m.UnalignedStore(rep.representation(), out_base, out_index, load); - CHECK_EQ(OK, m.Call()); + m.Return(m.Int32Constant(OK)); - raw = reinterpret_cast<byte*>(&out_buffer); - MemCopy(&out, raw + y, sizeof(CType)); - CHECK(in == out); + CHECK_EQ(OK, m.Call()); + // Direct read of &out_buffer[y] may cause unaligned access in C++ code + // so we use MemCopy() to handle that. 
+ MemCopy(&out, &out_buffer[y], sizeof(CType)); + CHECK_EQ(in, out); + } } } } // namespace @@ -290,7 +348,11 @@ TEST(RunLoadImmIndex) { RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned); RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned); RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned); - RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned); + RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kAligned); + RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned); + RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(), + TestAlignment::kAligned); + RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kAligned); RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned); RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned); #if V8_TARGET_ARCH_64_BIT @@ -304,8 +366,11 @@ TEST(RunUnalignedLoadImmIndex) { RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned); RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned); RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned); - RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), - TestAlignment::kUnaligned); + RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned); + RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned); + RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(), + TestAlignment::kUnaligned); + RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned); RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned); RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned); #if V8_TARGET_ARCH_64_BIT @@ -321,7 +386,11 @@ TEST(RunLoadStore) { RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned); RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned); RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned); - RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned); + RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kAligned); + RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kAligned); + RunLoadStore<HeapObject>(MachineType::TaggedPointer(), + TestAlignment::kAligned); + RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kAligned); RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned); RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned); #if V8_TARGET_ARCH_64_BIT @@ -334,7 +403,11 @@ TEST(RunUnalignedLoadStore) { RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned); RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned); RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned); - RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned); + RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned); + RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned); + RunLoadStore<HeapObject>(MachineType::TaggedPointer(), + TestAlignment::kUnaligned); + RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned); RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned); RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned); #if V8_TARGET_ARCH_64_BIT @@ -347,7 +420,11 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) { 
RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16()); RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32()); RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32()); - RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged()); + RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer()); + RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned()); + RunUnalignedLoadStoreUnalignedAccess<HeapObject>( + MachineType::TaggedPointer()); + RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged()); RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32()); RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64()); #if V8_TARGET_ARCH_64_BIT @@ -355,14 +432,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) { #endif } -#if V8_TARGET_LITTLE_ENDIAN -#define LSB(addr, bytes) addr -#elif V8_TARGET_BIG_ENDIAN -#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes -#else -#error "Unknown Architecture" -#endif - namespace { void RunLoadStoreSignExtend32(TestAlignment t) { int32_t buffer[4]; @@ -608,6 +677,10 @@ TEST(RunUnalignedLoadStoreTruncation) { LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned); } +#undef LSB +#undef A_BILLION +#undef A_GIG + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc index 419d1b0699..782e9b51b8 100644 --- a/deps/v8/test/cctest/compiler/test-run-machops.cc +++ b/deps/v8/test/cctest/compiler/test-run-machops.cc @@ -8,9 +8,9 @@ #include "src/base/bits.h" #include "src/base/ieee754.h" +#include "src/base/overflowing-math.h" #include "src/base/utils/random-number-generator.h" #include "src/boxed-float.h" -#include "src/codegen.h" #include "src/objects-inl.h" #include "src/utils.h" #include "test/cctest/cctest.h" @@ -848,7 +848,7 @@ TEST(RunDiamondPhiConst) { TEST(RunDiamondPhiNumber) { - RawMachineAssemblerTester<Object*> m(MachineType::Int32()); + RawMachineAssemblerTester<Object> m(MachineType::Int32()); double false_val = -11.1; double true_val = 200.1; Node* true_node = m.NumberConstant(true_val); @@ -861,7 +861,7 @@ TEST(RunDiamondPhiNumber) { TEST(RunDiamondPhiString) { - RawMachineAssemblerTester<Object*> m(MachineType::Int32()); + RawMachineAssemblerTester<Object> m(MachineType::Int32()); const char* false_val = "false"; const char* true_val = "true"; Node* true_node = m.StringConstant(true_val); @@ -2058,7 +2058,7 @@ TEST(RunInt32MulP) { bt.AddReturn(m.Int32Mul(bt.param0, bt.param1)); FOR_INT32_INPUTS(i) { FOR_INT32_INPUTS(j) { - int expected = static_cast<int32_t>(*i * *j); + int expected = base::MulWithWraparound(*i, *j); CHECK_EQ(expected, bt.call(*i, *j)); } } @@ -2125,7 +2125,8 @@ TEST(RunInt32MulAndInt32AddP) { m.Int32Mul(m.Parameter(0), m.Int32Constant(p1)))); FOR_INT32_INPUTS(k) { int32_t p2 = *k; - int expected = p0 + static_cast<int32_t>(p1 * p2); + int expected = + base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2)); CHECK_EQ(expected, m.Call(p2)); } } @@ -2142,7 +2143,8 @@ TEST(RunInt32MulAndInt32AddP) { int32_t p0 = *i; int32_t p1 = *j; int32_t p2 = *k; - int expected = p0 + static_cast<int32_t>(p1 * p2); + int expected = + base::AddWithWraparound(p0, base::MulWithWraparound(p1, p2)); CHECK_EQ(expected, m.Call(p0, p1, p2)); } } @@ -2159,7 +2161,8 @@ TEST(RunInt32MulAndInt32AddP) { int32_t p0 = *i; int32_t p1 = *j; int32_t p2 = *k; - int expected = static_cast<int32_t>(p0 * 
p1) + p2; + int expected = + base::AddWithWraparound(base::MulWithWraparound(p0, p1), p2); CHECK_EQ(expected, m.Call(p0, p1, p2)); } } @@ -2175,7 +2178,8 @@ TEST(RunInt32MulAndInt32AddP) { FOR_INT32_INPUTS(k) { int32_t p0 = *j; int32_t p1 = *k; - int expected = *i + static_cast<int32_t>(p0 * p1); + int expected = + base::AddWithWraparound(*i, base::MulWithWraparound(p0, p1)); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -2187,24 +2191,24 @@ TEST(RunInt32MulAndInt32AddP) { TEST(RunInt32MulAndInt32SubP) { { RawMachineAssemblerTester<int32_t> m( - MachineType::Uint32(), MachineType::Int32(), MachineType::Int32()); + MachineType::Int32(), MachineType::Int32(), MachineType::Int32()); m.Return( m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2)))); - FOR_UINT32_INPUTS(i) { + FOR_INT32_INPUTS(i) { FOR_INT32_INPUTS(j) { FOR_INT32_INPUTS(k) { - uint32_t p0 = *i; + int32_t p0 = *i; int32_t p1 = *j; int32_t p2 = *k; - // Use uint32_t because signed overflow is UB in C. - int expected = p0 - static_cast<uint32_t>(p1 * p2); + int expected = + base::SubWithWraparound(p0, base::MulWithWraparound(p1, p2)); CHECK_EQ(expected, m.Call(p0, p1, p2)); } } } } { - FOR_UINT32_INPUTS(i) { + FOR_INT32_INPUTS(i) { RawMachineAssemblerTester<int32_t> m; Int32BinopTester bt(&m); bt.AddReturn( @@ -2213,8 +2217,8 @@ TEST(RunInt32MulAndInt32SubP) { FOR_INT32_INPUTS(k) { int32_t p0 = *j; int32_t p1 = *k; - // Use uint32_t because signed overflow is UB in C. - int expected = *i - static_cast<uint32_t>(p0 * p1); + int expected = + base::SubWithWraparound(*i, base::MulWithWraparound(p0, p1)); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -2262,7 +2266,8 @@ TEST(RunInt32DivP) { int p0 = *i; int p1 = *j; if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) { - int expected = static_cast<int32_t>(p0 + (p0 / p1)); + int expected = + static_cast<int32_t>(base::AddWithWraparound(p0, (p0 / p1))); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -2330,7 +2335,8 @@ TEST(RunInt32ModP) { int p0 = *i; int p1 = *j; if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) { - int expected = static_cast<int32_t>(p0 + (p0 % p1)); + int expected = + static_cast<int32_t>(base::AddWithWraparound(p0, (p0 % p1))); CHECK_EQ(expected, bt.call(p0, p1)); } } @@ -3463,7 +3469,7 @@ TEST(RunInt32NegP) { RawMachineAssemblerTester<int32_t> m(MachineType::Int32()); m.Return(m.Int32Neg(m.Parameter(0))); FOR_INT32_INPUTS(i) { - int expected = -*i; + int expected = base::NegateWithWraparound(*i); CHECK_EQ(expected, m.Call(*i)); } } @@ -3676,7 +3682,9 @@ TEST(RunFloat32Div) { m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1))); FOR_FLOAT32_INPUTS(i) { - FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i / *j, m.Call(*i, *j)); } + FOR_FLOAT32_INPUTS(j) { + CHECK_FLOAT_EQ(base::Divide(*i, *j), m.Call(*i, *j)); + } } } @@ -3725,7 +3733,9 @@ TEST(RunFloat64Div) { m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1))); FOR_FLOAT64_INPUTS(i) { - FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(*i / *j, m.Call(*i, *j)); } + FOR_FLOAT64_INPUTS(j) { + CHECK_DOUBLE_EQ(base::Divide(*i, *j), m.Call(*i, *j)); + } } } @@ -4056,7 +4066,9 @@ TEST(RunFloat32DivP) { bt.AddReturn(m.Float32Div(bt.param0, bt.param1)); FOR_FLOAT32_INPUTS(pl) { - FOR_FLOAT32_INPUTS(pr) { CHECK_FLOAT_EQ(*pl / *pr, bt.call(*pl, *pr)); } + FOR_FLOAT32_INPUTS(pr) { + CHECK_FLOAT_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr)); + } } } @@ -4068,7 +4080,9 @@ TEST(RunFloat64DivP) { bt.AddReturn(m.Float64Div(bt.param0, bt.param1)); FOR_FLOAT64_INPUTS(pl) { - FOR_FLOAT64_INPUTS(pr) { 
CHECK_DOUBLE_EQ(*pl / *pr, bt.call(*pl, *pr)); } + FOR_FLOAT64_INPUTS(pr) { + CHECK_DOUBLE_EQ(base::Divide(*pl, *pr), bt.call(*pl, *pr)); + } } } @@ -4714,7 +4728,7 @@ TEST(RunRefDiamond) { const int magic = 99644; Handle<String> rexpected = CcTest::i_isolate()->factory()->InternalizeUtf8String("A"); - String* buffer; + String buffer; RawMachineLabel blocka, blockb, end; Node* k1 = m.StringConstant("A"); @@ -4743,7 +4757,7 @@ TEST(RunDoubleRefDiamond) { double dconstant = 99.99; Handle<String> rexpected = CcTest::i_isolate()->factory()->InternalizeUtf8String("AX"); - String* rbuffer; + String rbuffer; RawMachineLabel blocka, blockb, end; Node* d1 = m.Float64Constant(dconstant); @@ -4778,7 +4792,7 @@ TEST(RunDoubleRefDoubleDiamond) { double dconstant = 99.997; Handle<String> rexpected = CcTest::i_isolate()->factory()->InternalizeUtf8String("AD"); - String* rbuffer; + String rbuffer; RawMachineLabel blocka, blockb, mid, blockd, blocke, end; Node* d1 = m.Float64Constant(dconstant); @@ -5250,7 +5264,7 @@ TEST(RunSpillConstantsAndParameters) { Node* accs[kInputSize]; Node* acc = m.Int32Constant(0); for (int i = 0; i < kInputSize; i++) { - csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i)); + csts[i] = m.Int32Constant(base::AddWithWraparound(kBase, i)); } for (int i = 0; i < kInputSize; i++) { acc = m.Int32Add(acc, csts[i]); @@ -5262,9 +5276,9 @@ TEST(RunSpillConstantsAndParameters) { m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1)))); FOR_INT32_INPUTS(i) { FOR_INT32_INPUTS(j) { - int32_t expected = *i + *j; + int32_t expected = base::AddWithWraparound(*i, *j); for (int k = 0; k < kInputSize; k++) { - expected += kBase + k; + expected = base::AddWithWraparound(expected, kBase + k); } CHECK_EQ(expected, m.Call(*i, *j)); expected = 0; @@ -5278,7 +5292,7 @@ TEST(RunSpillConstantsAndParameters) { TEST(RunNewSpaceConstantsInPhi) { - RawMachineAssemblerTester<Object*> m(MachineType::Int32()); + RawMachineAssemblerTester<Object> m(MachineType::Int32()); Isolate* isolate = CcTest::i_isolate(); Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2); @@ -6238,17 +6252,15 @@ int32_t foo0() { return kMagicFoo0; } int32_t foo1(int32_t x) { return x; } +int32_t foo2(int32_t x, int32_t y) { return base::SubWithWraparound(x, y); } -int32_t foo2(int32_t x, int32_t y) { return x - y; } - - -int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f, - int32_t g, int32_t h) { +uint32_t foo8(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, + uint32_t f, uint32_t g, uint32_t h) { return a + b + c + d + e + f + g + h; } -int32_t foo9(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f, - int32_t g, int32_t h, int32_t i) { +uint32_t foo9(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, + uint32_t f, uint32_t g, uint32_t h, uint32_t i) { return a + b + c + d + e + f + g + h + i; } @@ -6289,7 +6301,7 @@ TEST(RunCallCFunction2) { int32_t const x = *i; FOR_INT32_INPUTS(j) { int32_t const y = *j; - CHECK_EQ(x - y, m.Call(x, y)); + CHECK_EQ(base::SubWithWraparound(x, y), m.Call(x, y)); } } } @@ -6307,7 +6319,7 @@ TEST(RunCallCFunction8) { function, param, param, param, param, param, param, param, param)); FOR_INT32_INPUTS(i) { int32_t const x = *i; - CHECK_EQ(x * 8, m.Call(x)); + CHECK_EQ(base::MulWithWraparound(x, 8), m.Call(x)); } } @@ -6331,7 +6343,8 @@ TEST(RunCallCFunction9) { m.Int32Add(param, m.Int32Constant(8)))); FOR_INT32_INPUTS(i) { int32_t const x = *i; - CHECK_EQ(x * 9 + 36, m.Call(x)); + 
CHECK_EQ(base::AddWithWraparound(base::MulWithWraparound(x, 9), 36), + m.Call(x)); } } #endif // USE_SIMULATOR diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc index 2ddaa1bc07..19c6abb8fc 100644 --- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc +++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc @@ -5,7 +5,7 @@ #include <vector> #include "src/assembler.h" -#include "src/codegen.h" +#include "src/base/overflowing-math.h" #include "src/compiler/linkage.h" #include "src/compiler/raw-machine-assembler.h" #include "src/machine-type.h" @@ -1061,7 +1061,7 @@ void MixedParamTest(int start) { Handle<Code> wrapper = Handle<Code>::null(); int32_t expected_ret; char bytes[kDoubleSize]; - V8_ALIGNED(8) char output[kDoubleSize]; + alignas(8) char output[kDoubleSize]; int expected_size = 0; CSignatureOf<int32_t> csig; { @@ -1101,7 +1101,8 @@ void MixedParamTest(int start) { CHECK_NOT_NULL(konst); inputs[input_count++] = konst; - constant += 0x1010101010101010; + const int64_t kIncrement = 0x1010101010101010; + constant = base::AddWithWraparound(constant, kIncrement); } Node* call = raw.CallN(desc, input_count, inputs); diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc index 3bbab4265f..24080bc573 100644 --- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc +++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc @@ -4,6 +4,7 @@ #include "src/assembler-inl.h" #include "src/code-stub-assembler.h" +#include "src/macro-assembler.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/code-assembler-tester.h" diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc deleted file mode 100644 index 9c76f22b99..0000000000 --- a/deps/v8/test/cctest/compiler/test-run-stubs.cc +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/bootstrapper.h" -#include "src/callable.h" -#include "src/code-stubs.h" -#include "src/compiler/common-operator.h" -#include "src/compiler/graph.h" -#include "src/compiler/js-graph.h" -#include "src/compiler/js-operator.h" -#include "src/compiler/linkage.h" -#include "src/compiler/machine-operator.h" -#include "src/compiler/pipeline.h" -#include "src/objects-inl.h" -#include "src/objects/js-array-inl.h" -#include "src/optimized-compilation-info.h" -#include "test/cctest/compiler/function-tester.h" - -namespace v8 { -namespace internal { -namespace compiler { - -class StubTester { - public: - StubTester(Zone* zone, CodeStub* stub) - : zone_(zone), - info_(ArrayVector("test"), zone, Code::STUB), - interface_descriptor_(stub->GetCallInterfaceDescriptor()), - descriptor_(Linkage::GetStubCallDescriptor( - zone, interface_descriptor_, stub->GetStackParameterCount(), - CallDescriptor::kNoFlags, Operator::kNoProperties)), - graph_(zone_), - common_(zone_), - tester_(InitializeFunctionTester(stub->GetCode()), - GetParameterCountWithContext()) {} - - StubTester(Isolate* isolate, Zone* zone, Builtins::Name name) - : zone_(zone), - info_(ArrayVector("test"), zone, Code::STUB), - interface_descriptor_( - Builtins::CallableFor(isolate, name).descriptor()), - descriptor_(Linkage::GetStubCallDescriptor( - zone, interface_descriptor_, - interface_descriptor_.GetStackParameterCount(), - CallDescriptor::kNoFlags, Operator::kNoProperties)), - graph_(zone_), - common_(zone_), - tester_(InitializeFunctionTester( - Handle<Code>(isolate->builtins()->builtin(name), isolate)), - GetParameterCountWithContext()) {} - - template <typename... Args> - Handle<Object> Call(Args... args) { - DCHECK_EQ(interface_descriptor_.GetParameterCount(), sizeof...(args)); - MaybeHandle<Object> result = - tester_ - .Call(args..., - Handle<HeapObject>(tester_.function->context(), ft().isolate)) - .ToHandleChecked(); - return result.ToHandleChecked(); - } - - FunctionTester& ft() { return tester_; } - - private: - Graph* InitializeFunctionTester(Handle<Code> stub) { - // Add target, effect and control. - int node_count = GetParameterCountWithContext() + 3; - // Add extra inputs for the JSFunction parameter and the receiver (which for - // the tester is always undefined) to the start node. 
- Node* start = - graph_.NewNode(common_.Start(GetParameterCountWithContext() + 2)); - Node** node_array = zone_->NewArray<Node*>(node_count); - node_array[0] = graph_.NewNode(common_.HeapConstant(stub)); - for (int i = 0; i < GetParameterCountWithContext(); ++i) { - CHECK(IsAnyTagged(descriptor_->GetParameterType(i).representation())); - node_array[i + 1] = graph_.NewNode(common_.Parameter(i + 1), start); - } - node_array[node_count - 2] = start; - node_array[node_count - 1] = start; - Node* call = - graph_.NewNode(common_.Call(descriptor_), node_count, &node_array[0]); - - Node* zero = graph_.NewNode(common_.Int32Constant(0)); - Node* ret = graph_.NewNode(common_.Return(), zero, call, call, start); - Node* end = graph_.NewNode(common_.End(1), ret); - graph_.SetStart(start); - graph_.SetEnd(end); - return &graph_; - } - - int GetParameterCountWithContext() { - return interface_descriptor_.GetParameterCount() + 1; - } - - Zone* zone_; - OptimizedCompilationInfo info_; - CallInterfaceDescriptor interface_descriptor_; - CallDescriptor* descriptor_; - Graph graph_; - CommonOperatorBuilder common_; - FunctionTester tester_; -}; - -TEST(RunStringWrapperLengthStub) { - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Zone* zone = scope.main_zone(); - - StubTester tester(isolate, zone, Builtins::kLoadIC_StringWrapperLength); - - // Actuall call through to the stub, verifying its result. - const char* testString = "Und das Lamm schrie HURZ!"; - Handle<Object> receiverArg = - Object::ToObject(isolate, tester.ft().Val(testString)).ToHandleChecked(); - Handle<Object> nameArg = tester.ft().Val("length"); - Handle<Object> slot = tester.ft().Val(0.0); - Handle<Object> vector = tester.ft().Val(0.0); - Handle<Object> result = tester.Call(receiverArg, nameArg, slot, vector); - CHECK_EQ(static_cast<int>(strlen(testString)), Smi::ToInt(*result)); -} - -TEST(RunArrayExtractStubSimple) { - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Zone* zone = scope.main_zone(); - - StubTester tester(isolate, zone, Builtins::kExtractFastJSArray); - - // Actuall call through to the stub, verifying its result. 
- Handle<JSArray> source_array = isolate->factory()->NewJSArray( - PACKED_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - static_cast<FixedArray*>(source_array->elements())->set(0, Smi::FromInt(5)); - static_cast<FixedArray*>(source_array->elements())->set(1, Smi::FromInt(4)); - static_cast<FixedArray*>(source_array->elements())->set(2, Smi::FromInt(3)); - static_cast<FixedArray*>(source_array->elements())->set(3, Smi::FromInt(2)); - static_cast<FixedArray*>(source_array->elements())->set(4, Smi::FromInt(1)); - Handle<JSArray> result = Handle<JSArray>::cast( - tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate), - Handle<Smi>(Smi::FromInt(5), isolate))); - CHECK_NE(*source_array, *result); - CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS); - CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(0), - Smi::FromInt(5)); - CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(1), - Smi::FromInt(4)); - CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(2), - Smi::FromInt(3)); - CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(3), - Smi::FromInt(2)); - CHECK_EQ(static_cast<FixedArray*>(result->elements())->get(4), - Smi::FromInt(1)); -} - -TEST(RunArrayExtractDoubleStubSimple) { - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Zone* zone = scope.main_zone(); - - StubTester tester(isolate, zone, Builtins::kExtractFastJSArray); - - // Actuall call through to the stub, verifying its result. - Handle<JSArray> source_array = isolate->factory()->NewJSArray( - PACKED_DOUBLE_ELEMENTS, 5, 10, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - static_cast<FixedDoubleArray*>(source_array->elements())->set(0, 5); - static_cast<FixedDoubleArray*>(source_array->elements())->set(1, 4); - static_cast<FixedDoubleArray*>(source_array->elements())->set(2, 3); - static_cast<FixedDoubleArray*>(source_array->elements())->set(3, 2); - static_cast<FixedDoubleArray*>(source_array->elements())->set(4, 1); - Handle<JSArray> result = Handle<JSArray>::cast( - tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate), - Handle<Smi>(Smi::FromInt(5), isolate))); - CHECK_NE(*source_array, *result); - CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS); - CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(0), - 5); - CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(1), - 4); - CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(2), - 3); - CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(3), - 2); - CHECK_EQ(static_cast<FixedDoubleArray*>(result->elements())->get_scalar(4), - 1); -} - -TEST(RunArrayExtractStubTooBigForNewSpace) { - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Zone* zone = scope.main_zone(); - - StubTester tester(isolate, zone, Builtins::kExtractFastJSArray); - - // Actuall call through to the stub, verifying its result. 
- Handle<JSArray> source_array = isolate->factory()->NewJSArray( - PACKED_ELEMENTS, 500000, 500000, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); - for (int i = 0; i < 500000; ++i) { - static_cast<FixedArray*>(source_array->elements())->set(i, Smi::FromInt(i)); - } - Handle<JSArray> result = Handle<JSArray>::cast( - tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate), - Handle<Smi>(Smi::FromInt(500000), isolate))); - CHECK_NE(*source_array, *result); - CHECK_EQ(result->GetElementsKind(), PACKED_ELEMENTS); - for (int i = 0; i < 500000; ++i) { - CHECK_EQ(static_cast<FixedArray*>(source_array->elements())->get(i), - static_cast<FixedArray*>(result->elements())->get(i)); - } -} - -TEST(RunArrayExtractDoubleStubTooBigForNewSpace) { - HandleAndZoneScope scope; - Isolate* isolate = scope.main_isolate(); - Zone* zone = scope.main_zone(); - - StubTester tester(isolate, zone, Builtins::kExtractFastJSArray); - - // Actuall call through to the stub, verifying its result. - Handle<JSArray> source_array = isolate->factory()->NewJSArray( - PACKED_DOUBLE_ELEMENTS, 500000, 500000, - INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, TENURED); - for (int i = 0; i < 500000; ++i) { - static_cast<FixedDoubleArray*>(source_array->elements())->set(i, i); - } - Handle<JSArray> result = Handle<JSArray>::cast( - tester.Call(source_array, Handle<Smi>(Smi::FromInt(0), isolate), - Handle<Smi>(Smi::FromInt(500000), isolate))); - CHECK_NE(*source_array, *result); - CHECK_EQ(result->GetElementsKind(), PACKED_DOUBLE_ELEMENTS); - for (int i = 0; i < 500000; ++i) { - CHECK_EQ( - static_cast<FixedDoubleArray*>(source_array->elements())->get_scalar(i), - static_cast<FixedDoubleArray*>(result->elements())->get_scalar(i)); - } -} - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc index b57b4fcbac..b0ca000a02 100644 --- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc +++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc @@ -5,6 +5,7 @@ #include "src/assembler-inl.h" #include "src/base/utils/random-number-generator.h" #include "src/code-stub-assembler.h" +#include "src/macro-assembler.h" #include "test/cctest/cctest.h" #include "test/cctest/compiler/code-assembler-tester.h" diff --git a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc index e50fcd90cd..5ecc501c2e 100644 --- a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc +++ b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc @@ -9,7 +9,6 @@ #include "src/flags.h" #include "src/objects-inl.h" #include "src/objects.h" -#include "src/unicode-cache.h" #include "test/cctest/compiler/function-tester.h" namespace v8 { diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h index 8e652ec3b5..45750e7e28 100644 --- a/deps/v8/test/cctest/compiler/value-helper.h +++ b/deps/v8/test/cctest/compiler/value-helper.h @@ -48,7 +48,7 @@ class ValueHelper { CHECK_EQ(expected, OpParameter<int32_t>(node->op())); } - void CheckHeapConstant(HeapObject* expected, Node* node) { + void CheckHeapConstant(HeapObject expected, Node* node) { CHECK_EQ(IrOpcode::kHeapConstant, node->opcode()); CHECK_EQ(expected, *HeapConstantOf(node->op())); } |
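
A pattern that recurs throughout the updated tests above (test-run-machops.cc, test-run-load-store.cc, test-run-native-calls.cc) is that expected values are no longer computed with plain signed C++ arithmetic, where overflow is undefined behaviour, but with the wraparound helpers pulled in via the new src/base/overflowing-math.h include: base::AddWithWraparound, base::SubWithWraparound, base::MulWithWraparound, base::NegateWithWraparound and base::Divide. The diff does not show those helpers' definitions; the sketch below uses hypothetical stand-ins in a local namespace to illustrate the usual way such helpers are written, routing the computation through unsigned types, where wraparound is well defined.

    #include <cstdint>
    #include <type_traits>

    namespace sketch {

    // Hypothetical stand-ins for base::AddWithWraparound / base::MulWithWraparound
    // from src/base/overflowing-math.h; the real V8 definitions may differ.
    template <typename T>
    T AddWithWraparound(T a, T b) {
      static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
                    "intended for signed integers, where plain + can overflow");
      using U = typename std::make_unsigned<T>::type;
      // Unsigned arithmetic wraps modulo 2^N, so the sum is always defined.
      return static_cast<T>(static_cast<U>(a) + static_cast<U>(b));
    }

    template <typename T>
    T MulWithWraparound(T a, T b) {
      static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
                    "intended for signed integers, where plain * can overflow");
      using U = typename std::make_unsigned<T>::type;
      return static_cast<T>(static_cast<U>(a) * static_cast<U>(b));
    }

    }  // namespace sketch

    int main() {
      // Mirrors the expectation pattern in RunInt32MulAndInt32AddP above:
      // expected = AddWithWraparound(p0, MulWithWraparound(p1, p2)).
      int32_t p0 = 7;
      int32_t p1 = 0x40000000;  // 2^30
      int32_t p2 = 4;           // p1 * p2 wraps around to 0 in 32 bits
      int32_t expected =
          sketch::AddWithWraparound(p0, sketch::MulWithWraparound(p1, p2));
      return expected == 7 ? 0 : 1;
    }

Converting the wrapped unsigned result back to a signed type is guaranteed to be two's complement only from C++20 onwards, although the compilers V8 targets behave that way in practice, which is presumably part of the motivation for centralising these casts in one header instead of repeating them at every call site.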
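The other recurring idea, in test-run-load-store.cc, is the relocated LSB(addr, bytes) macro: it picks out the least-significant bytes of a value regardless of target endianness, and under V8_COMPRESS_POINTERS the test adjusts its base pointer with it so that only the low 32 bits of a 64-bit tagged slot are read. The helper below is a hypothetical, runtime-detected equivalent of that compile-time macro, shown only to make the addressing explicit; the macro in the diff resolves the same choice at compile time via V8_TARGET_LITTLE_ENDIAN / V8_TARGET_BIG_ENDIAN.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Hypothetical runtime equivalent of the LSB(addr, bytes) macro above:
    // returns a pointer to the least-significant `bytes` of the `total`-byte
    // object at `addr`, whatever the target byte order.
    inline const unsigned char* LeastSignificantBytes(const void* addr,
                                                      std::size_t total,
                                                      std::size_t bytes) {
      const unsigned char* p = static_cast<const unsigned char*>(addr);
      const std::uint16_t probe = 1;
      const bool little_endian =
          *reinterpret_cast<const unsigned char*>(&probe) == 1;
      // Little endian: the low bytes start at the object's address.
      // Big endian: they end at the object's last byte.
      return little_endian ? p : p + (total - bytes);
    }

    int main() {
      std::uint64_t value = 0x1122334455667788ull;
      std::uint32_t low = 0;
      std::memcpy(&low,
                  LeastSignificantBytes(&value, sizeof(value), sizeof(low)),
                  sizeof(low));
      // The low half is 0x55667788 on either endianness.
      return low == 0x55667788u ? 0 : 1;
    }

Used this way, the bytes copied out of the buffer are the same ones a compressed tagged load would see, which is what lets RunLoadImmIndex keep full 64-bit tagged values in its buffer while only the lower halves are loaded when pointer compression is enabled.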