author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit    c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree      e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/compiler
parent    7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
BASELINE: Update Chromium to 85.0.4183.140

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/compiler')
-rw-r--r--  chromium/v8/src/compiler/access-builder.cc | 38
-rw-r--r--  chromium/v8/src/compiler/access-builder.h | 12
-rw-r--r--  chromium/v8/src/compiler/access-info.cc | 8
-rw-r--r--  chromium/v8/src/compiler/allocation-builder-inl.h | 13
-rw-r--r--  chromium/v8/src/compiler/allocation-builder.h | 5
-rw-r--r--  chromium/v8/src/compiler/backend/arm/code-generator-arm.cc | 150
-rw-r--r--  chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h | 16
-rw-r--r--  chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 16
-rw-r--r--  chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 53
-rw-r--r--  chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 123
-rw-r--r--  chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 29
-rw-r--r--  chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 29
-rw-r--r--  chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 86
-rw-r--r--  chromium/v8/src/compiler/backend/code-generator.cc | 47
-rw-r--r--  chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 31
-rw-r--r--  chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 15
-rw-r--r--  chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 15
-rw-r--r--  chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 68
-rw-r--r--  chromium/v8/src/compiler/backend/instruction-selector-impl.h | 2
-rw-r--r--  chromium/v8/src/compiler/backend/instruction-selector.cc | 97
-rw-r--r--  chromium/v8/src/compiler/backend/instruction.h | 2
-rw-r--r--  chromium/v8/src/compiler/backend/mips/code-generator-mips.cc | 102
-rw-r--r--  chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h | 19
-rw-r--r--  chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 19
-rw-r--r--  chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 39
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 102
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 19
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 19
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 39
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 683
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 93
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 93
-rw-r--r--  chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 307
-rw-r--r--  chromium/v8/src/compiler/backend/register-allocator.cc | 47
-rw-r--r--  chromium/v8/src/compiler/backend/register-allocator.h | 11
-rw-r--r--  chromium/v8/src/compiler/backend/s390/code-generator-s390.cc | 133
-rw-r--r--  chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h | 31
-rw-r--r--  chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 31
-rw-r--r--  chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 47
-rw-r--r--  chromium/v8/src/compiler/backend/x64/code-generator-x64.cc | 194
-rw-r--r--  chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h | 19
-rw-r--r--  chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 19
-rw-r--r--  chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 53
-rw-r--r--  chromium/v8/src/compiler/basic-block-instrumentor.cc | 74
-rw-r--r--  chromium/v8/src/compiler/basic-block-instrumentor.h | 6
-rw-r--r--  chromium/v8/src/compiler/bytecode-graph-builder.cc | 319
-rw-r--r--  chromium/v8/src/compiler/bytecode-graph-builder.h | 1
-rw-r--r--  chromium/v8/src/compiler/code-assembler.cc | 5
-rw-r--r--  chromium/v8/src/compiler/code-assembler.h | 3
-rw-r--r--  chromium/v8/src/compiler/effect-control-linearizer.cc | 16
-rw-r--r--  chromium/v8/src/compiler/globals.h | 13
-rw-r--r--  chromium/v8/src/compiler/graph-assembler.cc | 106
-rw-r--r--  chromium/v8/src/compiler/graph-assembler.h | 12
-rw-r--r--  chromium/v8/src/compiler/graph-visualizer.cc | 121
-rw-r--r--  chromium/v8/src/compiler/graph-visualizer.h | 26
-rw-r--r--  chromium/v8/src/compiler/js-call-reducer.cc | 200
-rw-r--r--  chromium/v8/src/compiler/js-create-lowering.cc | 31
-rw-r--r--  chromium/v8/src/compiler/js-generic-lowering.cc | 739
-rw-r--r--  chromium/v8/src/compiler/js-generic-lowering.h | 18
-rw-r--r--  chromium/v8/src/compiler/js-heap-broker.cc | 12
-rw-r--r--  chromium/v8/src/compiler/js-heap-broker.h | 29
-rw-r--r--  chromium/v8/src/compiler/js-heap-copy-reducer.cc | 44
-rw-r--r--  chromium/v8/src/compiler/js-inlining.cc | 7
-rw-r--r--  chromium/v8/src/compiler/js-native-context-specialization.cc | 17
-rw-r--r--  chromium/v8/src/compiler/js-operator.cc | 206
-rw-r--r--  chromium/v8/src/compiler/js-operator.h | 169
-rw-r--r--  chromium/v8/src/compiler/js-type-hint-lowering.cc | 27
-rw-r--r--  chromium/v8/src/compiler/js-type-hint-lowering.h | 1
-rw-r--r--  chromium/v8/src/compiler/js-typed-lowering.cc | 125
-rw-r--r--  chromium/v8/src/compiler/linkage.cc | 17
-rw-r--r--  chromium/v8/src/compiler/linkage.h | 21
-rw-r--r--  chromium/v8/src/compiler/load-elimination.h | 2
-rw-r--r--  chromium/v8/src/compiler/machine-graph-verifier.cc | 46
-rw-r--r--  chromium/v8/src/compiler/machine-graph.cc | 5
-rw-r--r--  chromium/v8/src/compiler/machine-graph.h | 1
-rw-r--r--  chromium/v8/src/compiler/machine-operator-reducer.cc | 173
-rw-r--r--  chromium/v8/src/compiler/machine-operator-reducer.h | 1
-rw-r--r--  chromium/v8/src/compiler/machine-operator.cc | 25
-rw-r--r--  chromium/v8/src/compiler/machine-operator.h | 25
-rw-r--r--  chromium/v8/src/compiler/memory-lowering.h | 1
-rw-r--r--  chromium/v8/src/compiler/node-matchers.h | 2
-rw-r--r--  chromium/v8/src/compiler/node.h | 10
-rw-r--r--  chromium/v8/src/compiler/opcodes.cc | 2
-rw-r--r--  chromium/v8/src/compiler/opcodes.h | 119
-rw-r--r--  chromium/v8/src/compiler/operator-properties.cc | 2
-rw-r--r--  chromium/v8/src/compiler/pipeline-statistics.cc | 4
-rw-r--r--  chromium/v8/src/compiler/pipeline.cc | 365
-rw-r--r--  chromium/v8/src/compiler/representation-change.cc | 68
-rw-r--r--  chromium/v8/src/compiler/representation-change.h | 9
-rw-r--r--  chromium/v8/src/compiler/schedule.cc | 2
-rw-r--r--  chromium/v8/src/compiler/scheduler.cc | 4
-rw-r--r--  chromium/v8/src/compiler/simd-scalar-lowering.cc | 95
-rw-r--r--  chromium/v8/src/compiler/simd-scalar-lowering.h | 1
-rw-r--r--  chromium/v8/src/compiler/simplified-lowering.cc | 428
-rw-r--r--  chromium/v8/src/compiler/simplified-operator.cc | 44
-rw-r--r--  chromium/v8/src/compiler/simplified-operator.h | 2
-rw-r--r--  chromium/v8/src/compiler/typed-optimization.cc | 2
-rw-r--r--  chromium/v8/src/compiler/typer.cc | 81
-rw-r--r--  chromium/v8/src/compiler/types.cc | 4
-rw-r--r--  chromium/v8/src/compiler/wasm-compiler.cc | 456
-rw-r--r--  chromium/v8/src/compiler/wasm-compiler.h | 24
101 files changed, 5077 insertions, 2265 deletions
diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc
index e19067f3c1c..656b250a1c2 100644
--- a/chromium/v8/src/compiler/access-builder.cc
+++ b/chromium/v8/src/compiler/access-builder.cc
@@ -17,6 +17,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/source-text-module.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -530,6 +531,26 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
}
// static
+FieldAccess AccessBuilder::ForSloppyArgumentsElementsContext() {
+ FieldAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kContextOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForSloppyArgumentsElementsArguments() {
+ FieldAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kArgumentsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForPropertyArrayLengthAndHash() {
FieldAccess access = {
kTaggedBase, PropertyArray::kLengthAndHashOffset,
@@ -867,6 +888,14 @@ ElementAccess AccessBuilder::ForFixedArrayElement() {
}
// static
+ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
+ ElementAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kMappedEntriesOffset, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
+ return access;
+}
+
+// static
ElementAccess AccessBuilder::ForFixedArrayElement(
ElementsKind kind, LoadSensitivity load_sensitivity) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
@@ -1139,6 +1168,15 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForFeedbackCellValue() {
+ FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kFullWriteBarrier};
+ return access;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h
index 622dc1d76c2..9edd3272a19 100644
--- a/chromium/v8/src/compiler/access-builder.h
+++ b/chromium/v8/src/compiler/access-builder.h
@@ -179,6 +179,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
+ // Provides access to SloppyArgumentsElements::context() field.
+ static FieldAccess ForSloppyArgumentsElementsContext();
+
+ // Provides access to SloppyArgumentsElements::arguments() field.
+ static FieldAccess ForSloppyArgumentsElementsArguments();
+
// Provides access to PropertyArray::length() field.
static FieldAccess ForPropertyArrayLengthAndHash();
@@ -283,6 +289,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
ElementsKind kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ // Provides access to SloppyArgumentsElements elements.
+ static ElementAccess ForSloppyArgumentsElementsMappedEntry();
+
// Provides access to stack arguments
static ElementAccess ForStackArgument();
@@ -318,6 +327,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryNextEnumerationIndex();
static FieldAccess ForDictionaryObjectHashIndex();
+ // Provides access to a FeedbackCell's value.
+ static FieldAccess ForFeedbackCellValue();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc
index 9a2a56cd8b1..db195c1bf9e 100644
--- a/chromium/v8/src/compiler/access-info.cc
+++ b/chromium/v8/src/compiler/access-info.cc
@@ -36,7 +36,7 @@ bool CanInlinePropertyAccess(Handle<Map> map) {
if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
return map->IsJSObjectMap() && !map->is_dictionary_map() &&
!map->has_named_interceptor() &&
- // TODO(verwaest): Whitelist contexts to which we have access.
+ // TODO(verwaest): Allowlist contexts to which we have access.
!map->is_access_check_needed();
}
@@ -505,8 +505,10 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MaybeHandle<JSObject> holder;
while (true) {
// Lookup the named property on the {map}.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- InternalIndex const number = descriptors->Search(*name, *map);
+ Handle<DescriptorArray> descriptors(
+ map->synchronized_instance_descriptors(), isolate());
+ InternalIndex const number =
+ descriptors->Search(*name, *map, broker()->is_concurrent_inlining());
if (number.is_found()) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore ||
diff --git a/chromium/v8/src/compiler/allocation-builder-inl.h b/chromium/v8/src/compiler/allocation-builder-inl.h
index 26fbe503c36..2b6109f49e2 100644
--- a/chromium/v8/src/compiler/allocation-builder-inl.h
+++ b/chromium/v8/src/compiler/allocation-builder-inl.h
@@ -5,10 +5,11 @@
#ifndef V8_COMPILER_ALLOCATION_BUILDER_INL_H_
#define V8_COMPILER_ALLOCATION_BUILDER_INL_H_
-#include "src/compiler/allocation-builder.h"
-
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/objects/map-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -40,6 +41,14 @@ void AllocationBuilder::AllocateArray(int length, MapRef map,
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
+void AllocationBuilder::AllocateSloppyArgumentElements(
+ int length, MapRef map, AllocationType allocation) {
+ int size = SloppyArgumentsElements::SizeFor(length);
+ Allocate(size, allocation, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/allocation-builder.h b/chromium/v8/src/compiler/allocation-builder.h
index 040dd014051..709146950c6 100644
--- a/chromium/v8/src/compiler/allocation-builder.h
+++ b/chromium/v8/src/compiler/allocation-builder.h
@@ -55,6 +55,11 @@ class AllocationBuilder final {
inline void AllocateArray(int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
+ // Compound allocation of a SloppyArgumentsElements
+ inline void AllocateSloppyArgumentElements(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
+
// Compound store of a constant into a field.
void Store(const FieldAccess& access, const ObjectRef& value) {
Store(access, jsgraph()->Constant(value));
diff --git a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
index d453cf0188d..f50c0c858a7 100644
--- a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -1456,7 +1456,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmVrintmF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintm(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintmF64: {
@@ -1466,7 +1471,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintpF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintp(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintpF64: {
@@ -1476,7 +1486,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintzF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintz(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintzF64: {
@@ -1960,43 +1975,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmF64x2Lt: {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
- __ mov(scratch, Operand(-1), LeaveCC, lt);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, cs);
+ __ mov(scratch, Operand(-1), LeaveCC, mi);
__ vmov(i.OutputSimd128Register().low(), scratch, scratch);
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).high(),
i.InputSimd128Register(1).high());
- __ mov(scratch, Operand(-1), LeaveCC, lt);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, cs);
+ __ mov(scratch, Operand(-1), LeaveCC, mi);
__ vmov(i.OutputSimd128Register().high(), scratch, scratch);
break;
}
case kArmF64x2Le: {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
- __ mov(scratch, Operand(-1), LeaveCC, le);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, hi);
+ __ mov(scratch, Operand(-1), LeaveCC, ls);
__ vmov(i.OutputSimd128Register().low(), scratch, scratch);
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).high(),
i.InputSimd128Register(1).high());
- __ mov(scratch, Operand(-1), LeaveCC, le);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, hi);
+ __ mov(scratch, Operand(-1), LeaveCC, ls);
__ vmov(i.OutputSimd128Register().high(), scratch, scratch);
break;
}
+ case kArmF64x2Pmin: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_EQ(dst, lhs);
+
+ // Move rhs only when rhs is strictly smaller (mi).
+ __ VFPCompareAndSetFlags(rhs.low(), lhs.low());
+ __ vmov(dst.low(), rhs.low(), mi);
+ __ VFPCompareAndSetFlags(rhs.high(), lhs.high());
+ __ vmov(dst.high(), rhs.high(), mi);
+ break;
+ }
+ case kArmF64x2Pmax: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_EQ(dst, lhs);
+
+ // Move rhs only when rhs is strictly greater (gt).
+ __ VFPCompareAndSetFlags(rhs.low(), lhs.low());
+ __ vmov(dst.low(), rhs.low(), gt);
+ __ VFPCompareAndSetFlags(rhs.high(), lhs.high());
+ __ vmov(dst.high(), rhs.high(), gt);
+ break;
+ }
case kArmI64x2SplatI32Pair: {
Simd128Register dst = i.OutputSimd128Register();
__ vdup(Neon32, dst, i.InputRegister(0));
@@ -2068,7 +2101,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register();
- __ vmov(dst, static_cast<uint64_t>(0));
+ __ vmov(dst, uint64_t{0});
__ vqsub(NeonS64, dst, dst, i.InputSimd128Register(0));
break;
}
@@ -2220,6 +2253,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kArmF32x4Pmin: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_NE(dst, lhs);
+ DCHECK_NE(dst, rhs);
+
+ // f32x4.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.lt(rhs, lhs))
+ // = v128.bitselect(rhs, lhs, f32x4.gt(lhs, rhs))
+ __ vcgt(dst, lhs, rhs);
+ __ vbsl(dst, rhs, lhs);
+ break;
+ }
+ case kArmF32x4Pmax: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_NE(dst, lhs);
+ DCHECK_NE(dst, rhs);
+
+ // f32x4.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.gt(rhs, lhs))
+ __ vcgt(dst, rhs, lhs);
+ __ vbsl(dst, rhs, lhs);
+ break;
+ }
case kArmI32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2361,8 +2421,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS32, tmp2, src, 31);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
- __ vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ __ vmov(mask.low(), Double(uint64_t{0x0000'0002'0000'0001}));
+ __ vmov(mask.high(), Double(uint64_t{0x0000'0008'0000'0004}));
__ vand(tmp2, mask, tmp2);
__ vpadd(Neon32, tmp2.low(), tmp2.low(), tmp2.high());
__ vpadd(Neon32, tmp2.low(), tmp2.low(), kDoubleRegZero);
@@ -2538,8 +2598,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS16, tmp2, src, 15);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
- __ vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ __ vmov(mask.low(), Double(uint64_t{0x0008'0004'0002'0001}));
+ __ vmov(mask.high(), Double(uint64_t{0x0080'0040'0020'0010}));
__ vand(tmp2, mask, tmp2);
__ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
__ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
@@ -2692,8 +2752,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS8, tmp2, src, 7);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
- __ vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ __ vmov(mask.low(), Double(uint64_t{0x8040'2010'0804'0201}));
+ __ vmov(mask.high(), Double(uint64_t{0x8040'2010'0804'0201}));
__ vand(tmp2, mask, tmp2);
__ vext(mask, tmp2, tmp2, 8);
__ vzip(Neon8, mask, tmp2);
@@ -3028,7 +3088,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmS1x4AnyTrue: {
+ case kArmV32x4AnyTrue:
+ case kArmV16x8AnyTrue:
+ case kArmV8x16AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3039,7 +3101,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x4AllTrue: {
+ case kArmV32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3050,19 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x8AnyTrue: {
- const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
- DwVfpRegister scratch = temps.AcquireD();
- __ vpmax(NeonU16, scratch, src.low(), src.high());
- __ vpmax(NeonU16, scratch, scratch, scratch);
- __ vpmax(NeonU16, scratch, scratch, scratch);
- __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
- __ cmp(i.OutputRegister(), Operand(0));
- __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
- break;
- }
- case kArmS1x8AllTrue: {
+ case kArmV16x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3074,23 +3124,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x16AnyTrue: {
- const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
- QwNeonRegister q_scratch = temps.AcquireQ();
- DwVfpRegister d_scratch = q_scratch.low();
- __ vpmax(NeonU8, d_scratch, src.low(), src.high());
- __ vpmax(NeonU8, d_scratch, d_scratch, d_scratch);
- // vtst to detect any bits in the bottom 32 bits of d_scratch.
- // This saves an instruction vs. the naive sequence of vpmax.
- // kDoubleRegZero is not changed, since it is 0.
- __ vtst(Neon32, q_scratch, q_scratch, q_scratch);
- __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
- __ cmp(i.OutputRegister(), Operand(0));
- __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
- break;
- }
- case kArmS1x16AllTrue: {
+ case kArmV8x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
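
The kArmF64x2Pmin/Pmax and kArmF32x4Pmin/Pmax cases above implement WebAssembly's pseudo-minimum and pseudo-maximum, which differ from IEEE min/max in tie-breaking and NaN handling. A minimal scalar sketch of the semantics and of the bitselect identity the F32x4 cases rely on (illustrative only, not part of the patch):

#include <cstdint>
#include <cstring>

// Wasm pseudo-min/max per lane: pmin(a, b) = b < a ? b : a and
// pmax(a, b) = a < b ? b : a. If either input is NaN the comparison is
// false, so `a` is returned unchanged.
double pmin(double a, double b) { return b < a ? b : a; }
double pmax(double a, double b) { return a < b ? b : a; }

// bitselect(t, f, mask) takes bits from `t` where the mask is all-ones,
// which is why pmin(lhs, rhs) == bitselect(rhs, lhs, rhs < lhs), the
// identity quoted in the F32x4 cases above.
double bitselect(double t, double f, bool mask) {
  uint64_t tb, fb;
  std::memcpy(&tb, &t, sizeof(tb));
  std::memcpy(&fb, &f, sizeof(fb));
  uint64_t m = mask ? ~uint64_t{0} : uint64_t{0};
  uint64_t r = (tb & m) | (fb & ~m);
  double out;
  std::memcpy(&out, &r, sizeof(out));
  return out;
}
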
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
index c6365bf7a50..39ed658fc4b 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -144,6 +144,8 @@ namespace compiler {
V(ArmF64x2Ne) \
V(ArmF64x2Lt) \
V(ArmF64x2Le) \
+ V(ArmF64x2Pmin) \
+ V(ArmF64x2Pmax) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
@@ -165,6 +167,8 @@ namespace compiler {
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
V(ArmF32x4Le) \
+ V(ArmF32x4Pmin) \
+ V(ArmF32x4Pmax) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Neg) \
@@ -304,12 +308,12 @@ namespace compiler {
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
- V(ArmS1x4AnyTrue) \
- V(ArmS1x4AllTrue) \
- V(ArmS1x8AnyTrue) \
- V(ArmS1x8AllTrue) \
- V(ArmS1x16AnyTrue) \
- V(ArmS1x16AllTrue) \
+ V(ArmV32x4AnyTrue) \
+ V(ArmV32x4AllTrue) \
+ V(ArmV16x8AnyTrue) \
+ V(ArmV16x8AllTrue) \
+ V(ArmV8x16AnyTrue) \
+ V(ArmV8x16AllTrue) \
V(ArmS8x16LoadSplat) \
V(ArmS16x8LoadSplat) \
V(ArmS32x4LoadSplat) \
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 8c09acd6df8..196aa1ce6c0 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -124,6 +124,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Ne:
case kArmF64x2Lt:
case kArmF64x2Le:
+ case kArmF64x2Pmin:
+ case kArmF64x2Pmax:
case kArmF32x4Splat:
case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane:
@@ -145,6 +147,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Ne:
case kArmF32x4Lt:
case kArmF32x4Le:
+ case kArmF32x4Pmin:
+ case kArmF32x4Pmax:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
case kArmI64x2Neg:
@@ -284,12 +288,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x8Reverse:
case kArmS8x4Reverse:
case kArmS8x2Reverse:
- case kArmS1x4AnyTrue:
- case kArmS1x4AllTrue:
- case kArmS1x8AnyTrue:
- case kArmS1x8AllTrue:
- case kArmS1x16AnyTrue:
- case kArmS1x16AllTrue:
+ case kArmV32x4AnyTrue:
+ case kArmV32x4AllTrue:
+ case kArmV16x8AnyTrue:
+ case kArmV16x8AllTrue:
+ case kArmV8x16AnyTrue:
+ case kArmV8x16AllTrue:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 74658697b50..de0e7c4162c 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -1495,7 +1495,10 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
V(Float64RoundTruncate, kArmVrintzF64) \
V(Float64RoundTiesAway, kArmVrintaF64) \
V(Float32RoundTiesEven, kArmVrintnF32) \
- V(Float64RoundTiesEven, kArmVrintnF64)
+ V(Float64RoundTiesEven, kArmVrintnF64) \
+ V(F32x4Ceil, kArmVrintpF32) \
+ V(F32x4Floor, kArmVrintmF32) \
+ V(F32x4Trunc, kArmVrintzF32)
#define RRR_OP_LIST(V) \
V(Int32MulHigh, kArmSmmul) \
@@ -2525,12 +2528,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16Neg, kArmI8x16Neg) \
V(I8x16Abs, kArmI8x16Abs) \
V(S128Not, kArmS128Not) \
- V(S1x4AnyTrue, kArmS1x4AnyTrue) \
- V(S1x4AllTrue, kArmS1x4AllTrue) \
- V(S1x8AnyTrue, kArmS1x8AnyTrue) \
- V(S1x8AllTrue, kArmS1x8AllTrue) \
- V(S1x16AnyTrue, kArmS1x16AnyTrue) \
- V(S1x16AllTrue, kArmS1x16AllTrue)
+ V(V32x4AnyTrue, kArmV32x4AnyTrue) \
+ V(V32x4AllTrue, kArmV32x4AllTrue) \
+ V(V16x8AnyTrue, kArmV16x8AnyTrue) \
+ V(V16x8AllTrue, kArmV16x8AllTrue) \
+ V(V8x16AnyTrue, kArmV8x16AnyTrue) \
+ V(V8x16AllTrue, kArmV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -2941,6 +2944,42 @@ void InstructionSelector::VisitI32x4BitMask(Node* node) {
VisitBitMask<kArmI32x4BitMask>(this, node);
}
+namespace {
+void VisitF32x4PminOrPmax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ // Need all unique registers because we first compare the two inputs, then we
+ // need the inputs to remain unchanged for the bitselect later.
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitF64x2PminOrPMax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitF32x4PminOrPmax(this, kArmF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitF32x4PminOrPmax(this, kArmF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitF64x2PminOrPMax(this, kArmF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitF64x2PminOrPMax(this, kArmF64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 4cf19a5d802..d21440c35b3 100644
--- a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -502,8 +502,9 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- VRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1).gp(); \
+ UseScratchRegisterScope temps(tasm()); \
+ VRegister tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
__ And(shift, i.InputRegister32(1), mask); \
__ Dup(tmp.format(), shift); \
@@ -521,8 +522,9 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- VRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1).gp(); \
+ UseScratchRegisterScope temps(tasm()); \
+ VRegister tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
__ And(shift, i.InputRegister32(1), mask); \
__ Dup(tmp.format(), shift); \
@@ -1901,6 +1903,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
+ case kArm64F64x2Pmin: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ VRegister lhs = i.InputSimd128Register(0).V2D();
+ VRegister rhs = i.InputSimd128Register(1).V2D();
+ // f64x2.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f64x2.lt(rhs,lhs))
+ // = v128.bitselect(rhs, lhs, f64x2.gt(lhs,rhs))
+ __ Fcmgt(dst, lhs, rhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F64x2Pmax: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ VRegister lhs = i.InputSimd128Register(0).V2D();
+ VRegister rhs = i.InputSimd128Register(1).V2D();
+ // f64x2.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f64x2.gt(rhs, lhs))
+ __ Fcmgt(dst, rhs, lhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F64x2RoundUp:
+ __ Frintp(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundDown:
+ __ Frintm(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundTruncate:
+ __ Frintz(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundTiesEven:
+ __ Frintn(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -1953,6 +1992,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
+ case kArm64F32x4Pmin: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ VRegister lhs = i.InputSimd128Register(0).V4S();
+ VRegister rhs = i.InputSimd128Register(1).V4S();
+ // f32x4.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.lt(rhs, lhs))
+ // = v128.bitselect(rhs, lhs, f32x4.gt(lhs, rhs))
+ __ Fcmgt(dst, lhs, rhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F32x4Pmax: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ VRegister lhs = i.InputSimd128Register(0).V4S();
+ VRegister rhs = i.InputSimd128Register(1).V4S();
+ // f32x4.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.gt(rhs, lhs))
+ __ Fcmgt(dst, rhs, lhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F32x4RoundUp:
+ __ Frintp(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundDown:
+ __ Frintm(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundTruncate:
+ __ Frintz(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundTiesEven:
+ __ Frintn(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -2132,6 +2208,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V4S(), 0);
break;
}
+ case kArm64I32x4DotI16x8S: {
+ UseScratchRegisterScope scope(tasm());
+ VRegister lhs = i.InputSimd128Register(0);
+ VRegister rhs = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireV(kFormat4S);
+ VRegister tmp2 = scope.AcquireV(kFormat4S);
+ __ Smull(tmp1, lhs.V4H(), rhs.V4H());
+ __ Smull2(tmp2, lhs.V8H(), rhs.V8H());
+ __ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
+ break;
+ }
case kArm64I16x8Splat: {
__ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
break;
@@ -2480,7 +2567,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
- case kArm64S1x2AllTrue: {
+ case kArm64V64x2AllTrue: {
UseScratchRegisterScope scope(tasm());
VRegister temp1 = scope.AcquireV(kFormat2D);
VRegister temp2 = scope.AcquireV(kFormatS);
@@ -2508,32 +2595,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8Load8x8S: {
- __ ld1(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64I16x8Load8x8U: {
- __ ld1(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64I32x4Load16x4S: {
- __ ld1(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64I32x4Load16x4U: {
- __ ld1(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64I64x2Load32x2S: {
- __ ld1(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
case kArm64I64x2Load32x2U: {
- __ ld1(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
@@ -2548,13 +2635,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; \
}
// for AnyTrue, the format does not matter, umaxv does not support 2D
- SIMD_REDUCE_OP_CASE(kArm64S1x2AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B);
- SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64V64x2AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V32x4AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V32x4AllTrue, Uminv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V16x8AnyTrue, Umaxv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64V16x8AllTrue, Uminv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64V8x16AnyTrue, Umaxv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64V8x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
} // NOLINT(readability/fn_size)
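
The new kArm64I32x4DotI16x8S case widens the 16-bit inputs with Smull/Smull2 and then adds adjacent products with Addp. A scalar reference for the operation's semantics, written as a hedged sketch rather than anything taken from V8:

#include <cstdint>

// i32x4.dot_i16x8_s: each output lane is the sum of two adjacent widened
// signed 16-bit products. The sum can exceed int32 only in the single
// case 2 * (-32768 * -32768), which wraps to INT32_MIN, so accumulate in
// 64 bits and wrap explicitly.
void i32x4_dot_i16x8_s(const int16_t a[8], const int16_t b[8],
                       int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    int64_t sum = int64_t{a[2 * i]} * b[2 * i] +
                  int64_t{a[2 * i + 1]} * b[2 * i + 1];
    out[i] = static_cast<int32_t>(static_cast<uint32_t>(sum));
  }
}
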
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index a8e2b52c028..41f9d78550e 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -186,6 +186,12 @@ namespace compiler {
V(Arm64F64x2Le) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2RoundUp) \
+ V(Arm64F64x2RoundDown) \
+ V(Arm64F64x2RoundTruncate) \
+ V(Arm64F64x2RoundTiesEven) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -209,6 +215,12 @@ namespace compiler {
V(Arm64F32x4Le) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4RoundUp) \
+ V(Arm64F32x4RoundDown) \
+ V(Arm64F32x4RoundTruncate) \
+ V(Arm64F32x4RoundTiesEven) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -256,6 +268,7 @@ namespace compiler {
V(Arm64I32x4GeU) \
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
@@ -361,14 +374,14 @@ namespace compiler {
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
- V(Arm64S1x2AnyTrue) \
- V(Arm64S1x2AllTrue) \
- V(Arm64S1x4AnyTrue) \
- V(Arm64S1x4AllTrue) \
- V(Arm64S1x8AnyTrue) \
- V(Arm64S1x8AllTrue) \
- V(Arm64S1x16AnyTrue) \
- V(Arm64S1x16AllTrue) \
+ V(Arm64V64x2AnyTrue) \
+ V(Arm64V64x2AllTrue) \
+ V(Arm64V32x4AnyTrue) \
+ V(Arm64V32x4AllTrue) \
+ V(Arm64V16x8AnyTrue) \
+ V(Arm64V16x8AllTrue) \
+ V(Arm64V8x16AnyTrue) \
+ V(Arm64V8x16AllTrue) \
V(Arm64S8x16LoadSplat) \
V(Arm64S16x8LoadSplat) \
V(Arm64S32x4LoadSplat) \
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 128ebdac957..3ea84730801 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -156,6 +156,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Le:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
+ case kArm64F64x2Pmin:
+ case kArm64F64x2Pmax:
+ case kArm64F64x2RoundUp:
+ case kArm64F64x2RoundDown:
+ case kArm64F64x2RoundTruncate:
+ case kArm64F64x2RoundTiesEven:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -179,6 +185,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
+ case kArm64F32x4Pmin:
+ case kArm64F32x4Pmax:
+ case kArm64F32x4RoundUp:
+ case kArm64F32x4RoundDown:
+ case kArm64F32x4RoundTruncate:
+ case kArm64F32x4RoundTiesEven:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -226,6 +238,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4GeU:
case kArm64I32x4Abs:
case kArm64I32x4BitMask:
+ case kArm64I32x4DotI16x8S:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
@@ -331,14 +344,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x8Reverse:
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
- case kArm64S1x2AnyTrue:
- case kArm64S1x2AllTrue:
- case kArm64S1x4AnyTrue:
- case kArm64S1x4AllTrue:
- case kArm64S1x8AnyTrue:
- case kArm64S1x8AllTrue:
- case kArm64S1x16AnyTrue:
- case kArm64S1x16AllTrue:
+ case kArm64V64x2AnyTrue:
+ case kArm64V64x2AllTrue:
+ case kArm64V32x4AnyTrue:
+ case kArm64V32x4AllTrue:
+ case kArm64V16x8AnyTrue:
+ case kArm64V16x8AllTrue:
+ case kArm64V8x16AnyTrue:
+ case kArm64V8x16AllTrue:
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 06a87a8aab7..2e0d977c3c7 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -163,13 +163,9 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseImmediate(node->InputAt(1)));
}
} else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- // We only need a unique register for the first input (src), since in
- // the codegen we use tmp to store the shifts, and then later use it with
- // src. The second input can be the same as the second temp (shift).
selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
}
}
@@ -608,18 +604,23 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
InstructionCode opcode = kArchNop;
+ bool require_add = false;
switch (params.transformation) {
case LoadTransformation::kS8x16LoadSplat:
opcode = kArm64S8x16LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS16x8LoadSplat:
opcode = kArm64S16x8LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS32x4LoadSplat:
opcode = kArm64S32x4LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS64x2LoadSplat:
opcode = kArm64S64x2LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kI16x8Load8x8S:
opcode = kArm64I16x8Load8x8S;
@@ -655,13 +656,17 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
inputs[1] = g.UseRegister(index);
outputs[0] = g.DefineAsRegister(node);
- // ld1r uses post-index, so construct address first.
- // TODO(v8:9886) If index can be immediate, use vldr without this add.
- InstructionOperand addr = g.TempRegister();
- Emit(kArm64Add, 1, &addr, 2, inputs);
- inputs[0] = addr;
- inputs[1] = g.TempImmediate(0);
- opcode |= AddressingModeField::encode(kMode_MRI);
+ if (require_add) {
+ // ld1r uses post-index, so construct address first.
+ // TODO(v8:9886) If index can be immediate, use vldr without this add.
+ InstructionOperand addr = g.TempRegister();
+ Emit(kArm64Add, 1, &addr, 2, inputs);
+ inputs[0] = addr;
+ inputs[1] = g.TempImmediate(0);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else {
+ opcode |= AddressingModeField::encode(kMode_MRR);
+ }
Emit(opcode, 1, outputs, 2, inputs);
}
@@ -1360,7 +1365,15 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64RoundTiesEven, kArm64Float64RoundTiesEven) \
V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
- V(Float64SilenceNaN, kArm64Float64SilenceNaN)
+ V(Float64SilenceNaN, kArm64Float64SilenceNaN) \
+ V(F32x4Ceil, kArm64F32x4RoundUp) \
+ V(F32x4Floor, kArm64F32x4RoundDown) \
+ V(F32x4Trunc, kArm64F32x4RoundTruncate) \
+ V(F32x4NearestInt, kArm64F32x4RoundTiesEven) \
+ V(F64x2Ceil, kArm64F64x2RoundUp) \
+ V(F64x2Floor, kArm64F64x2RoundDown) \
+ V(F64x2Trunc, kArm64F64x2RoundTruncate) \
+ V(F64x2NearestInt, kArm64F64x2RoundTiesEven)
#define RRR_OP_LIST(V) \
V(Int32Div, kArm64Idiv32) \
@@ -3184,14 +3197,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Neg, kArm64I8x16Neg) \
V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
- V(S1x2AnyTrue, kArm64S1x2AnyTrue) \
- V(S1x2AllTrue, kArm64S1x2AllTrue) \
- V(S1x4AnyTrue, kArm64S1x4AnyTrue) \
- V(S1x4AllTrue, kArm64S1x4AllTrue) \
- V(S1x8AnyTrue, kArm64S1x8AnyTrue) \
- V(S1x8AllTrue, kArm64S1x8AllTrue) \
- V(S1x16AnyTrue, kArm64S1x16AnyTrue) \
- V(S1x16AllTrue, kArm64S1x16AllTrue)
+ V(V64x2AnyTrue, kArm64V64x2AnyTrue) \
+ V(V64x2AllTrue, kArm64V64x2AllTrue) \
+ V(V32x4AnyTrue, kArm64V32x4AnyTrue) \
+ V(V32x4AllTrue, kArm64V32x4AllTrue) \
+ V(V16x8AnyTrue, kArm64V16x8AnyTrue) \
+ V(V16x8AllTrue, kArm64V16x8AllTrue) \
+ V(V8x16AnyTrue, kArm64V8x16AnyTrue) \
+ V(V8x16AllTrue, kArm64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -3249,6 +3262,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4MaxU, kArm64I32x4MaxU) \
V(I32x4GtU, kArm64I32x4GtU) \
V(I32x4GeU, kArm64I32x4GeU) \
+ V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
@@ -3613,6 +3627,34 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
+namespace {
+void VisitPminOrPmax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ // Need all unique registers because we first compare the two inputs, then we
+ // need the inputs to remain unchanged for the bitselect later.
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitPminOrPmax(this, kArm64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitPminOrPmax(this, kArm64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitPminOrPmax(this, kArm64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitPminOrPmax(this, kArm64F64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
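
The V*AnyTrue/V*AllTrue opcodes renamed throughout this patch reduce a whole vector to one boolean, which is why an unsigned lane maximum (Umaxv) or minimum (Uminv) followed by a zero test suffices. A scalar sketch over 32-bit lanes (illustrative, not V8 code):

#include <cstdint>

// any_true: some lane is non-zero, i.e. the unsigned lane maximum is
// non-zero.
int32_t v32x4_any_true(const uint32_t lanes[4]) {
  uint32_t acc = 0;
  for (int i = 0; i < 4; ++i) acc = lanes[i] > acc ? lanes[i] : acc;
  return acc != 0;
}

// all_true: every lane is non-zero, i.e. the unsigned lane minimum is
// non-zero.
int32_t v32x4_all_true(const uint32_t lanes[4]) {
  uint32_t acc = ~uint32_t{0};
  for (int i = 0; i < 4; ++i) acc = lanes[i] < acc ? lanes[i] : acc;
  return acc != 0;
}
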
diff --git a/chromium/v8/src/compiler/backend/code-generator.cc b/chromium/v8/src/compiler/backend/code-generator.cc
index 72c5750035a..83dccf69e82 100644
--- a/chromium/v8/src/compiler/backend/code-generator.cc
+++ b/chromium/v8/src/compiler/backend/code-generator.cc
@@ -55,19 +55,20 @@ CodeGenerator::CodeGenerator(
frame_access_state_(nullptr),
linkage_(linkage),
instructions_(instructions),
- unwinding_info_writer_(zone()),
+ unwinding_info_writer_(codegen_zone),
info_(info),
- labels_(zone()->NewArray<Label>(instructions->InstructionBlockCount())),
+ labels_(
+ codegen_zone->NewArray<Label>(instructions->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
tasm_(isolate, options, CodeObjectRequired::kNo, std::move(buffer)),
resolver_(this),
- safepoints_(zone()),
- handlers_(zone()),
- deoptimization_exits_(zone()),
- deoptimization_literals_(zone()),
- translations_(zone()),
+ safepoints_(codegen_zone),
+ handlers_(codegen_zone),
+ deoptimization_exits_(codegen_zone),
+ deoptimization_literals_(codegen_zone),
+ translations_(codegen_zone),
max_unoptimized_frame_height_(max_unoptimized_frame_height),
max_pushed_argument_count_(max_pushed_argument_count),
caller_registers_saved_(false),
@@ -77,12 +78,12 @@ CodeGenerator::CodeGenerator(
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(
- SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
- protected_instructions_(zone()),
+ codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
+ protected_instructions_(codegen_zone),
result_(kSuccess),
poisoning_level_(poisoning_level),
- block_starts_(zone()),
- instr_starts_(zone()) {
+ block_starts_(codegen_zone),
+ instr_starts_(codegen_zone) {
for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -161,7 +162,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeReason deoptimization_reason = exit->reason();
Address deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
- if (info()->is_source_positions_enabled()) {
+ if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
}
@@ -191,7 +192,7 @@ void CodeGenerator::AssembleCode() {
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(tasm(), StackFrame::MANUAL);
- if (info->is_source_positions_enabled()) {
+ if (info->source_positions()) {
AssembleSourcePosition(start_source_position());
}
offsets_info_.code_start_register_check = tasm()->pc_offset();
@@ -242,7 +243,7 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.SetNumberOfInstructionBlocks(
instructions()->InstructionBlockCount());
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
block_starts_.assign(instructions()->instruction_blocks().size(), -1);
instr_starts_.assign(instructions()->instructions().size(), {});
}
@@ -253,7 +254,7 @@ void CodeGenerator::AssembleCode() {
if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
tasm()->CodeTargetAlign();
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
}
// Bind a label for a block.
@@ -503,6 +504,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
.set_deoptimization_data(deopt_data)
.set_is_turbofanned()
.set_stack_slots(frame()->GetTotalFrameSlotCount())
+ .set_profiler_data(info()->profiler_data())
.TryBuild();
Handle<Code> code;
@@ -721,7 +723,7 @@ RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
int instruction_index, const InstructionBlock* block) {
Instruction* instr = instructions()->InstructionAt(instruction_index);
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
}
int first_unused_stack_slot;
@@ -741,14 +743,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
if (instr->IsJump() && block->must_deconstruct_frame()) {
AssembleDeconstructFrame();
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
}
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
}
@@ -832,7 +834,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information.
- if (info->trace_turbo_json_enabled() || !tasm()->isolate() ||
+ if (info->trace_turbo_json() || !tasm()->isolate() ||
tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
@@ -979,7 +981,8 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
- handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
+ handlers_.push_back(
+ {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
}
if (needs_frame_state) {
@@ -989,7 +992,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 2;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
- int pc_offset = tasm()->pc_offset();
+ int pc_offset = tasm()->pc_offset_for_safepoint();
BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
}
@@ -1329,7 +1332,7 @@ void CodeGenerator::InitializeSpeculationPoison() {
if (info()->called_with_code_start_register()) {
tasm()->RecordComment("-- Prologue: generate speculation poison --");
GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->is_poisoning_register_arguments()) {
+ if (info()->poison_register_arguments()) {
AssembleRegisterArgumentPoisoning();
}
} else {
diff --git a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index c673458c753..f5a69eec3ea 100644
--- a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2032,6 +2032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxpd(dst, dst, i.InputSimd128Register(1));
break;
}
+ case kIA32F64x2Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kIA32I64x2SplatI32Pair: {
XMMRegister dst = i.OutputSimd128Register();
__ Pinsrd(dst, i.InputRegister(0), 0);
@@ -2442,6 +2448,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, dst, i.InputSimd128Register(1));
break;
}
+ case kIA32F32x4Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundps(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2795,6 +2807,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kIA32I32x4DotI16x8S: {
+ __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -3687,7 +3704,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Out-of-range indices should return 0, add 112 so that any value > 15
// saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, (uint32_t)0x70707070);
+ __ Move(mask, uint32_t{0x70707070});
__ Pshufd(mask, mask, 0x0);
__ Paddusb(mask, i.InputSimd128Register(1));
__ Pshufb(dst, mask);
@@ -4094,9 +4111,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpor(dst, dst, kScratchDoubleReg);
break;
}
- case kIA32S1x4AnyTrue:
- case kIA32S1x8AnyTrue:
- case kIA32S1x16AnyTrue: {
+ case kIA32V32x4AnyTrue:
+ case kIA32V16x8AnyTrue:
+ case kIA32V8x16AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
@@ -4110,13 +4127,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kIA32S1x4AllTrue:
+ case kIA32V32x4AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
- case kIA32S1x8AllTrue:
+ case kIA32V16x8AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
break;
- case kIA32S1x16AllTrue: {
+ case kIA32V8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
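
The kIA32F64x2Round/kIA32F32x4Round cases above decode a RoundingMode out of the opcode's MiscField and forward it to roundpd/roundps. The immediate encoding those SSE4.1 instructions consume is fixed by the ISA; the mapping below is a sketch for orientation, assuming V8's RoundingMode enumerators mirror the SSE rounding-control bits:

#include <cstdint>

// SSE4.1 roundps/roundpd rounding control (imm8 bits [1:0]):
//   00 = nearest even, 01 = round down, 10 = round up, 11 = truncate.
enum SseRoundingMode : uint8_t {
  kRoundToNearest = 0x0,  // F32x4NearestInt / F64x2NearestInt
  kRoundDown = 0x1,       // F32x4Floor / F64x2Floor
  kRoundUp = 0x2,         // F32x4Ceil / F64x2Ceil
  kRoundToZero = 0x3,     // F32x4Trunc / F64x2Trunc
};
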
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index d347d672021..4c49539c4e9 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -136,6 +136,7 @@ namespace compiler {
V(IA32F64x2Le) \
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
+ V(IA32F64x2Round) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Neg) \
@@ -186,6 +187,7 @@ namespace compiler {
V(AVXF32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
+ V(IA32F32x4Round) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -232,6 +234,7 @@ namespace compiler {
V(AVXI32x4GeU) \
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
+ V(IA32I32x4DotI16x8S) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneU) \
V(IA32I16x8ExtractLaneS) \
@@ -396,12 +399,12 @@ namespace compiler {
V(AVXS8x4Reverse) \
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
- V(IA32S1x4AnyTrue) \
- V(IA32S1x4AllTrue) \
- V(IA32S1x8AnyTrue) \
- V(IA32S1x8AllTrue) \
- V(IA32S1x16AnyTrue) \
- V(IA32S1x16AllTrue) \
+ V(IA32V32x4AnyTrue) \
+ V(IA32V32x4AllTrue) \
+ V(IA32V16x8AnyTrue) \
+ V(IA32V16x8AllTrue) \
+ V(IA32V8x16AnyTrue) \
+ V(IA32V8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 52f0b0356ff..6d0062ba09e 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -117,6 +117,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Le:
case kIA32F64x2Pmin:
case kIA32F64x2Pmax:
+ case kIA32F64x2Round:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
case kIA32I64x2Neg:
@@ -167,6 +168,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
+ case kIA32F32x4Round:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -213,6 +215,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4GeU:
case kIA32I32x4Abs:
case kIA32I32x4BitMask:
+ case kIA32I32x4DotI16x8S:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneU:
case kIA32I16x8ExtractLaneS:
@@ -367,12 +370,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS8x4Reverse:
case kSSES8x2Reverse:
case kAVXS8x2Reverse:
- case kIA32S1x4AnyTrue:
- case kIA32S1x4AllTrue:
- case kIA32S1x8AnyTrue:
- case kIA32S1x8AllTrue:
- case kIA32S1x16AnyTrue:
- case kIA32S1x16AllTrue:
+ case kIA32V32x4AnyTrue:
+ case kIA32V32x4AllTrue:
+ case kIA32V16x8AnyTrue:
+ case kIA32V16x8AllTrue:
+ case kIA32V8x16AnyTrue:
+ case kIA32V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index c50464f4b86..5ed7c24e6bf 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -277,6 +277,23 @@ void VisitRRSimd(InstructionSelector* selector, Node* node,
}
}
+// TODO(v8:9198): Like VisitRROFloat, but for SIMD. SSE requires operand1 to be
+// a register as we don't have memory alignment yet. For AVX, memory operands
+// are fine, but can have performance issues if not aligned to 16/32 bytes
+// (based on load size), see SDM Vol 1, chapter 14.9
+void VisitRROSimd(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
+ g.Use(node->InputAt(1)));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
void VisitRRISimd(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
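For illustration, once the binop macros below are rebound to VisitRROSimd, a visitor such as VisitF32x4Add would expand to roughly the following (the opcode names are assumed from the kAVX##Opcode/kSSE##Opcode pattern used by SIMD_BINOP_LIST):

    void InstructionSelector::VisitF32x4Add(Node* node) {
      // AVX: three-operand form, result in a fresh register, memory
      // operand tolerated. SSE: two-operand form, so the output must
      // alias input 0 and input 1 must live in a register.
      VisitRROSimd(this, node, kAVXF32x4Add, kSSEF32x4Add);
    }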
@@ -941,7 +958,16 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
V(Float32RoundTiesEven, \
kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+ V(Float64RoundTiesEven, \
+ kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
+ V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
#define RRO_FLOAT_OP_LIST(V) \
V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
@@ -2100,6 +2126,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(I64x2Add) \
V(I64x2Sub) \
+ V(I32x4DotI16x8S) \
V(I16x8RoundingAverageU) \
V(I8x16RoundingAverageU)
@@ -2131,14 +2158,14 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(S128Not)
#define SIMD_ANYTRUE_LIST(V) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue)
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
#define SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(V) \
V(I64x2Shl) \
@@ -2372,10 +2399,15 @@ SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX
#undef SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX
-#define VISIT_SIMD_UNOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+// TODO(v8:9198): SSE requires operand0 to be a register as we don't have memory
+// alignment yet. For AVX, memory operands are fine, but can have performance
+// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
+// chapter 14.9
+#define VISIT_SIMD_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ Emit(kIA32##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
}
SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
@@ -2407,23 +2439,23 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
IA32OperandGenerator g(this); \
InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
Emit(kIA32##Opcode, g.DefineAsRegister(node), \
- g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
}
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
#undef SIMD_ALLTRUE_LIST
-#define VISIT_SIMD_BINOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROSimd(this, node, kAVX##Opcode, kSSE##Opcode); \
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
-#define VISIT_SIMD_BINOP_UNIFIED_SSE_AVX(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROFloat(this, node, kIA32##Opcode, kIA32##Opcode); \
+#define VISIT_SIMD_BINOP_UNIFIED_SSE_AVX(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROSimd(this, node, kIA32##Opcode, kIA32##Opcode); \
}
SIMD_BINOP_UNIFIED_SSE_AVX_LIST(VISIT_SIMD_BINOP_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_BINOP_UNIFIED_SSE_AVX
diff --git a/chromium/v8/src/compiler/backend/instruction-selector-impl.h b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
index aa7da85e42b..7e1f183fb71 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
@@ -356,6 +356,8 @@ class OperandGenerator {
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
return Constant(static_cast<int32_t>(0));
+ case MachineRepresentation::kWord64:
+ return Constant(static_cast<int64_t>(0));
case MachineRepresentation::kFloat64:
return Constant(static_cast<double>(0));
case MachineRepresentation::kFloat32:
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.cc b/chromium/v8/src/compiler/backend/instruction-selector.cc
index c2022b574ee..8ad88b946b4 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.cc
+++ b/chromium/v8/src/compiler/backend/instruction-selector.cc
@@ -1043,7 +1043,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
InstructionOperand op = g.UseLocation(*iter, location);
UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
if (unallocated.HasFixedSlotPolicy() && !call_tail) {
- int stack_index = -unallocated.fixed_slot_index() - 1;
+ int stack_index = buffer->descriptor->GetStackIndexFromSlot(
+ unallocated.fixed_slot_index());
// This can insert empty slots before stack_index and will insert enough
// slots after stack_index to store the parameter.
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
@@ -1888,6 +1889,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Pmin(node);
case IrOpcode::kF64x2Pmax:
return MarkAsSimd128(node), VisitF64x2Pmax(node);
+ case IrOpcode::kF64x2Ceil:
+ return MarkAsSimd128(node), VisitF64x2Ceil(node);
+ case IrOpcode::kF64x2Floor:
+ return MarkAsSimd128(node), VisitF64x2Floor(node);
+ case IrOpcode::kF64x2Trunc:
+ return MarkAsSimd128(node), VisitF64x2Trunc(node);
+ case IrOpcode::kF64x2NearestInt:
+ return MarkAsSimd128(node), VisitF64x2NearestInt(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1938,6 +1947,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Pmin(node);
case IrOpcode::kF32x4Pmax:
return MarkAsSimd128(node), VisitF32x4Pmax(node);
+ case IrOpcode::kF32x4Ceil:
+ return MarkAsSimd128(node), VisitF32x4Ceil(node);
+ case IrOpcode::kF32x4Floor:
+ return MarkAsSimd128(node), VisitF32x4Floor(node);
+ case IrOpcode::kF32x4Trunc:
+ return MarkAsSimd128(node), VisitF32x4Trunc(node);
+ case IrOpcode::kF32x4NearestInt:
+ return MarkAsSimd128(node), VisitF32x4NearestInt(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
@@ -2040,6 +2057,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4Abs(node);
case IrOpcode::kI32x4BitMask:
return MarkAsWord32(node), VisitI32x4BitMask(node);
+ case IrOpcode::kI32x4DotI16x8S:
+ return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2188,22 +2207,22 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS8x16Swizzle(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
- case IrOpcode::kS1x2AnyTrue:
- return MarkAsWord32(node), VisitS1x2AnyTrue(node);
- case IrOpcode::kS1x2AllTrue:
- return MarkAsWord32(node), VisitS1x2AllTrue(node);
- case IrOpcode::kS1x4AnyTrue:
- return MarkAsWord32(node), VisitS1x4AnyTrue(node);
- case IrOpcode::kS1x4AllTrue:
- return MarkAsWord32(node), VisitS1x4AllTrue(node);
- case IrOpcode::kS1x8AnyTrue:
- return MarkAsWord32(node), VisitS1x8AnyTrue(node);
- case IrOpcode::kS1x8AllTrue:
- return MarkAsWord32(node), VisitS1x8AllTrue(node);
- case IrOpcode::kS1x16AnyTrue:
- return MarkAsWord32(node), VisitS1x16AnyTrue(node);
- case IrOpcode::kS1x16AllTrue:
- return MarkAsWord32(node), VisitS1x16AllTrue(node);
+ case IrOpcode::kV64x2AnyTrue:
+ return MarkAsWord32(node), VisitV64x2AnyTrue(node);
+ case IrOpcode::kV64x2AllTrue:
+ return MarkAsWord32(node), VisitV64x2AllTrue(node);
+ case IrOpcode::kV32x4AnyTrue:
+ return MarkAsWord32(node), VisitV32x4AnyTrue(node);
+ case IrOpcode::kV32x4AllTrue:
+ return MarkAsWord32(node), VisitV32x4AllTrue(node);
+ case IrOpcode::kV16x8AnyTrue:
+ return MarkAsWord32(node), VisitV16x8AnyTrue(node);
+ case IrOpcode::kV16x8AllTrue:
+ return MarkAsWord32(node), VisitV16x8AllTrue(node);
+ case IrOpcode::kV8x16AnyTrue:
+ return MarkAsWord32(node), VisitV8x16AnyTrue(node);
+ case IrOpcode::kV8x16AllTrue:
+ return MarkAsWord32(node), VisitV8x16AllTrue(node);
default:
FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
node->op()->mnemonic(), node->id());
@@ -2638,8 +2657,8 @@ void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV64x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV64x2AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -2651,23 +2670,45 @@ void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
// TODO(v8:10308) Bitmask operations are in prototype now, we can remove these
// guards when they go into the proposal.
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_X64
void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
- // && !V8_TARGET_ARCH_X64
-
// TODO(v8:10501) Prototyping pmin and pmax instructions.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4Pmin(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Pmax(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Pmin(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Pmax(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
+#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
+ // && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X &&
+ // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X && \
+ !V8_TARGET_ARCH_IA32
+// TODO(v8:10553) Prototyping floating point rounding instructions.
+// TODO(zhin): Temporary convoluted way to handle unimplemented opcodes on ARM
+// while we implement them one at a time.
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF32x4Ceil(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Floor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Trunc(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF64x2Ceil(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Floor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Trunc(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2NearestInt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4NearestInt(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X
+ // && !V8_TARGET_ARCH_IA32
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
+// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
+void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2808,7 +2849,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
int misc_field = static_cast<int>(call_descriptor->ParameterCount());
-#if defined(_AIX)
+#if ABI_USES_FUNCTION_DESCRIPTORS
 // The highest misc_field bit is used on AIX to indicate whether a CFunction
 // call has a function descriptor or not.
if (!call_descriptor->NoFunctionDescriptor()) {
@@ -3038,7 +3079,7 @@ void InstructionSelector::VisitUnreachable(Node* node) {
void InstructionSelector::VisitStaticAssert(Node* node) {
Node* asserted = node->InputAt(0);
- asserted->Print(2);
+ asserted->Print(4);
FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
}
diff --git a/chromium/v8/src/compiler/backend/instruction.h b/chromium/v8/src/compiler/backend/instruction.h
index e189100c346..f40a4198f81 100644
--- a/chromium/v8/src/compiler/backend/instruction.h
+++ b/chromium/v8/src/compiler/backend/instruction.h
@@ -1536,7 +1536,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return virtual_register;
}
Constant GetConstant(int virtual_register) const {
- ConstantMap::const_iterator it = constants_.find(virtual_register);
+ auto it = constants_.find(virtual_register);
DCHECK(it != constants_.end());
DCHECK_EQ(virtual_register, it->first);
return it->second;
diff --git a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
index c83a4e28ee1..b9c1eb11d92 100644
--- a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -2159,6 +2159,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg);
break;
}
+ case kMipsF64x2Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_d(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMipsF64x2Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_d(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMipsI64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
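The fclt/bsel pairs above implement Wasm's pseudo-minimum and pseudo-maximum rather than IEEE min/max; a scalar model of the lane rule, taken straight from the inline comments:

    double Pmin(double lhs, double rhs) { return rhs < lhs ? rhs : lhs; }
    double Pmax(double lhs, double rhs) { return lhs < rhs ? rhs : lhs; }
    // A NaN rhs never wins: the fclt compare is false for NaN, so bsel_v
    // keeps lhs unchanged.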
@@ -2395,6 +2415,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsF32x4Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_w(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMipsF32x4Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_w(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMipsI32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2442,6 +2482,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI32x4BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_w(scratch0, src, 31);
+ __ srli_d(scratch1, scratch0, 31);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 2);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
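The srli/or/shf/copy sequence in kMipsI32x4BitMask funnels the four lane sign bits into the bottom of a general register; the I16x8 and I8x16 variants later in this file extend the same scheme to more lanes. A scalar model (helper name illustrative):

    uint32_t I32x4BitMask(const int32_t lanes[4]) {
      uint32_t mask = 0;
      for (int lane = 0; lane < 4; lane++) {
        mask |= (static_cast<uint32_t>(lanes[lane]) >> 31) << lane;
      }
      return mask;  // lane 0's sign bit lands in bit 0
    }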
@@ -2609,6 +2664,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI16x8BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_h(scratch0, src, 15);
+ __ srli_w(scratch1, scratch0, 15);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 30);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 4);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMipsI8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2776,6 +2848,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI8x16BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_b(scratch0, src, 7);
+ __ srli_h(scratch1, scratch0, 7);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_w(scratch1, scratch0, 14);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 28);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ ilvev_b(scratch0, scratch1, scratch0);
+ __ copy_u_h(dst, scratch0, 0);
+ break;
+ }
case kMipsS128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2800,9 +2890,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsS1x4AnyTrue:
- case kMipsS1x8AnyTrue:
- case kMipsS1x16AnyTrue: {
+ case kMipsV32x4AnyTrue:
+ case kMipsV16x8AnyTrue:
+ case kMipsV8x16AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -2814,7 +2904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMipsS1x4AllTrue: {
+ case kMipsV32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -2825,7 +2915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsS1x8AllTrue: {
+ case kMipsV16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -2836,7 +2926,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsS1x16AllTrue: {
+ case kMipsV8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 0a37dd70683..27418935dd3 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -155,6 +155,8 @@ namespace compiler {
V(MipsF64x2Ne) \
V(MipsF64x2Lt) \
V(MipsF64x2Le) \
+ V(MipsF64x2Pmin) \
+ V(MipsF64x2Pmax) \
V(MipsI64x2Add) \
V(MipsI64x2Sub) \
V(MipsI64x2Mul) \
@@ -196,6 +198,8 @@ namespace compiler {
V(MipsF32x4Ne) \
V(MipsF32x4Lt) \
V(MipsF32x4Le) \
+ V(MipsF32x4Pmin) \
+ V(MipsF32x4Pmax) \
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4) \
V(MipsI32x4Neg) \
@@ -204,6 +208,7 @@ namespace compiler {
V(MipsI32x4GtU) \
V(MipsI32x4GeU) \
V(MipsI32x4Abs) \
+ V(MipsI32x4BitMask) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -232,6 +237,7 @@ namespace compiler {
V(MipsI16x8GeU) \
V(MipsI16x8RoundingAverageU) \
V(MipsI16x8Abs) \
+ V(MipsI16x8BitMask) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
@@ -259,18 +265,19 @@ namespace compiler {
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
V(MipsI8x16Abs) \
+ V(MipsI8x16BitMask) \
V(MipsS128And) \
V(MipsS128Or) \
V(MipsS128Xor) \
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
- V(MipsS1x4AnyTrue) \
- V(MipsS1x4AllTrue) \
- V(MipsS1x8AnyTrue) \
- V(MipsS1x8AllTrue) \
- V(MipsS1x16AnyTrue) \
- V(MipsS1x16AllTrue) \
+ V(MipsV32x4AnyTrue) \
+ V(MipsV32x4AllTrue) \
+ V(MipsV16x8AnyTrue) \
+ V(MipsV16x8AllTrue) \
+ V(MipsV8x16AnyTrue) \
+ V(MipsV8x16AllTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
V(MipsS32x4PackEven) \
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 81bbfbbfb9b..5180a1d4ed0 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -57,6 +57,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF64x2Splat:
case kMipsF64x2ExtractLane:
case kMipsF64x2ReplaceLane:
+ case kMipsF64x2Pmin:
+ case kMipsF64x2Pmax:
case kMipsI64x2Add:
case kMipsI64x2Sub:
case kMipsI64x2Mul:
@@ -85,6 +87,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Splat:
case kMipsF32x4Sub:
case kMipsF32x4UConvertI32x4:
+ case kMipsF32x4Pmin:
+ case kMipsF32x4Pmax:
case kMipsFloat32Max:
case kMipsFloat32Min:
case kMipsFloat32RoundDown:
@@ -138,6 +142,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8UConvertI8x16High:
case kMipsI16x8UConvertI8x16Low:
case kMipsI16x8Abs:
+ case kMipsI16x8BitMask:
case kMipsI32x4Add:
case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
@@ -166,6 +171,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4UConvertI16x8High:
case kMipsI32x4UConvertI16x8Low:
case kMipsI32x4Abs:
+ case kMipsI32x4BitMask:
case kMipsI8x16Add:
case kMipsI8x16AddSaturateS:
case kMipsI8x16AddSaturateU:
@@ -195,6 +201,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16SubSaturateU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
+ case kMipsI8x16BitMask:
case kMipsIns:
case kMipsLsa:
case kMipsMaddD:
@@ -238,12 +245,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS16x8InterleaveRight:
case kMipsS16x8PackEven:
case kMipsS16x8PackOdd:
- case kMipsS1x16AllTrue:
- case kMipsS1x16AnyTrue:
- case kMipsS1x4AllTrue:
- case kMipsS1x4AnyTrue:
- case kMipsS1x8AllTrue:
- case kMipsS1x8AnyTrue:
+ case kMipsV8x16AllTrue:
+ case kMipsV8x16AnyTrue:
+ case kMipsV32x4AllTrue:
+ case kMipsV32x4AnyTrue:
+ case kMipsV16x8AllTrue:
+ case kMipsV16x8AnyTrue:
case kMipsS32x4InterleaveEven:
case kMipsS32x4InterleaveLeft:
case kMipsS32x4InterleaveOdd:
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index dac94fae272..2785186b827 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -113,6 +113,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
MipsOperandGenerator g(selector);
selector->Emit(
@@ -2111,12 +2119,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
V(I8x16Neg, kMipsI8x16Neg) \
V(S128Not, kMipsS128Not) \
- V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
- V(S1x4AllTrue, kMipsS1x4AllTrue) \
- V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
- V(S1x8AllTrue, kMipsS1x8AllTrue) \
- V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
- V(S1x16AllTrue, kMipsS1x16AllTrue)
+ V(V32x4AnyTrue, kMipsV32x4AnyTrue) \
+ V(V32x4AllTrue, kMipsV32x4AllTrue) \
+ V(V16x8AnyTrue, kMipsV16x8AnyTrue) \
+ V(V16x8AllTrue, kMipsV16x8AllTrue) \
+ V(V8x16AnyTrue, kMipsV8x16AnyTrue) \
+ V(V8x16AllTrue, kMipsV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2172,6 +2180,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4GeU, kMipsI32x4GeU) \
V(I32x4Abs, kMipsI32x4Abs) \
+ V(I32x4BitMask, kMipsI32x4BitMask) \
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
@@ -2194,6 +2203,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
V(I16x8Abs, kMipsI16x8Abs) \
+ V(I16x8BitMask, kMipsI16x8BitMask) \
V(I8x16Add, kMipsI8x16Add) \
V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
@@ -2215,6 +2225,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
V(I8x16Abs, kMipsI8x16Abs) \
+ V(I8x16BitMask, kMipsI8x16BitMask) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
V(S128Xor, kMipsS128Xor) \
@@ -2406,6 +2417,22 @@ void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kMipsF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kMipsF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kMipsF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kMipsF64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 197167c01cd..9acd6459de5 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -2265,6 +2265,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt8(1));
break;
}
+ case kMips64F64x2Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_d(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F64x2Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_d(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
@@ -2581,6 +2601,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64F32x4Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_w(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F32x4Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_w(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2634,6 +2674,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I32x4BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_w(scratch0, src, 31);
+ __ srli_d(scratch1, scratch0, 31);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 2);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2820,6 +2875,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I16x8BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_h(scratch0, src, 15);
+ __ srli_w(scratch1, scratch0, 15);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 30);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 4);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3006,6 +3078,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I8x16BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_b(scratch0, src, 7);
+ __ srli_h(scratch1, scratch0, 7);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_w(scratch1, scratch0, 14);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 28);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ ilvev_b(scratch0, scratch1, scratch0);
+ __ copy_u_h(dst, scratch0, 0);
+ break;
+ }
case kMips64S128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3030,9 +3120,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AnyTrue:
- case kMips64S1x16AnyTrue: {
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AnyTrue:
+ case kMips64V8x16AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3043,7 +3133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMips64S1x4AllTrue: {
+ case kMips64V32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3054,7 +3144,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x8AllTrue: {
+ case kMips64V16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3065,7 +3155,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x16AllTrue: {
+ case kMips64V8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 9303b4572f3..0c42c059ea5 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -203,6 +203,8 @@ namespace compiler {
V(Mips64F64x2Splat) \
V(Mips64F64x2ExtractLane) \
V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
@@ -229,6 +231,8 @@ namespace compiler {
V(Mips64F32x4Ne) \
V(Mips64F32x4Lt) \
V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
@@ -237,6 +241,7 @@ namespace compiler {
V(Mips64I32x4GtU) \
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -265,6 +270,7 @@ namespace compiler {
V(Mips64I16x8GeU) \
V(Mips64I16x8RoundingAverageU) \
V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLaneU) \
V(Mips64I8x16ExtractLaneS) \
@@ -292,18 +298,19 @@ namespace compiler {
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
+ V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
V(Mips64S128Xor) \
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64S1x4AnyTrue) \
- V(Mips64S1x4AllTrue) \
- V(Mips64S1x8AnyTrue) \
- V(Mips64S1x8AllTrue) \
- V(Mips64S1x16AnyTrue) \
- V(Mips64S1x16AllTrue) \
+ V(Mips64V32x4AnyTrue) \
+ V(Mips64V32x4AllTrue) \
+ V(Mips64V16x8AnyTrue) \
+ V(Mips64V16x8AllTrue) \
+ V(Mips64V8x16AnyTrue) \
+ V(Mips64V8x16AllTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 81fc3b2ca9a..2f8a2722015 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Ne:
case kMips64F64x2Lt:
case kMips64F64x2Le:
+ case kMips64F64x2Pmin:
+ case kMips64F64x2Pmax:
case kMips64I64x2Splat:
case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane:
@@ -113,6 +115,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Splat:
case kMips64F32x4Sub:
case kMips64F32x4UConvertI32x4:
+ case kMips64F32x4Pmin:
+ case kMips64F32x4Pmax:
case kMips64F64x2Splat:
case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane:
@@ -171,6 +175,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8UConvertI8x16Low:
case kMips64I16x8RoundingAverageU:
case kMips64I16x8Abs:
+ case kMips64I16x8BitMask:
case kMips64I32x4Add:
case kMips64I32x4AddHoriz:
case kMips64I32x4Eq:
@@ -199,6 +204,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertI16x8High:
case kMips64I32x4UConvertI16x8Low:
case kMips64I32x4Abs:
+ case kMips64I32x4BitMask:
case kMips64I8x16Add:
case kMips64I8x16AddSaturateS:
case kMips64I8x16AddSaturateU:
@@ -226,6 +232,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSaturateU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
+ case kMips64I8x16BitMask:
case kMips64Ins:
case kMips64Lsa:
case kMips64MaxD:
@@ -265,12 +272,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64S1x16AllTrue:
- case kMips64S1x16AnyTrue:
- case kMips64S1x4AllTrue:
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AllTrue:
- case kMips64S1x8AnyTrue:
+ case kMips64V8x16AllTrue:
+ case kMips64V8x16AnyTrue:
+ case kMips64V32x4AllTrue:
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AllTrue:
+ case kMips64V16x8AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
case kMips64S32x4InterleaveLeft:
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 719a916b6a5..2c9c8d439b6 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -163,6 +163,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(
@@ -2778,21 +2786,24 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
V(I32x4Abs, kMips64I32x4Abs) \
+ V(I32x4BitMask, kMips64I32x4BitMask) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
V(I16x8Abs, kMips64I16x8Abs) \
+ V(I16x8BitMask, kMips64I16x8BitMask) \
V(I8x16Neg, kMips64I8x16Neg) \
V(I8x16Abs, kMips64I8x16Abs) \
+ V(I8x16BitMask, kMips64I8x16BitMask) \
V(S128Not, kMips64S128Not) \
- V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
- V(S1x4AllTrue, kMips64S1x4AllTrue) \
- V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
- V(S1x8AllTrue, kMips64S1x8AllTrue) \
- V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
- V(S1x16AllTrue, kMips64S1x16AllTrue)
+ V(V32x4AnyTrue, kMips64V32x4AnyTrue) \
+ V(V32x4AllTrue, kMips64V32x4AllTrue) \
+ V(V16x8AnyTrue, kMips64V16x8AnyTrue) \
+ V(V16x8AllTrue, kMips64V16x8AllTrue) \
+ V(V8x16AnyTrue, kMips64V8x16AnyTrue) \
+ V(V8x16AllTrue, kMips64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -3099,6 +3110,22 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
g.TempImmediate(0));
}
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index b7fece3f72d..56c5003d2e8 100644
--- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -1039,7 +1039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
-#if defined(_AIX)
+#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
num_parameters = kNumParametersMask & misc_field;
@@ -2164,6 +2164,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, r0);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2186,6 +2187,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, src);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2208,46 +2210,709 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
+#define SHIFT_TO_CORRECT_LANE(starting_lane_number, lane_input,        \
+                              lane_width_in_bytes, input_register)     \
+  int shift_bits = abs(lane_input - starting_lane_number) *            \
+                   lane_width_in_bytes * kBitsPerByte;                 \
+  if (shift_bits > 0) {                                                \
+    __ li(ip, Operand(shift_bits));                                    \
+    __ mtvsrd(kScratchDoubleReg, ip);                                  \
+    __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));       \
+    if (lane_input < starting_lane_number) {                           \
+      __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg);   \
+    } else {                                                           \
+      DCHECK(lane_input > starting_lane_number);                       \
+      __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg);   \
+    }                                                                  \
+    input_register = kScratchDoubleReg;                                \
+  }
case kPPC_F64x2ExtractLane: {
- __ mfvsrd(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
+ __ mfvsrd(kScratchReg, src);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(kScratchReg, src);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- __ mfvsrd(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
+ __ mfvsrd(i.OutputRegister(), src);
break;
}
case kPPC_I32x4ExtractLane: {
- __ mfvsrwz(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(i.OutputRegister(), src);
break;
}
case kPPC_I16x8ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(16));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I16x8ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 16);
break;
}
case kPPC_I8x16ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(24));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I8x16ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 24);
break;
}
+#undef SHIFT_TO_CORRECT_LANE
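The shift distance computed by SHIFT_TO_CORRECT_LANE is just the byte distance between the requested lane and the one lane that mfvsrd/mfvsrwz can read directly, scaled to bits; sketched as a plain function (assuming V8's kBitsPerByte == 8):

    int ShiftBitsForLane(int starting_lane, int lane,
                         int lane_width_in_bytes) {
      return abs(lane - starting_lane) * lane_width_in_bytes * kBitsPerByte;
    }
    // e.g. an F32x4 lane two positions from starting lane 1:
    // 2 * 4 * 8 = 64 bits of whole-vector shift before the move to a GPR.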
+#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
+ lane_width_in_bytes) \
+ uint64_t mask = 0; \
+ for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
+ mask <<= kBitsPerByte; \
+ if (i >= lane * lane_width_in_bytes && \
+ i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
+ mask |= replacement_value_byte_lane + j; \
+ j++; \
+ } else { \
+ mask |= i; \
+ } \
+ if (i == (kSimd128Size / 2) - 1) { \
+ __ mov(r0, Operand(mask)); \
+ mask = 0; \
+ } else if (i >= kSimd128Size - 1) { \
+ __ mov(ip, Operand(mask)); \
+ } \
+ } \
+ /* Need to maintain 16 byte alignment for lvx */ \
+ __ addi(sp, sp, Operand(-24)); \
+ __ StoreP(ip, MemOperand(sp, 0)); \
+ __ StoreP(r0, MemOperand(sp, 8)); \
+ __ li(r0, Operand(0)); \
+ __ lvx(kScratchDoubleReg, MemOperand(sp, r0)); \
+ __ addi(sp, sp, Operand(24));
+ case kPPC_F64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, r0);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, kScratchReg);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 7 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 22;
+ constexpr int lane_width_in_bytes = 2;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 15 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 23;
+ constexpr int lane_width_in_bytes = 1;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+#undef GENERATE_REPLACE_LANE_MASK
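GENERATE_REPLACE_LANE_MASK builds a vperm select vector: byte i normally selects byte i of the original input (indices 0-15), but inside the replaced lane it selects from the second vperm operand (indices 16-31), which holds the replacement value. A scalar model of the mask construction (function name illustrative):

    void BuildReplaceLaneMask(uint8_t mask[16], int lane,
                              int replacement_value_byte_lane,
                              int lane_width_in_bytes) {
      for (int i = 0, j = 0; i < 16; i++) {
        bool in_lane = i >= lane * lane_width_in_bytes &&
                       i < (lane + 1) * lane_width_in_bytes;
        mask[i] = in_lane
                      ? static_cast<uint8_t>(replacement_value_byte_lane + j++)
                      : static_cast<uint8_t>(i);
      }
    }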
+ case kPPC_F64x2Add: {
+ __ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Sub: {
+ __ xvsubdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Mul: {
+ __ xvmuldp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Add: {
+ __ vaddfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ constexpr int shift_bits = 32;
+ // generate first operand
+ __ vpkudum(dst, src1, src0);
+ // generate second operand
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(tempFPReg2, ip);
+ __ vspltb(tempFPReg2, tempFPReg2, Operand(7));
+ __ vsro(tempFPReg1, src0, tempFPReg2);
+ __ vsro(tempFPReg2, src1, tempFPReg2);
+ __ vpkudum(kScratchDoubleReg, tempFPReg2, tempFPReg1);
+ // add the operands
+ __ vaddfp(dst, kScratchDoubleReg, dst);
+ break;
+ }
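A scalar model of what the vpkudum/vsro sequence above assembles before the single vaddfp: adjacent-pair sums, with src0's pairs in one half of the result and src1's in the other (which half each input lands in depends on lane ordering, so treat this as a sketch):

    void F32x4AddHoriz(float dst[4], const float a[4], const float b[4]) {
      dst[0] = a[0] + a[1];
      dst[1] = a[2] + a[3];
      dst[2] = b[0] + b[1];
      dst[3] = b[2] + b[3];
    }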
+ case kPPC_F32x4Sub: {
+ __ vsubfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Mul: {
+ __ xvmulsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Add: {
+ __ vaddudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Sub: {
+ __ vsubudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Mul: {
+ // Need to maintain 16 byte alignment for stvx and lvx.
+ __ addi(sp, sp, Operand(-40));
+ __ li(r0, Operand(0));
+ __ stvx(i.InputSimd128Register(0), MemOperand(sp, r0));
+ __ li(r0, Operand(16));
+ __ stvx(i.InputSimd128Register(1), MemOperand(sp, r0));
+ for (int i = 0; i < 2; i++) {
+ __ LoadP(r0, MemOperand(sp, kBitsPerByte * i));
+ __ LoadP(ip, MemOperand(sp, (kBitsPerByte * i) + kSimd128Size));
+ __ mulld(r0, r0, ip);
+ __ StoreP(r0, MemOperand(sp, i * kBitsPerByte));
+ }
+ __ li(r0, Operand(0));
+ __ lvx(i.OutputSimd128Register(), MemOperand(sp, r0));
+ __ addi(sp, sp, Operand(40));
+ break;
+ }
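No 64x64-bit vector multiply is used here: the sequence spills both operands, multiplies the doubleword pairs with the scalar mulld, and reloads the result. Lane-wise it computes:

    void I64x2Mul(int64_t dst[2], const int64_t a[2], const int64_t b[2]) {
      for (int lane = 0; lane < 2; lane++) {
        dst[lane] = a[lane] * b[lane];  // wrapping i64 multiply per lane
      }
    }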
+ case kPPC_I32x4Add: {
+ __ vadduwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum2sws(dst, src0, kScratchDoubleReg);
+ __ vsum2sws(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkudum(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I32x4Sub: {
+ __ vsubuwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Mul: {
+ __ vmuluwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Add: {
+ __ vadduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum4shs(dst, src0, kScratchDoubleReg);
+ __ vsum4shs(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkuwus(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I16x8Sub: {
+ __ vsubuhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Mul: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmladduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Add: {
+ __ vaddubm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Sub: {
+ __ vsububm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Mul: {
+ __ vmuleub(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmuloub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vpkuhum(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kPPC_I64x2MinS: {
+ __ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinS: {
+ __ vminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MinU: {
+ __ vminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinU: {
+ __ vminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinS: {
+ __ vminsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinU: {
+ __ vminuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinS: {
+ __ vminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinU: {
+ __ vminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxS: {
+ __ vmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxS: {
+ __ vmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxU: {
+ __ vmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxU: {
+ __ vmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxS: {
+ __ vmaxsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxU: {
+ __ vmaxuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxS: {
+ __ vmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxU: {
+ __ vmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Eq: {
+ __ xvcmpeqdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Ne: {
+ __ xvcmpeqdp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F64x2Le: {
+ __ xvcmpgedp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2Lt: {
+ __ xvcmpgtdp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Eq: {
+ __ xvcmpeqsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Eq: {
+ __ vcmpequd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Eq: {
+ __ vcmpequw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Eq: {
+ __ vcmpequh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Eq: {
+ __ vcmpequb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Ne: {
+ __ xvcmpeqsp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2Ne: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4Ne: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8Ne: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Ne: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4Lt: {
+ __ xvcmpgtsp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Le: {
+ __ xvcmpgesp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2GtS: {
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtS: {
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2GeS: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4GeS: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2GtU: {
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtU: {
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+      break;
+ }
+ case kPPC_I64x2GeU: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+      break;
+ }
+ case kPPC_I32x4GeU: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtS: {
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeS: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtU: {
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeU: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtS: {
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeS: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtU: {
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeU: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+#define VECTOR_SHIFT(op) \
+ { \
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(1)); \
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ kScratchDoubleReg); \
+ }
+ case kPPC_I64x2Shl: {
+ VECTOR_SHIFT(vsld)
+ break;
+ }
+ case kPPC_I64x2ShrS: {
+ VECTOR_SHIFT(vsrad)
+ break;
+ }
+ case kPPC_I64x2ShrU: {
+ VECTOR_SHIFT(vsrd)
+ break;
+ }
+ case kPPC_I32x4Shl: {
+ VECTOR_SHIFT(vslw)
+ break;
+ }
+ case kPPC_I32x4ShrS: {
+ VECTOR_SHIFT(vsraw)
+ break;
+ }
+ case kPPC_I32x4ShrU: {
+ VECTOR_SHIFT(vsrw)
+ break;
+ }
+ case kPPC_I16x8Shl: {
+ VECTOR_SHIFT(vslh)
+ break;
+ }
+ case kPPC_I16x8ShrS: {
+ VECTOR_SHIFT(vsrah)
+ break;
+ }
+ case kPPC_I16x8ShrU: {
+ VECTOR_SHIFT(vsrh)
+ break;
+ }
+ case kPPC_I8x16Shl: {
+ VECTOR_SHIFT(vslb)
+ break;
+ }
+ case kPPC_I8x16ShrS: {
+ VECTOR_SHIFT(vsrab)
+ break;
+ }
+ case kPPC_I8x16ShrU: {
+ VECTOR_SHIFT(vsrb)
+ break;
+ }
+#undef VECTOR_SHIFT
+ case kPPC_S128And: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vand(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Or: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Xor: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vxor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(dst, dst, dst);
+ break;
+ }
+ case kPPC_S128Not: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vnor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Select: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register mask = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ __ vsel(dst, src2, src1, mask);
+ break;
+ }
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 4f6aeced6da..fdffc5f0963 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -192,18 +192,111 @@ namespace compiler {
V(PPC_AtomicXorInt64) \
V(PPC_F64x2Splat) \
V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
V(PPC_F32x4Splat) \
V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4AddHoriz) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
V(PPC_I64x2Splat) \
V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2MinS) \
+ V(PPC_I64x2MinU) \
+ V(PPC_I64x2MaxS) \
+ V(PPC_I64x2MaxU) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GtU) \
+ V(PPC_I64x2GeU) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4AddHoriz) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8AddHoriz) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16Mul) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Zero) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 68d0aaedc4b..b1d124432ef 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -115,18 +115,111 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
+ case kPPC_F64x2ReplaceLane:
+ case kPPC_F64x2Add:
+ case kPPC_F64x2Sub:
+ case kPPC_F64x2Mul:
+ case kPPC_F64x2Eq:
+ case kPPC_F64x2Ne:
+ case kPPC_F64x2Le:
+ case kPPC_F64x2Lt:
case kPPC_F32x4Splat:
case kPPC_F32x4ExtractLane:
+ case kPPC_F32x4ReplaceLane:
+ case kPPC_F32x4Add:
+ case kPPC_F32x4AddHoriz:
+ case kPPC_F32x4Sub:
+ case kPPC_F32x4Mul:
+ case kPPC_F32x4Eq:
+ case kPPC_F32x4Ne:
+ case kPPC_F32x4Lt:
+ case kPPC_F32x4Le:
case kPPC_I64x2Splat:
case kPPC_I64x2ExtractLane:
+ case kPPC_I64x2ReplaceLane:
+ case kPPC_I64x2Add:
+ case kPPC_I64x2Sub:
+ case kPPC_I64x2Mul:
+ case kPPC_I64x2MinS:
+ case kPPC_I64x2MinU:
+ case kPPC_I64x2MaxS:
+ case kPPC_I64x2MaxU:
+ case kPPC_I64x2Eq:
+ case kPPC_I64x2Ne:
+ case kPPC_I64x2GtS:
+ case kPPC_I64x2GtU:
+ case kPPC_I64x2GeU:
+ case kPPC_I64x2GeS:
+ case kPPC_I64x2Shl:
+ case kPPC_I64x2ShrS:
+ case kPPC_I64x2ShrU:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
+ case kPPC_I32x4ReplaceLane:
+ case kPPC_I32x4Add:
+ case kPPC_I32x4AddHoriz:
+ case kPPC_I32x4Sub:
+ case kPPC_I32x4Mul:
+ case kPPC_I32x4MinS:
+ case kPPC_I32x4MinU:
+ case kPPC_I32x4MaxS:
+ case kPPC_I32x4MaxU:
+ case kPPC_I32x4Eq:
+ case kPPC_I32x4Ne:
+ case kPPC_I32x4GtS:
+ case kPPC_I32x4GeS:
+ case kPPC_I32x4GtU:
+ case kPPC_I32x4GeU:
+ case kPPC_I32x4Shl:
+ case kPPC_I32x4ShrS:
+ case kPPC_I32x4ShrU:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
+ case kPPC_I16x8ReplaceLane:
+ case kPPC_I16x8Add:
+ case kPPC_I16x8AddHoriz:
+ case kPPC_I16x8Sub:
+ case kPPC_I16x8Mul:
+ case kPPC_I16x8MinS:
+ case kPPC_I16x8MinU:
+ case kPPC_I16x8MaxS:
+ case kPPC_I16x8MaxU:
+ case kPPC_I16x8Eq:
+ case kPPC_I16x8Ne:
+ case kPPC_I16x8GtS:
+ case kPPC_I16x8GeS:
+ case kPPC_I16x8GtU:
+ case kPPC_I16x8GeU:
+ case kPPC_I16x8Shl:
+ case kPPC_I16x8ShrS:
+ case kPPC_I16x8ShrU:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
+ case kPPC_I8x16ReplaceLane:
+ case kPPC_I8x16Add:
+ case kPPC_I8x16Sub:
+ case kPPC_I8x16Mul:
+ case kPPC_I8x16MinS:
+ case kPPC_I8x16MinU:
+ case kPPC_I8x16MaxS:
+ case kPPC_I8x16MaxU:
+ case kPPC_I8x16Eq:
+ case kPPC_I8x16Ne:
+ case kPPC_I8x16GtS:
+ case kPPC_I8x16GeS:
+ case kPPC_I8x16GtU:
+ case kPPC_I8x16GeU:
+ case kPPC_I8x16Shl:
+ case kPPC_I8x16ShrS:
+ case kPPC_I8x16ShrU:
+ case kPPC_S128And:
+ case kPPC_S128Or:
+ case kPPC_S128Xor:
+ case kPPC_S128Zero:
+ case kPPC_S128Not:
+ case kPPC_S128Select:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 1598fbad041..d5ec475a808 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2127,6 +2127,86 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Le) \
+ V(F64x2Lt) \
+ V(F32x4Add) \
+ V(F32x4AddHoriz) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I32x4Add) \
+ V(I32x4AddHoriz) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I16x8Add) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_UNOP_LIST(V) V(S128Not)
+
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
PPCOperandGenerator g(this); \
@@ -2135,7 +2215,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
}
SIMD_TYPES(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT
-#undef SIMD_TYPES
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
@@ -2153,72 +2232,74 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ReplaceLane, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempSimd128Register()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
+
+#define SIMD_VISIT_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_VISIT_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
+#undef SIMD_VISIT_SHIFT
+#undef SIMD_SHIFT_LIST
+#undef SIMD_TYPES
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Zero(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Zero, g.DefineAsRegister(node));
+}
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Select, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2227,20 +2308,8 @@ void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
UNIMPLEMENTED();
}
@@ -2251,32 +2320,14 @@ void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2285,36 +2336,8 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
@@ -2338,12 +2361,6 @@ void InstructionSelector::EmitPrepareResults(
}
}
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
@@ -2352,8 +2369,6 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
@@ -2364,10 +2379,6 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2431,68 +2442,32 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
diff --git a/chromium/v8/src/compiler/backend/register-allocator.cc b/chromium/v8/src/compiler/backend/register-allocator.cc
index 8b74ef68b14..aab47722044 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.cc
+++ b/chromium/v8/src/compiler/backend/register-allocator.cc
@@ -391,8 +391,8 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
next_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
- current_hint_position_(nullptr),
- splitting_pointer_(nullptr) {
+ splitting_pointer_(nullptr),
+ current_hint_position_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep) |
@@ -473,11 +473,41 @@ RegisterKind LiveRange::kind() const {
return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
}
-UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
- for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
- if (pos->HintRegister(register_index)) return pos;
+UsePosition* LiveRange::FirstHintPosition(int* register_index) {
+ if (!first_pos_) return nullptr;
+ if (current_hint_position_) {
+ if (current_hint_position_->pos() < first_pos_->pos()) {
+ current_hint_position_ = first_pos_;
+ }
+ if (current_hint_position_->pos() > End()) {
+ current_hint_position_ = nullptr;
+ }
}
- return nullptr;
+ bool needs_revisit = false;
+ UsePosition* pos = current_hint_position_;
+ for (; pos != nullptr; pos = pos->next()) {
+ if (pos->HintRegister(register_index)) {
+ break;
+ }
+ // Phi and use position hints can be assigned during allocation which
+ // would invalidate the cached hint position. Make sure we revisit them.
+ needs_revisit = needs_revisit ||
+ pos->hint_type() == UsePositionHintType::kPhi ||
+ pos->hint_type() == UsePositionHintType::kUsePos;
+ }
+ if (!needs_revisit) {
+ current_hint_position_ = pos;
+ }
+#ifdef DEBUG
+ UsePosition* pos_check = first_pos_;
+ for (; pos_check != nullptr; pos_check = pos_check->next()) {
+ if (pos_check->HasHint()) {
+ break;
+ }
+ }
+ CHECK_EQ(pos, pos_check);
+#endif
+ return pos;
}
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
@@ -684,6 +714,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
first_pos_ = nullptr;
}
result->first_pos_ = use_after;
+ result->current_hint_position_ = current_hint_position_;
// Discard cached iteration state. It might be pointing
// to the use that no longer belongs to this live range.
@@ -693,6 +724,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
if (connect_hints == ConnectHints && use_before != nullptr &&
use_after != nullptr) {
use_after->SetHint(use_before);
+ result->current_hint_position_ = use_after;
}
#ifdef DEBUG
VerifyChildStructure();
@@ -2660,6 +2692,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
pos->set_type(new_type, true);
}
}
+ range->ResetCurrentHintPosition();
}
for (auto preassigned : data()->preassigned_slot_ranges()) {
TopLevelLiveRange* range = preassigned.first;
@@ -3493,7 +3526,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
// Choose the live ranges from the majority.
const size_t majority =
(current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
- bool taken_registers[RegisterConfiguration::kMaxRegisters] = {0};
+ bool taken_registers[RegisterConfiguration::kMaxRegisters] = {false};
auto assign_to_live = [this, counts, majority](
std::function<bool(TopLevelLiveRange*)> filter,
RangeWithRegisterSet* to_be_live,
diff --git a/chromium/v8/src/compiler/backend/register-allocator.h b/chromium/v8/src/compiler/backend/register-allocator.h
index f890bd868b7..85a9cf12170 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.h
+++ b/chromium/v8/src/compiler/backend/register-allocator.h
@@ -618,14 +618,14 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
LiveRange* SplitAt(LifetimePosition position, Zone* zone);
// Returns nullptr when no register is hinted, otherwise sets register_index.
- UsePosition* FirstHintPosition(int* register_index) const;
- UsePosition* FirstHintPosition() const {
+ // Uses {current_hint_position_} as a cache, and tries to update it.
+ UsePosition* FirstHintPosition(int* register_index);
+ UsePosition* FirstHintPosition() {
int register_index;
return FirstHintPosition(&register_index);
}
UsePosition* current_hint_position() const {
- DCHECK(current_hint_position_ == FirstHintPosition());
return current_hint_position_;
}
@@ -656,6 +656,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
const InstructionOperand& spill_op);
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
+ void ResetCurrentHintPosition() { current_hint_position_ = first_pos_; }
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
@@ -701,10 +702,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UseInterval* current_interval_;
// This is used as a cache, it doesn't affect correctness.
mutable UsePosition* last_processed_use_;
- // This is used as a cache, it's invalid outside of BuildLiveRanges.
- mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
+ // This is used as a cache in BuildLiveRanges and during register allocation.
+ UsePosition* current_hint_position_;
LiveRangeBundle* bundle_ = nullptr;
// Next interval start, relative to the current linear scan position.
LifetimePosition next_start_;
diff --git a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
index cb79373b425..bef8e7c15aa 100644
--- a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -3853,10 +3853,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector boolean unops
- case kS390_S1x2AnyTrue:
- case kS390_S1x4AnyTrue:
- case kS390_S1x8AnyTrue:
- case kS390_S1x16AnyTrue: {
+ case kS390_V64x2AnyTrue:
+ case kS390_V32x4AnyTrue:
+ case kS390_V16x8AnyTrue:
+ case kS390_V8x16AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
Register temp = i.TempRegister(0);
@@ -3879,19 +3879,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
- case kS390_S1x2AllTrue: {
+ case kS390_V64x2AllTrue: {
SIMD_ALL_TRUE(3)
break;
}
- case kS390_S1x4AllTrue: {
+ case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
}
- case kS390_S1x8AllTrue: {
+ case kS390_V16x8AllTrue: {
SIMD_ALL_TRUE(1)
break;
}
- case kS390_S1x16AllTrue: {
+ case kS390_V8x16AllTrue: {
SIMD_ALL_TRUE(0)
break;
}
@@ -4154,10 +4154,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
for (int i = 0, j = 0; i < 2; i++, j = +2) {
#ifdef V8_TARGET_BIG_ENDIAN
__ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
- __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
+ __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
#else
__ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
- __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
+ __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
#endif
}
__ vlvgp(kScratchDoubleReg, ip, r0);
@@ -4185,6 +4185,119 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
}
+ case kS390_I32x4BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(kScratchReg, Operand(0x204060));
+ __ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
+#else
+ __ lgfi(kScratchReg, Operand(0x80808080));
+ __ iihf(kScratchReg, Operand(0x60402000));
+#endif
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
+ Condition(0));
+ break;
+ }
+ case kS390_I16x8BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(kScratchReg, Operand(0x40506070));
+ __ iihf(kScratchReg, Operand(0x102030));
+#else
+ __ lgfi(kScratchReg, Operand(0x30201000));
+ __ iihf(kScratchReg, Operand(0x70605040));
+#endif
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
+ Condition(0));
+ break;
+ }
+ case kS390_I8x16BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(r0, Operand(0x60687078));
+ __ iihf(r0, Operand(0x40485058));
+ __ lgfi(ip, Operand(0x20283038));
+ __ iihf(ip, Operand(0x81018));
+#else
+ __ lgfi(ip, Operand(0x58504840));
+ __ iihf(ip, Operand(0x78706860));
+ __ lgfi(r0, Operand(0x18100800));
+ __ iihf(r0, Operand(0x38302820));
+#endif
+ __ vlvgp(kScratchDoubleReg, ip, r0);
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 3),
+ Condition(1));
+ break;
+ }
+ case kS390_F32x4Pmin: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F32x4Pmax: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F64x2Pmin: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Pmax: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Ceil: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Floor: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Trunc: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2NearestInt: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F32x4Ceil: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4Floor: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4Trunc: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4NearestInt: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
+ Condition(0), Condition(2));
+ break;
+ }
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 6101b22166c..f588e854265 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -215,6 +215,12 @@ namespace compiler {
V(S390_F64x2ExtractLane) \
V(S390_F64x2Qfma) \
V(S390_F64x2Qfms) \
+ V(S390_F64x2Pmin) \
+ V(S390_F64x2Pmax) \
+ V(S390_F64x2Ceil) \
+ V(S390_F64x2Floor) \
+ V(S390_F64x2Trunc) \
+ V(S390_F64x2NearestInt) \
V(S390_F32x4Splat) \
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
@@ -238,6 +244,12 @@ namespace compiler {
V(S390_F32x4Max) \
V(S390_F32x4Qfma) \
V(S390_F32x4Qfms) \
+ V(S390_F32x4Pmin) \
+ V(S390_F32x4Pmax) \
+ V(S390_F32x4Ceil) \
+ V(S390_F32x4Floor) \
+ V(S390_F32x4Trunc) \
+ V(S390_F32x4NearestInt) \
V(S390_I64x2Neg) \
V(S390_I64x2Add) \
V(S390_I64x2Sub) \
@@ -286,6 +298,7 @@ namespace compiler {
V(S390_I32x4UConvertI16x8Low) \
V(S390_I32x4UConvertI16x8High) \
V(S390_I32x4Abs) \
+ V(S390_I32x4BitMask) \
V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
@@ -320,6 +333,7 @@ namespace compiler {
V(S390_I16x8SubSaturateU) \
V(S390_I16x8RoundingAverageU) \
V(S390_I16x8Abs) \
+ V(S390_I16x8BitMask) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -349,16 +363,17 @@ namespace compiler {
V(S390_I8x16SubSaturateU) \
V(S390_I8x16RoundingAverageU) \
V(S390_I8x16Abs) \
+ V(S390_I8x16BitMask) \
V(S390_S8x16Shuffle) \
V(S390_S8x16Swizzle) \
- V(S390_S1x2AnyTrue) \
- V(S390_S1x4AnyTrue) \
- V(S390_S1x8AnyTrue) \
- V(S390_S1x16AnyTrue) \
- V(S390_S1x2AllTrue) \
- V(S390_S1x4AllTrue) \
- V(S390_S1x8AllTrue) \
- V(S390_S1x16AllTrue) \
+ V(S390_V64x2AnyTrue) \
+ V(S390_V32x4AnyTrue) \
+ V(S390_V16x8AnyTrue) \
+ V(S390_V8x16AnyTrue) \
+ V(S390_V64x2AllTrue) \
+ V(S390_V32x4AllTrue) \
+ V(S390_V16x8AllTrue) \
+ V(S390_V8x16AllTrue) \
V(S390_S128And) \
V(S390_S128Or) \
V(S390_S128Xor) \
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 502ce229f50..775590a863d 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -161,6 +161,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F64x2ExtractLane:
case kS390_F64x2Qfma:
case kS390_F64x2Qfms:
+ case kS390_F64x2Pmin:
+ case kS390_F64x2Pmax:
+ case kS390_F64x2Ceil:
+ case kS390_F64x2Floor:
+ case kS390_F64x2Trunc:
+ case kS390_F64x2NearestInt:
case kS390_F32x4Splat:
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
@@ -184,6 +190,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4Max:
case kS390_F32x4Qfma:
case kS390_F32x4Qfms:
+ case kS390_F32x4Pmin:
+ case kS390_F32x4Pmax:
+ case kS390_F32x4Ceil:
+ case kS390_F32x4Floor:
+ case kS390_F32x4Trunc:
+ case kS390_F32x4NearestInt:
case kS390_I64x2Neg:
case kS390_I64x2Add:
case kS390_I64x2Sub:
@@ -232,6 +244,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4UConvertI16x8Low:
case kS390_I32x4UConvertI16x8High:
case kS390_I32x4Abs:
+ case kS390_I32x4BitMask:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -266,6 +279,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8SubSaturateU:
case kS390_I16x8RoundingAverageU:
case kS390_I16x8Abs:
+ case kS390_I16x8BitMask:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -295,16 +309,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16SubSaturateU:
case kS390_I8x16RoundingAverageU:
case kS390_I8x16Abs:
+ case kS390_I8x16BitMask:
case kS390_S8x16Shuffle:
case kS390_S8x16Swizzle:
- case kS390_S1x2AnyTrue:
- case kS390_S1x4AnyTrue:
- case kS390_S1x8AnyTrue:
- case kS390_S1x16AnyTrue:
- case kS390_S1x2AllTrue:
- case kS390_S1x4AllTrue:
- case kS390_S1x8AllTrue:
- case kS390_S1x16AllTrue:
+ case kS390_V64x2AnyTrue:
+ case kS390_V32x4AnyTrue:
+ case kS390_V16x8AnyTrue:
+ case kS390_V8x16AnyTrue:
+ case kS390_V64x2AllTrue:
+ case kS390_V32x4AllTrue:
+ case kS390_V16x8AllTrue:
+ case kS390_V8x16AllTrue:
case kS390_S128And:
case kS390_S128Or:
case kS390_S128Xor:
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 515e8dd127b..39089f346ed 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2635,11 +2635,19 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(F64x2Abs) \
V(F64x2Neg) \
V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Sqrt) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
V(I64x2Neg) \
V(I16x8Abs) \
V(I32x4Neg) \
@@ -2672,14 +2680,14 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(S1x2AnyTrue) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue) \
- V(S1x2AllTrue) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V64x2AnyTrue) \
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue) \
+ V(V64x2AllTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
#define SIMD_CONVERSION_LIST(V) \
V(I32x4SConvertF32x4) \
@@ -2794,6 +2802,29 @@ SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP
+
+#define SIMD_VISIT_BITMASK(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ S390OperandGenerator g(this); \
+ Emit(kS390_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0))); \
+ }
+SIMD_VISIT_BITMASK(I8x16BitMask)
+SIMD_VISIT_BITMASK(I16x8BitMask)
+SIMD_VISIT_BITMASK(I32x4BitMask)
+#undef SIMD_VISIT_BITMASK
+
+#define SIMD_VISIT_PMIN_MAX(Type) \
+ void InstructionSelector::Visit##Type(Node* node) { \
+ S390OperandGenerator g(this); \
+ Emit(kS390_##Type, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ }
+SIMD_VISIT_PMIN_MAX(F64x2Pmin)
+SIMD_VISIT_PMIN_MAX(F32x4Pmin)
+SIMD_VISIT_PMIN_MAX(F64x2Pmax)
+SIMD_VISIT_PMIN_MAX(F32x4Pmax)
+#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
diff --git a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
index 4f99ad49ba8..110a478c543 100644
--- a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -194,6 +194,94 @@ class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
XMMRegister const result_;
};
+class OutOfLineF32x4Min final : public OutOfLineCode {
+ public:
+ OutOfLineF32x4Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+    // |result_| is the partial result; |error_| (kScratchDoubleReg) is the
+    // error. Propagate -0's and NaNs (possibly non-canonical) from it.
+ __ Orps(error_, result_);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmpps(result_, error_, int8_t{3});
+ __ Orps(error_, result_);
+ __ Psrld(result_, byte{10});
+ __ Andnps(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF64x2Min final : public OutOfLineCode {
+ public:
+ OutOfLineF64x2Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+    // |result_| is the partial result; |error_| (kScratchDoubleReg) is the
+    // error. Propagate -0's and NaNs (possibly non-canonical) from it.
+ __ Orpd(error_, result_);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmppd(result_, error_, int8_t{3});
+ __ Orpd(error_, result_);
+ __ Psrlq(result_, 13);
+ __ Andnpd(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF32x4Max final : public OutOfLineCode {
+ public:
+ OutOfLineF32x4Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+    // |result_| is the partial result; |error_| (kScratchDoubleReg) is the
+    // error.
+ // Propagate NaNs (possibly non-canonical).
+ __ Orps(result_, error_);
+ // Propagate sign errors and (subtle) quiet NaNs.
+ __ Subps(result_, error_);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmpps(error_, result_, int8_t{3});
+ __ Psrld(error_, byte{10});
+ __ Andnps(error_, result_);
+ __ Movaps(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF64x2Max final : public OutOfLineCode {
+ public:
+ OutOfLineF64x2Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+    // |result_| is the partial result; |error_| (kScratchDoubleReg) is the
+    // error.
+ // Propagate NaNs (possibly non-canonical).
+ __ Orpd(result_, error_);
+ // Propagate sign errors and (subtle) quiet NaNs.
+ __ Subpd(result_, error_);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmppd(error_, result_, int8_t{3});
+ __ Psrlq(error_, byte{13});
+ __ Andnpd(error_, result_);
+ __ Movapd(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -2328,18 +2416,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the resuls, and adjust.
+ // The minpd instruction doesn't propagate NaNs and -0's in its first
+ // operand. Perform minpd in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movapd(kScratchDoubleReg, src1);
__ Minpd(kScratchDoubleReg, dst);
__ Minpd(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Orpd(kScratchDoubleReg, dst);
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorpd(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF64x2Min(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F64x2Max: {
@@ -2347,20 +2435,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the resuls, and adjust.
+ // operand. Perform maxpd in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movapd(kScratchDoubleReg, src1);
__ Maxpd(kScratchDoubleReg, dst);
__ Maxpd(dst, src1);
- // Find discrepancies.
- __ Xorpd(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorpd(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF64x2Max(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F64x2Eq: {
@@ -2524,18 +2609,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the resuls, and adjust.
+ // The minps instruction doesn't propagate NaNs and -0's in its first
+ // operand. Perform minps in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movaps(kScratchDoubleReg, src1);
__ Minps(kScratchDoubleReg, dst);
__ Minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
- __ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorps(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF32x4Min(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F32x4Max: {
@@ -2543,20 +2628,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the resuls, and adjust.
+ // operand. Perform maxps in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movaps(kScratchDoubleReg, src1);
__ Maxps(kScratchDoubleReg, dst);
__ Maxps(dst, src1);
- // Find discrepancies.
- __ Xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorps(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF32x4Max(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F32x4Eq: {
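
The rewritten Min/Max sequences above all follow one pattern. SSE min/max are not commutative: minpd/minps (and the max variants) return the second operand when the inputs involve a NaN or a +0/-0 pair, so running the instruction in both argument orders and XOR-ing the two results flags exactly the lanes where wasm semantics need a fixup; Ptest then branches to the out-of-line handler only in that unlikely case. A scalar sketch of the idea for one f64 lane (illustrative only; the NaN canonicalization in the real out-of-line code is more involved):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static uint64_t bits(double d) {  // bitwise view of one SIMD lane
      uint64_t u;
      std::memcpy(&u, &d, sizeof u);
      return u;
    }

    // minpd per-lane rule: a < b ? a : b. Returns the second operand for
    // NaN inputs and treats +0 and -0 as equal, unlike wasm f64x2.min.
    static double sse_minpd(double a, double b) { return a < b ? a : b; }

    double f64x2_min_lane(double dst, double src1) {
      double fwd = sse_minpd(dst, src1);       // Minpd(dst, src1)
      double rev = sse_minpd(src1, dst);       // Minpd(scratch, dst)
      if (bits(fwd) == bits(rev)) return fwd;  // Xorpd+Ptest saw all zeros
      // Out-of-line path: OR-ing propagates a -0 sign bit and any NaN
      // bits; a real implementation additionally quiets the NaN.
      uint64_t merged = bits(fwd) | bits(rev);
      if (std::isnan(dst) || std::isnan(src1)) merged |= uint64_t{1} << 51;
      double r;
      std::memcpy(&r, &merged, sizeof r);
      return r;
    }
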
@@ -2619,6 +2701,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, i.InputSimd128Register(1));
break;
}
+ case kX64F32x4Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundps(i.OutputSimd128Register(), i.InputSimd128Register(0), mode);
+ break;
+ }
+ case kX64F64x2Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundpd(i.OutputSimd128Register(), i.InputSimd128Register(0), mode);
+ break;
+ }
case kX64F64x2Pmin: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
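
The two new Round cases decode the rounding mode that the instruction selector packed into MiscField (see the selector hunk below) and emit a single roundps/roundpd, whose immediate selects the mode directly instead of consulting MXCSR. Per lane, the four modes behave like the standard C library rounders; a sketch (assuming the default round-to-nearest-even environment for nearbyint):

    #include <cmath>

    enum RoundingMode { kRoundToNearest, kRoundDown, kRoundUp, kRoundToZero };

    // One lane of kX64F64x2Round for each mode wasm exposes.
    double round_lane(double x, RoundingMode mode) {
      switch (mode) {
        case kRoundToNearest: return std::nearbyint(x);  // f64x2.nearest
        case kRoundDown:      return std::floor(x);      // f64x2.floor
        case kRoundUp:        return std::ceil(x);       // f64x2.ceil
        case kRoundToZero:    return std::trunc(x);      // f64x2.trunc
      }
      return x;  // unreachable
    }
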
@@ -3093,6 +3187,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kX64I32x4DotI16x8S: {
+ __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Xorps(dst, dst);
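
kX64I32x4DotI16x8S above lowers wasm's i32x4.dot_i16x8_s to a single pmaddwd (dst is constrained to the first input, so only the second appears in the emitted call). Pmaddwd multiplies corresponding signed 16-bit lanes and sums adjacent 32-bit products pairwise; one output lane in scalar form:

    #include <cstdint>

    // One 32-bit output lane of pmaddwd: two signed 16x16->32 multiplies
    // plus a 32-bit add. The products are widened first, so the only
    // overflow, (-32768 * -32768) * 2, wraps to 0x80000000 exactly as the
    // instruction does (hence the unsigned add to avoid C++ UB).
    int32_t dot_i16x8_s_lane(int16_t a0, int16_t a1, int16_t b0, int16_t b1) {
      int32_t p0 = int32_t{a0} * b0;
      int32_t p1 = int32_t{a1} * b1;
      return static_cast<int32_t>(static_cast<uint32_t>(p0) +
                                  static_cast<uint32_t>(p1));
    }
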
@@ -3926,10 +4024,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64S1x2AnyTrue:
- case kX64S1x4AnyTrue:
- case kX64S1x8AnyTrue:
- case kX64S1x16AnyTrue: {
+ case kX64V64x2AnyTrue:
+ case kX64V32x4AnyTrue:
+ case kX64V16x8AnyTrue:
+ case kX64V8x16AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
@@ -3942,19 +4040,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kX64S1x2AllTrue: {
+ case kX64V64x2AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
break;
}
- case kX64S1x4AllTrue: {
+ case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
}
- case kX64S1x8AllTrue: {
+ case kX64V16x8AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
break;
}
- case kX64S1x16AllTrue: {
+ case kX64V8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
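
The renamed AllTrue reductions (S1xN becomes VNxM to reflect the lane shape) all share the ASSEMBLE_SIMD_ALL_TRUE pattern: compare the input against zero at the lane width, then ptest the resulting mask, which is why the comparison width matters, as the comment above explains. In scalar form for four 32-bit lanes (sketch):

    #include <cstdint>

    // AllTrue over four 32-bit lanes, mirroring pcmpeqd + ptest + setcc.
    bool v32x4_all_true(const uint32_t lanes[4]) {
      uint32_t mask[4];
      for (int i = 0; i < 4; ++i) {
        // pcmpeqd against zero: all-ones where the lane is 0, else zero.
        mask[i] = (lanes[i] == 0) ? 0xFFFFFFFFu : 0u;
      }
      // Ptest(mask, mask) sets ZF iff the mask is entirely zero, i.e. iff
      // no lane was zero; setcc(equal) materializes that as the result.
      return (mask[0] | mask[1] | mask[2] | mask[3]) == 0;
    }
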
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 745f5c6cb25..ed7d2060f59 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -174,6 +174,7 @@ namespace compiler {
V(X64F64x2Qfms) \
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -199,6 +200,7 @@ namespace compiler {
V(X64F32x4Qfms) \
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2ReplaceLane) \
@@ -248,6 +250,7 @@ namespace compiler {
V(X64I32x4GeU) \
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
@@ -357,14 +360,14 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64S1x2AnyTrue) \
- V(X64S1x2AllTrue) \
- V(X64S1x4AnyTrue) \
- V(X64S1x4AllTrue) \
- V(X64S1x8AnyTrue) \
- V(X64S1x8AllTrue) \
- V(X64S1x16AnyTrue) \
- V(X64S1x16AllTrue) \
+ V(X64V64x2AnyTrue) \
+ V(X64V64x2AllTrue) \
+ V(X64V32x4AnyTrue) \
+ V(X64V32x4AllTrue) \
+ V(X64V16x8AnyTrue) \
+ V(X64V16x8AllTrue) \
+ V(X64V8x16AnyTrue) \
+ V(X64V8x16AllTrue) \
V(X64Word64AtomicLoadUint8) \
V(X64Word64AtomicLoadUint16) \
V(X64Word64AtomicLoadUint32) \
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index d2c1d14855c..395c4a4e9c7 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -146,6 +146,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Qfms:
case kX64F64x2Pmin:
case kX64F64x2Pmax:
+ case kX64F64x2Round:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -171,6 +172,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Qfms:
case kX64F32x4Pmin:
case kX64F32x4Pmax:
+ case kX64F32x4Round:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
case kX64I64x2ReplaceLane:
@@ -220,6 +222,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4GeU:
case kX64I32x4Abs:
case kX64I32x4BitMask:
+ case kX64I32x4DotI16x8S:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
@@ -292,12 +295,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Select:
case kX64S128Zero:
case kX64S128AndNot:
- case kX64S1x2AnyTrue:
- case kX64S1x2AllTrue:
- case kX64S1x4AnyTrue:
- case kX64S1x4AllTrue:
- case kX64S1x8AnyTrue:
- case kX64S1x8AllTrue:
+ case kX64V64x2AnyTrue:
+ case kX64V64x2AllTrue:
+ case kX64V32x4AnyTrue:
+ case kX64V32x4AllTrue:
+ case kX64V16x8AnyTrue:
+ case kX64V16x8AllTrue:
case kX64S8x16Swizzle:
case kX64S8x16Shuffle:
case kX64S32x4Swizzle:
@@ -325,8 +328,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x8Reverse:
case kX64S8x4Reverse:
case kX64S8x2Reverse:
- case kX64S1x16AnyTrue:
- case kX64S1x16AllTrue:
+ case kX64V8x16AnyTrue:
+ case kX64V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index dd3f556937d..ab669864954 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -1461,7 +1461,16 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
V(Float32RoundTiesEven, \
kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+ V(Float64RoundTiesEven, \
+ kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kX64F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kX64F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kX64F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kX64F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kX64F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kX64F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kX64F64x2Round | MiscField::encode(kRoundToZero)) \
+ V(F64x2NearestInt, kX64F64x2Round | MiscField::encode(kRoundToNearest))
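
These new entries reuse the existing scheme of packing the rounding mode into the instruction code itself, so one kX64F32x4Round opcode serves four wasm ops and the code generator recovers the mode with MiscField::decode, as shown earlier. A minimal sketch of that round trip (field layout simplified, not V8's actual BitField parameters):

    #include <cassert>
    #include <cstdint>

    using InstructionCode = uint32_t;

    // Simplified stand-in for V8's MiscField: a small field in the upper
    // bits of the instruction code, left untouched by the opcode bits.
    struct MiscField {
      static constexpr int kShift = 22;
      static constexpr uint32_t kMask = 0xFF;
      static InstructionCode encode(uint32_t v) { return v << kShift; }
      static uint32_t decode(InstructionCode c) { return (c >> kShift) & kMask; }
    };

    enum RoundingMode : uint32_t { kRoundToNearest, kRoundDown, kRoundUp, kRoundToZero };
    constexpr InstructionCode kX64F32x4Round = 42;  // placeholder value

    int main() {
      // Selector side: one opcode, mode folded into the code.
      InstructionCode code = kX64F32x4Round | MiscField::encode(kRoundUp);
      // Code-generator side: recover the mode, emit roundps with it.
      assert(MiscField::decode(code) == kRoundUp);
    }
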
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1898,16 +1907,33 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
X64OperandGenerator g(selector);
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
- CompressedHeapObjectBinopMatcher m(node);
- if (m.right().HasValue() &&
- roots_table.IsRootHandle(m.right().Value(), &root_index)) {
+ Node* left = nullptr;
+ Handle<HeapObject> right;
+ // HeapConstants and CompressedHeapConstants can be treated the same when
+ // using them as an input to a 32-bit comparison. Check whether either is
+ // present.
+ {
+ CompressedHeapObjectBinopMatcher m(node);
+ if (m.right().HasValue()) {
+ left = m.left().node();
+ right = m.right().Value();
+ } else {
+ HeapObjectBinopMatcher m2(node);
+ if (m2.right().HasValue()) {
+ left = m2.left().node();
+ right = m2.right().Value();
+ }
+ }
+ }
+ if (!right.is_null() && roots_table.IsRootHandle(right, &root_index)) {
+ DCHECK_NE(left, nullptr);
InstructionCode opcode =
kX64Cmp32 | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
- g.UseRegister(m.left().node()), cont);
+ g.UseRegister(left), cont);
}
}
VisitWordCompare(selector, node, kX64Cmp32, cont);
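
The rewritten matching block tries the compressed-constant matcher first and falls back to the full heap-constant matcher, since either form can be compared against a root with one root-register-relative cmp. Stripped of the V8 types, the control flow reduces to this shape (stand-in types and hypothetical matchers, for illustration only):

    #include <optional>
    #include <utility>

    using Node = int;            // stand-ins for V8's Node* and Handle<>
    using ConstantHandle = int;
    using Match = std::pair<Node /*left*/, ConstantHandle /*right*/>;

    // Hypothetical matchers: each recognizes "left == <constant>" for one
    // constant representation, or fails.
    std::optional<Match> MatchCompressedHeapConstant(Node n) {
      if (n == 1) return Match{10, 7};
      return std::nullopt;
    }
    std::optional<Match> MatchHeapConstant(Node n) {
      if (n == 2) return Match{20, 7};
      return std::nullopt;
    }

    // Either representation yields the same (left, right) pair, which the
    // caller then checks against the roots table.
    std::optional<Match> MatchEither(Node n) {
      if (auto m = MatchCompressedHeapConstant(n)) return m;
      return MatchHeapConstant(n);
    }
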
@@ -2674,6 +2700,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MinU) \
V(I32x4MaxU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
@@ -2766,16 +2793,16 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
- V(S1x2AnyTrue) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue)
+ V(V64x2AnyTrue) \
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(S1x2AllTrue) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V64x2AllTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
void InstructionSelector::VisitS128Zero(Node* node) {
X64OperandGenerator g(this);
diff --git a/chromium/v8/src/compiler/basic-block-instrumentor.cc b/chromium/v8/src/compiler/basic-block-instrumentor.cc
index c2548b77267..ca6a60b7827 100644
--- a/chromium/v8/src/compiler/basic-block-instrumentor.cc
+++ b/chromium/v8/src/compiler/basic-block-instrumentor.cc
@@ -37,16 +37,21 @@ static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
return i;
}
+static const Operator* IntPtrConstant(CommonOperatorBuilder* common,
+ intptr_t value) {
+ return kSystemPointerSize == 8
+ ? common->Int64Constant(value)
+ : common->Int32Constant(static_cast<int32_t>(value));
+}
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
- intptr_t ptr) {
- return kSystemPointerSize == 8
- ? common->Int64Constant(ptr)
- : common->Int32Constant(static_cast<int32_t>(ptr));
+ const void* ptr) {
+ intptr_t ptr_as_int = reinterpret_cast<intptr_t>(ptr);
+ return IntPtrConstant(common, ptr_as_int);
}
-BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
+BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
Isolate* isolate) {
// Basic block profiling disables concurrent compilation, so handle deref is
@@ -54,41 +59,68 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
AllowHandleDereference allow_handle_dereference;
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
- size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
- BasicBlockProfiler::Data* data = BasicBlockProfiler::Get()->NewData(n_blocks);
+ size_t n_blocks = schedule->RpoBlockCount() - 1;
+ BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
// Set the function name.
data->SetFunctionName(info->GetDebugName());
// Capture the schedule string before instrumentation.
- {
+ if (FLAG_turbo_profiling_verbose) {
std::ostringstream os;
os << *schedule;
- data->SetSchedule(&os);
+ data->SetSchedule(os);
}
+ // Check whether we should write counts to a JS heap object or to the
+ // BasicBlockProfilerData directly. The JS heap object is only used for
+ // builtins.
+ bool on_heap_counters = isolate && isolate->IsGeneratingEmbeddedBuiltins();
// Add the increment instructions to the start of every block.
CommonOperatorBuilder common(graph->zone());
- Node* zero = graph->NewNode(common.Int32Constant(0));
- Node* one = graph->NewNode(common.Int32Constant(1));
MachineOperatorBuilder machine(graph->zone());
+ Node* counters_array = nullptr;
+ if (on_heap_counters) {
+ // Allocation is disallowed here, so rather than referring to an actual
+ // counters array, create a reference to a special marker object. This
+ // object will get fixed up later in the constants table (see
+ // PatchBasicBlockCountersReference). An important and subtle point: we
+ // cannot use the root handle basic_block_counters_marker_handle() and must
+ // create a new separate handle. Otherwise
+ // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a
+ // root-relative load rather than putting this value in the constants table
+ // where we expect it to be for patching.
+ counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
+ ReadOnlyRoots(isolate).basic_block_counters_marker(), isolate)));
+ } else {
+ counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
+ }
+ Node* one = graph->NewNode(common.Int32Constant(1));
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
++it, ++block_number) {
BasicBlock* block = (*it);
data->SetBlockRpoNumber(block_number, block->rpo_number());
- // TODO(dcarney): wire effect and control deps for load and store.
+ // It is unnecessary to wire effect and control deps for load and store
+ // since this happens after scheduling.
// Construct increment operation.
- Node* base = graph->NewNode(
- PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(MachineType::Uint32()), base, zero,
- graph->start(), graph->start());
+ int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
+ if (on_heap_counters) {
+ offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
+ }
+ Node* offset_to_counter =
+ graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
+ Node* load =
+ graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
+ offset_to_counter, graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
- Node* store =
- graph->NewNode(machine.Store(StoreRepresentation(
- MachineRepresentation::kWord32, kNoWriteBarrier)),
- base, zero, inc, graph->start(), graph->start());
+ Node* store = graph->NewNode(
+ machine.Store(StoreRepresentation(MachineRepresentation::kWord32,
+ kNoWriteBarrier)),
+ counters_array, offset_to_counter, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
- Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
+ Node* to_insert[kArraySize] = {counters_array, one, offset_to_counter,
+ load, inc, store};
+ // The first two Nodes are constant across all blocks.
int insertion_start = block_number == 0 ? 0 : 2;
NodeVector::iterator insertion_point = FindInsertionPoint(block);
block->InsertNodes(insertion_point, &to_insert[insertion_start],
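
The instrumentation now indexes a single shared counters reference per block instead of materializing a per-block absolute address: the offset is block_number * kInt32Size, biased by the ByteArray header (minus the heap-object tag) when the counters live on the JS heap for builtins. Per block, the emitted load/add/store is equivalent to this scalar sketch (constants assumed, not V8's actual values):

    #include <cstdint>

    // One block's increment, as scheduled above. counters_base is either a
    // raw pointer to the off-heap counts array (bias 0) or an untagged
    // on-heap ByteArray pointer (bias = header size - kHeapObjectTag).
    void IncrementBlockCounter(uint8_t* counters_base, int block_number,
                               int header_bias) {
      constexpr int kInt32Size = 4;
      int offset = block_number * kInt32Size + header_bias;
      uint32_t* slot = reinterpret_cast<uint32_t*>(counters_base + offset);
      uint32_t count = *slot;  // machine.Load(Uint32)
      count += 1;              // machine.Int32Add
      *slot = count;           // machine.Store(Word32, kNoWriteBarrier)
    }
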
diff --git a/chromium/v8/src/compiler/basic-block-instrumentor.h b/chromium/v8/src/compiler/basic-block-instrumentor.h
index c8bc94c16bf..e63a2cac5d9 100644
--- a/chromium/v8/src/compiler/basic-block-instrumentor.h
+++ b/chromium/v8/src/compiler/basic-block-instrumentor.h
@@ -20,9 +20,9 @@ class Schedule;
class BasicBlockInstrumentor : public AllStatic {
public:
- static BasicBlockProfiler::Data* Instrument(OptimizedCompilationInfo* info,
- Graph* graph, Schedule* schedule,
- Isolate* isolate);
+ static BasicBlockProfilerData* Instrument(OptimizedCompilationInfo* info,
+ Graph* graph, Schedule* schedule,
+ Isolate* isolate);
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc
index b59b5a1b844..93aaca2512e 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.cc
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc
@@ -63,12 +63,30 @@ class BytecodeGraphBuilder {
// Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
+ bool native_context_independent() const {
+ return native_context_independent_;
+ }
+
+ // The node representing the current feedback vector is generated once prior
+ // to visiting bytecodes, and is later passed as input to other nodes that
+ // may need it.
+ // TODO(jgruber): Remove feedback_vector() and rename feedback_vector_node()
+ // to feedback_vector() once all uses of the direct heap object reference
+ // have been replaced with a Node* reference.
+ void CreateFeedbackVectorNode();
+ Node* BuildLoadFeedbackVector();
+ Node* feedback_vector_node() const {
+ DCHECK_NOT_NULL(feedback_vector_node_);
+ return feedback_vector_node_;
+ }
+
  // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
// Helper function for creating a feedback source containing type feedback
// vector and a feedback slot.
FeedbackSource CreateFeedbackSource(int slot_id);
+ FeedbackSource CreateFeedbackSource(FeedbackSlot slot);
void set_environment(Environment* env) { environment_ = env; }
const Environment* environment() const { return environment_; }
@@ -191,6 +209,7 @@ class BytecodeGraphBuilder {
void BuildUnaryOp(const Operator* op);
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
+ void BuildInstanceOf(const Operator* op);
void BuildCompareOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
@@ -243,14 +262,6 @@ class BytecodeGraphBuilder {
Environment* CheckContextExtensionAtDepth(Environment* slow_environment,
uint32_t depth);
- // Helper function to create binary operation hint from the recorded
- // type feedback.
- BinaryOperationHint GetBinaryOperationHint(int operand_index);
-
- // Helper function to create compare operation hint from the recorded
- // type feedback.
- CompareOperationHint GetCompareOperationHint();
-
// Helper function to create for-in mode from the recorded type feedback.
ForInMode GetForInMode(int operand_index);
@@ -423,6 +434,9 @@ class BytecodeGraphBuilder {
int input_buffer_size_;
Node** input_buffer_;
+ const bool native_context_independent_;
+ Node* feedback_vector_node_;
+
// Optimization to only create checkpoints when the current position in the
// control-flow is not effect-dominated by another checkpoint already. All
// operations that do not have observable side-effects can be re-evaluated.
@@ -443,10 +457,11 @@ class BytecodeGraphBuilder {
TickCounter* const tick_counter_;
- static int const kBinaryOperationHintIndex = 1;
- static int const kCountOperationHintIndex = 0;
- static int const kBinaryOperationSmiHintIndex = 1;
- static int const kUnaryOperationHintIndex = 0;
+ static constexpr int kBinaryOperationHintIndex = 1;
+ static constexpr int kBinaryOperationSmiHintIndex = 1;
+ static constexpr int kCompareOperationHintIndex = 1;
+ static constexpr int kCountOperationHintIndex = 0;
+ static constexpr int kUnaryOperationHintIndex = 0;
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
@@ -984,6 +999,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
+ native_context_independent_(
+ flags & BytecodeGraphBuilderFlag::kNativeContextIndependent),
+ feedback_vector_node_(nullptr),
needs_eager_checkpoint_(true),
exit_controls_(local_zone),
state_values_cache_(jsgraph),
@@ -1014,6 +1032,36 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
return function_closure_.get();
}
+void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
+ DCHECK_NULL(feedback_vector_node_);
+ feedback_vector_node_ = native_context_independent()
+ ? BuildLoadFeedbackVector()
+ : jsgraph()->Constant(feedback_vector());
+}
+
+Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
+ DCHECK(native_context_independent());
+ DCHECK_NULL(feedback_vector_node_);
+
+ // The feedback vector must exist and remain live while the generated code
+ // lives. Specifically, that means it must be created when NCI code is
+ // installed, and must not be flushed.
+
+ Environment* env = environment();
+ Node* control = env->GetControlDependency();
+ Node* effect = env->GetEffectDependency();
+
+ Node* feedback_cell = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()),
+ GetFunctionClosure(), effect, control);
+ Node* vector = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
+ feedback_cell, effect, control);
+
+ env->UpdateEffectDependency(effect);
+ return vector;
+}
+
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
NodeProperties::ReplaceContextInput(result,
@@ -1022,7 +1070,10 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(int slot_id) {
- FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+ return CreateFeedbackSource(FeedbackVector::ToSlot(slot_id));
+}
+
+FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(FeedbackSlot slot) {
return FeedbackSource(feedback_vector(), slot);
}
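
BuildLoadFeedbackVector above exists because native-context-independent code cannot embed the feedback vector as a heap constant; it chases it from the closure at runtime instead: closure -> FeedbackCell -> vector, two dependent field loads threaded through the effect chain. As plain structs (layout illustrative, not V8's):

    // Sketch of the load chain in BuildLoadFeedbackVector. Field names and
    // layout are illustrative; V8 reads them via AccessBuilder accessors.
    struct FeedbackVector;
    struct FeedbackCell { FeedbackVector* value; };
    struct JSFunction { FeedbackCell* feedback_cell; };

    FeedbackVector* LoadFeedbackVector(JSFunction* closure) {
      FeedbackCell* cell = closure->feedback_cell;  // ForJSFunctionFeedbackCell
      return cell->value;                           // ForFeedbackCellValue
    }
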
@@ -1042,6 +1093,7 @@ void BytecodeGraphBuilder::CreateGraph() {
graph()->start());
set_environment(&env);
+ CreateFeedbackVectorNode();
VisitBytecodes();
// Finish the basic structure of the graph.
@@ -2179,8 +2231,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
}
void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
- Node* literal =
- NewNode(javascript()->CreateEmptyLiteralObject(), GetFunctionClosure());
+ Node* literal = NewNode(javascript()->CreateEmptyLiteralObject());
environment()->BindAccumulator(literal);
}
@@ -2210,10 +2261,7 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
Node* callee, Node* receiver, interpreter::Register first_arg,
int arg_count) {
- // The arity of the Call node -- includes the callee, receiver and function
- // arguments.
- int arity = 2 + arg_count;
-
+ int arity = kTargetAndReceiver + arg_count;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = callee;
@@ -2222,7 +2270,7 @@ Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
// The function arguments are in consecutive registers.
int arg_base = first_arg.index();
for (int i = 0; i < arg_count; ++i) {
- all[2 + i] =
+ all[kTargetAndReceiver + i] =
environment()->LookupRegister(interpreter::Register(arg_base + i));
}
@@ -2247,7 +2295,8 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
Node* const* call_args = GetCallArgumentsFromRegisters(callee, receiver_node,
first_arg, arg_count);
- return ProcessCallArguments(call_op, call_args, 2 + arg_count);
+ return ProcessCallArguments(call_op, call_args,
+ kTargetAndReceiver + arg_count);
}
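
The bare 2s in these arity computations become the named constants kTargetAndReceiver (calls) and kTargetAndNewTarget (constructs): a call node's value inputs are laid out as [target, receiver, arg0 .. argN-1], so node arity is always the user-visible argument count plus the two implicit slots. Sketch of the buffer layout (ints standing in for Node*):

    #include <vector>

    constexpr int kTargetAndReceiver = 2;  // implicit leading inputs

    // Mirrors GetCallArgumentsFromRegisters: implicit inputs first, then
    // the user arguments from consecutive registers.
    std::vector<int> BuildCallArgs(int callee, int receiver,
                                   const std::vector<int>& args) {
      std::vector<int> all(kTargetAndReceiver + args.size());
      all[0] = callee;
      all[1] = receiver;
      for (size_t i = 0; i < args.size(); ++i) {
        all[kTargetAndReceiver + i] = args[i];
      }
      return all;
    }
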
void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
@@ -2318,8 +2367,8 @@ void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
: static_cast<int>(reg_count) - 1;
Node* const* call_args =
ProcessCallVarArgs(receiver_mode, callee, first_reg, arg_count);
- BuildCall(receiver_mode, call_args, static_cast<size_t>(2 + arg_count),
- slot_id);
+ BuildCall(receiver_mode, call_args,
+ static_cast<size_t>(kTargetAndReceiver + arg_count), slot_id);
}
void BytecodeGraphBuilder::VisitCallAnyReceiver() {
@@ -2341,9 +2390,7 @@ void BytecodeGraphBuilder::VisitCallNoFeedback() {
// The receiver is the first register, followed by the arguments in the
// consecutive registers.
int arg_count = static_cast<int>(reg_count) - 1;
- // The arity of the Call node -- includes the callee, receiver and function
- // arguments.
- int arity = 2 + arg_count;
+ int arity = kTargetAndReceiver + arg_count;
// Setting call frequency to a value less than min_inlining frequency to
// prevent inlining of one-shot call node.
@@ -2459,7 +2506,7 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessCallArguments(op, args, 2 + arg_count);
+ node = ProcessCallArguments(op, args, kTargetAndReceiver + arg_count);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2472,10 +2519,11 @@ void BytecodeGraphBuilder::VisitCallJSRuntime() {
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
int arg_count = static_cast<int>(reg_count);
- const Operator* call = javascript()->Call(2 + arg_count);
+ const Operator* call = javascript()->Call(kTargetAndReceiver + arg_count);
Node* const* call_args = ProcessCallVarArgs(
ConvertReceiverMode::kNullOrUndefined, callee, first_reg, arg_count);
- Node* value = ProcessCallArguments(call, call_args, 2 + arg_count);
+ Node* value =
+ ProcessCallArguments(call, call_args, kTargetAndReceiver + arg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -2532,8 +2580,7 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Node* const* BytecodeGraphBuilder::GetConstructArgumentsFromRegister(
Node* target, Node* new_target, interpreter::Register first_arg,
int arg_count) {
- // arity is args + callee and new target.
- int arity = arg_count + 2;
+ int arity = kTargetAndNewTarget + arg_count;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = target;
int first_arg_index = first_arg.index();
@@ -2563,9 +2610,10 @@ void BytecodeGraphBuilder::VisitConstruct() {
Node* callee = environment()->LookupRegister(callee_reg);
CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* op = javascript()->Construct(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- int arg_count = static_cast<int>(reg_count);
+ const uint32_t arg_count = static_cast<uint32_t>(reg_count);
+ const uint32_t arg_count_with_extra_args = kTargetAndNewTarget + arg_count;
+ const Operator* op =
+ javascript()->Construct(arg_count_with_extra_args, frequency, feedback);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2577,7 +2625,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessConstructArguments(op, args, 2 + arg_count);
+ node = ProcessConstructArguments(op, args, arg_count_with_extra_args);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2594,9 +2642,10 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
Node* callee = environment()->LookupRegister(callee_reg);
CallFrequency frequency = ComputeCallFrequency(slot_id);
+ const uint32_t arg_count = static_cast<uint32_t>(reg_count);
+ const uint32_t arg_count_with_extra_args = kTargetAndNewTarget + arg_count;
const Operator* op = javascript()->ConstructWithSpread(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- int arg_count = static_cast<int>(reg_count);
+ arg_count_with_extra_args, frequency, feedback);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2608,7 +2657,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessConstructArguments(op, args, 2 + arg_count);
+ node = ProcessConstructArguments(op, args, arg_count_with_extra_args);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2711,6 +2760,7 @@ void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
}
void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
+ DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* operand = environment()->LookupAccumulator();
@@ -2725,13 +2775,14 @@ void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, operand);
+ node = NewNode(op, operand, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -2748,29 +2799,12 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
+ node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-// Helper function to create binary operation hint from the recorded type
-// feedback.
-BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
- int operand_index) {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackSource source(feedback_vector(), slot);
- return broker()->GetFeedbackForBinaryOperation(source);
-}
-
-// Helper function to create compare operation hint from the recorded type
-// feedback.
-CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
- return broker()->GetFeedbackForCompareOperation(source);
-}
-
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
@@ -2810,69 +2844,103 @@ SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
}
void BytecodeGraphBuilder::VisitBitwiseNot() {
- BuildUnaryOp(javascript()->BitwiseNot());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->BitwiseNot(feedback));
}
void BytecodeGraphBuilder::VisitDec() {
- BuildUnaryOp(javascript()->Decrement());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Decrement(feedback));
}
void BytecodeGraphBuilder::VisitInc() {
- BuildUnaryOp(javascript()->Increment());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Increment(feedback));
}
void BytecodeGraphBuilder::VisitNegate() {
- BuildUnaryOp(javascript()->Negate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Negate(feedback));
}
void BytecodeGraphBuilder::VisitAdd() {
- BuildBinaryOp(
- javascript()->Add(GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Add(feedback));
}
void BytecodeGraphBuilder::VisitSub() {
- BuildBinaryOp(javascript()->Subtract());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Subtract(feedback));
}
void BytecodeGraphBuilder::VisitMul() {
- BuildBinaryOp(javascript()->Multiply());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Multiply(feedback));
}
-void BytecodeGraphBuilder::VisitDiv() { BuildBinaryOp(javascript()->Divide()); }
+void BytecodeGraphBuilder::VisitDiv() {
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Divide(feedback));
+}
void BytecodeGraphBuilder::VisitMod() {
- BuildBinaryOp(javascript()->Modulus());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Modulus(feedback));
}
void BytecodeGraphBuilder::VisitExp() {
- BuildBinaryOp(javascript()->Exponentiate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Exponentiate(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseOr() {
- BuildBinaryOp(javascript()->BitwiseOr());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseOr(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseXor() {
- BuildBinaryOp(javascript()->BitwiseXor());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseXor(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseAnd() {
- BuildBinaryOp(javascript()->BitwiseAnd());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseAnd(feedback));
}
void BytecodeGraphBuilder::VisitShiftLeft() {
- BuildBinaryOp(javascript()->ShiftLeft());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftLeft(feedback));
}
void BytecodeGraphBuilder::VisitShiftRight() {
- BuildBinaryOp(javascript()->ShiftRight());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftRight(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightLogical() {
- BuildBinaryOp(javascript()->ShiftRightLogical());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftRightLogical(feedback));
}
void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
@@ -2888,58 +2956,81 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
+ node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitAddSmi() {
- BuildBinaryOpWithImmediate(
- javascript()->Add(GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Add(feedback));
}
void BytecodeGraphBuilder::VisitSubSmi() {
- BuildBinaryOpWithImmediate(javascript()->Subtract());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Subtract(feedback));
}
void BytecodeGraphBuilder::VisitMulSmi() {
- BuildBinaryOpWithImmediate(javascript()->Multiply());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Multiply(feedback));
}
void BytecodeGraphBuilder::VisitDivSmi() {
- BuildBinaryOpWithImmediate(javascript()->Divide());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Divide(feedback));
}
void BytecodeGraphBuilder::VisitModSmi() {
- BuildBinaryOpWithImmediate(javascript()->Modulus());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Modulus(feedback));
}
void BytecodeGraphBuilder::VisitExpSmi() {
- BuildBinaryOpWithImmediate(javascript()->Exponentiate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Exponentiate(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseOr(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseXorSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseXor());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseXor(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(feedback));
}
void BytecodeGraphBuilder::VisitShiftLeftSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftLeft());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftLeft(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftRight());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftRight(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightLogicalSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical(feedback));
}
void BytecodeGraphBuilder::VisitLogicalNot() {
@@ -2986,7 +3077,9 @@ void BytecodeGraphBuilder::VisitGetSuperConstructor() {
Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
+void BytecodeGraphBuilder::BuildInstanceOf(const Operator* op) {
+ // TODO(jgruber, v8:8888): Treat InstanceOf like other compare ops.
+ DCHECK_EQ(op->opcode(), IrOpcode::kJSInstanceOf);
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -3007,28 +3100,62 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
+ PrepareEagerCheckpoint();
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, left, right, feedback_vector_node());
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitTestEqual() {
- BuildCompareOp(javascript()->Equal(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->Equal(feedback));
}
void BytecodeGraphBuilder::VisitTestEqualStrict() {
- BuildCompareOp(javascript()->StrictEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->StrictEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestLessThan() {
- BuildCompareOp(javascript()->LessThan(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->LessThan(feedback));
}
void BytecodeGraphBuilder::VisitTestGreaterThan() {
- BuildCompareOp(javascript()->GreaterThan(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->GreaterThan(feedback));
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
- BuildCompareOp(javascript()->LessThanOrEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->LessThanOrEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
- BuildCompareOp(javascript()->GreaterThanOrEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->GreaterThanOrEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestReferenceEqual() {
@@ -3052,7 +3179,7 @@ void BytecodeGraphBuilder::VisitTestIn() {
void BytecodeGraphBuilder::VisitTestInstanceOf() {
int const slot_index = bytecode_iterator().GetIndexOperand(1);
- BuildCompareOp(javascript()->InstanceOf(CreateFeedbackSource(slot_index)));
+ BuildInstanceOf(javascript()->InstanceOf(CreateFeedbackSource(slot_index)));
}
void BytecodeGraphBuilder::VisitTestUndetectable() {
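
The common thread in the visitor changes above: when collecting feedback in generic lowering, each unary, binary, and compare operator node gains the feedback vector as an extra value input, and the slot travels inside the operator as a FeedbackSource rather than being resolved to a hint up front. The node-shape change, with stand-in types (illustrative, not V8's classes):

    #include <vector>

    // Stand-ins for Node/Operator; the point is the extra input.
    struct FeedbackSource { int vector_id; int slot; };
    struct Operator { const char* name; FeedbackSource feedback; };
    struct Node { const Operator* op; std::vector<Node*> inputs; };

    Node* NewCompareNode(const Operator* op, Node* left, Node* right,
                         Node* feedback_vector_node) {
      // Previously {left, right}; the vector now rides along so generic
      // lowering's stub call can record feedback at op->feedback.slot.
      return new Node{op, {left, right, feedback_vector_node}};
    }
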
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.h b/chromium/v8/src/compiler/bytecode-graph-builder.h
index 03e900c214e..1667a4d57d2 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.h
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.h
@@ -33,6 +33,7 @@ enum class BytecodeGraphBuilderFlag : uint8_t {
// bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
+ kNativeContextIndependent = 1 << 3,
};
using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
diff --git a/chromium/v8/src/compiler/code-assembler.cc b/chromium/v8/src/compiler/code-assembler.cc
index 035d64144f6..44177c16b5b 100644
--- a/chromium/v8/src/compiler/code-assembler.cc
+++ b/chromium/v8/src/compiler/code-assembler.cc
@@ -1027,11 +1027,7 @@ Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
inputs.Add(new_target);
}
inputs.Add(arity);
-#ifdef V8_REVERSE_JSARGS
- for (auto arg : base::Reversed(args)) inputs.Add(arg);
-#else
for (auto arg : args) inputs.Add(arg);
-#endif
if (descriptor.HasContextParameter()) {
inputs.Add(context);
}
@@ -1393,6 +1389,7 @@ void CodeAssemblerLabel::MergeVariables() {
}
// If the following asserts, then you've jumped to a label without a bound
// variable along that path that expects to merge its value into a phi.
+ // This can also occur if a label is bound that is never jumped to.
DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
count == merge_count_);
USE(count);
diff --git a/chromium/v8/src/compiler/code-assembler.h b/chromium/v8/src/compiler/code-assembler.h
index d9d81cfe30c..de15e05497d 100644
--- a/chromium/v8/src/compiler/code-assembler.h
+++ b/chromium/v8/src/compiler/code-assembler.h
@@ -73,10 +73,9 @@ class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
-class WasmDebugInfo;
class Zone;
#define MAKE_FORWARD_DECLARATION(Name) class Name;
-TORQUE_INTERNAL_CLASS_LIST(MAKE_FORWARD_DECLARATION)
+TORQUE_DEFINED_CLASS_LIST(MAKE_FORWARD_DECLARATION)
#undef MAKE_FORWARD_DECLARATION
template <typename T>
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.cc b/chromium/v8/src/compiler/effect-control-linearizer.cc
index 20391eacce6..65bb2eaf053 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.cc
+++ b/chromium/v8/src/compiler/effect-control-linearizer.cc
@@ -2700,6 +2700,20 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
check_number, frame_state);
break;
}
+ case CheckTaggedInputMode::kNumberOrBoolean: {
+ auto check_done = __ MakeLabel();
+
+ __ GotoIf(check_number, &check_done);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrBoolean, feedback,
+ __ TaggedEqual(value_map, __ BooleanMapConstant()),
+ frame_state);
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ __ Goto(&check_done);
+
+ __ Bind(&check_done);
+ break;
+ }
case CheckTaggedInputMode::kNumberOrOddball: {
auto check_done = __ MakeLabel();
@@ -3756,7 +3770,7 @@ Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
Node* unreachable = __ Unreachable();
NodeProperties::ReplaceValueInput(node, unreachable, 0);
}
- return node;
+ return gasm()->AddNode(node);
}
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
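
The kNumberOrBoolean case added above accepts heap numbers on the fast path and deoptimizes unless the value's map is the boolean map; the STATIC_ASSERT guarantees that HeapNumber's value field and Oddball's to_number_raw field share an offset, so a single unchecked load serves both shapes afterwards. Scalar shape of the check (sketch):

    #include <cmath>

    enum class MapKind { kHeapNumber, kBoolean, kOther };

    // raw_payload models the float field that sits at the same offset in
    // HeapNumber and Oddball (the STATIC_ASSERT above).
    double CheckedNumberOrBooleanToFloat64(MapKind map, double raw_payload,
                                           bool* deopt) {
      if (map != MapKind::kHeapNumber && map != MapKind::kBoolean) {
        *deopt = true;  // DeoptimizeIfNot(kNotANumberOrBoolean, ...)
        return std::nan("");
      }
      return raw_payload;  // one load serves both shapes
    }
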
diff --git a/chromium/v8/src/compiler/globals.h b/chromium/v8/src/compiler/globals.h
index a8d8a47c59c..fe96783c23d 100644
--- a/chromium/v8/src/compiler/globals.h
+++ b/chromium/v8/src/compiler/globals.h
@@ -6,11 +6,24 @@
#define V8_COMPILER_GLOBALS_H_
#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
namespace compiler {
+// The nci flag is currently used to experiment with feedback collection in
+// optimized code produced by generic lowering.
+// Considerations:
+// - Should we increment the call count? https://crbug.com/v8/10524
+// - Is feedback already megamorphic in all these cases?
+//
+// TODO(jgruber): Remove once we've made a decision whether to collect feedback
+// unconditionally.
+inline bool CollectFeedbackInGenericLowering() {
+ return FLAG_turbo_collect_feedback_in_generic_lowering;
+}
+
enum class StackCheckKind {
kJSFunctionEntry = 0,
kJSIterationBody,
diff --git a/chromium/v8/src/compiler/graph-assembler.cc b/chromium/v8/src/compiler/graph-assembler.cc
index 6057f1ce649..c25930150ed 100644
--- a/chromium/v8/src/compiler/graph-assembler.cc
+++ b/chromium/v8/src/compiler/graph-assembler.cc
@@ -32,7 +32,6 @@ class GraphAssembler::BasicBlockUpdater {
void AddBranch(Node* branch, BasicBlock* tblock, BasicBlock* fblock);
void AddGoto(BasicBlock* to);
void AddGoto(BasicBlock* from, BasicBlock* to);
- void AddThrow(Node* node);
void StartBlock(BasicBlock* block);
BasicBlock* Finalize(BasicBlock* original);
@@ -268,92 +267,6 @@ void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* from,
current_block_ = nullptr;
}
-void GraphAssembler::BasicBlockUpdater::RemoveSuccessorsFromSchedule() {
- ZoneSet<BasicBlock*> blocks(temp_zone());
- ZoneQueue<BasicBlock*> worklist(temp_zone());
-
- for (SuccessorInfo succ : saved_successors_) {
- BasicBlock* block = succ.block;
- block->predecessors().erase(block->predecessors().begin() + succ.index);
- blocks.insert(block);
- worklist.push(block);
- }
- saved_successors_.clear();
-
- // Walk through blocks until we get to the end node, then remove the path from
- // end, clearing their successors / predecessors.
- // This works because the unreachable paths form self-contained control flow
- // that doesn't re-merge with reachable control flow (checked below) and
- // DeadCodeElimination::ReduceEffectPhi preventing Unreachable from going into
- // an effect-phi. We would need to extend this if we need the ability to mark
- // control flow as unreachable later in the pipeline.
- while (!worklist.empty()) {
- BasicBlock* current = worklist.front();
- worklist.pop();
-
- for (BasicBlock* successor : current->successors()) {
- // Remove the block from sucessors predecessors.
- ZoneVector<BasicBlock*>& predecessors = successor->predecessors();
- auto it = std::find(predecessors.begin(), predecessors.end(), current);
- DCHECK_EQ(*it, current);
- predecessors.erase(it);
-
- if (successor == schedule_->end()) {
- // If we have reached the end block, remove this block's control input
- // from the end node's control inputs.
- DCHECK_EQ(current->SuccessorCount(), 1);
- NodeProperties::RemoveControlFromEnd(graph_, common_,
- current->control_input());
- } else {
- // Otherwise, add successor to worklist if it's not already been seen.
- if (blocks.insert(successor).second) {
- worklist.push(successor);
- }
- }
- }
- current->ClearSuccessors();
- }
-
-#ifdef DEBUG
- // Ensure that the set of blocks being removed from the schedule are self
- // contained, i.e., all predecessors have been removed from these blocks.
- for (BasicBlock* block : blocks) {
- CHECK_EQ(block->PredecessorCount(), 0);
- CHECK_EQ(block->SuccessorCount(), 0);
- }
-#endif
-}
-
-void GraphAssembler::BasicBlockUpdater::AddThrow(Node* node) {
- if (state_ == kUnchanged) {
- CopyForChange();
- }
-
- // Clear original successors and replace the block's original control and
- // control input to the throw, since this block is now connected directly to
- // the end.
- if (original_control_input_ != nullptr) {
- NodeProperties::ReplaceUses(original_control_input_, node, nullptr, node);
- original_control_input_->Kill();
- }
- original_control_input_ = node;
- original_control_ = BasicBlock::kThrow;
-
- bool already_connected_to_end =
- saved_successors_.size() == 1 &&
- saved_successors_[0].block == schedule_->end();
- if (!already_connected_to_end) {
- // Remove all successor blocks from the schedule.
- RemoveSuccessorsFromSchedule();
-
- // Update current block's successor withend.
- DCHECK(saved_successors_.empty());
- size_t index = schedule_->end()->predecessors().size();
- schedule_->end()->AddPredecessor(current_block_);
- saved_successors_.push_back({schedule_->end(), index});
- }
-}
-
void GraphAssembler::BasicBlockUpdater::UpdateSuccessors(BasicBlock* block) {
for (SuccessorInfo succ : saved_successors_) {
(succ.block->predecessors())[succ.index] = block;
@@ -716,6 +629,11 @@ Node* GraphAssembler::Unreachable() {
graph()->NewNode(common()->Unreachable(), effect(), control()));
}
+TNode<RawPtrT> GraphAssembler::StackSlot(int size, int alignment) {
+ return AddNode<RawPtrT>(
+ graph()->NewNode(machine()->StackSlot(size, alignment)));
+}
+
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
return AddNode(graph()->NewNode(machine()->Store(rep), object, offset, value,
@@ -906,11 +824,15 @@ BasicBlock* GraphAssembler::FinalizeCurrentBlock(BasicBlock* block) {
void GraphAssembler::ConnectUnreachableToEnd() {
DCHECK_EQ(effect()->opcode(), IrOpcode::kUnreachable);
- Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- effect_ = control_ = mcgraph()->Dead();
- if (block_updater_) {
- block_updater_->AddThrow(throw_node);
+ // When maintaining the schedule we can't easily rewire the successor blocks
+ // to disconnect them from the graph, so we just leave the unreachable nodes
+ // in the schedule.
+ // TODO(9684): Add a scheduled dead-code elimination phase to remove all the
+ // subsequent unreachable code from the schedule.
+ if (!block_updater_) {
+ Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ effect_ = control_ = mcgraph()->Dead();
}
}
diff --git a/chromium/v8/src/compiler/graph-assembler.h b/chromium/v8/src/compiler/graph-assembler.h
index f57c732912b..9b0b5b42c11 100644
--- a/chromium/v8/src/compiler/graph-assembler.h
+++ b/chromium/v8/src/compiler/graph-assembler.h
@@ -133,16 +133,6 @@ class GraphAssembler;
// Wrapper classes for special node/edge types (effect, control, frame states)
// that otherwise don't fit into the type system.
-class NodeWrapper {
- public:
- explicit constexpr NodeWrapper(Node* node) : node_(node) {}
- operator Node*() const { return node_; }
- Node* operator->() const { return node_; }
-
- private:
- Node* node_;
-};
-
class Effect : public NodeWrapper {
public:
explicit constexpr Effect(Node* node) : NodeWrapper(node) {
@@ -313,6 +303,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* TypeGuard(Type type, Node* value);
Node* Checkpoint(FrameState frame_state);
+ TNode<RawPtrT> StackSlot(int size, int alignment);
+
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value);
Node* Load(MachineType type, Node* object, Node* offset);
diff --git a/chromium/v8/src/compiler/graph-visualizer.cc b/chromium/v8/src/compiler/graph-visualizer.cc
index 86e3da9d27a..4b327ca285e 100644
--- a/chromium/v8/src/compiler/graph-visualizer.cc
+++ b/chromium/v8/src/compiler/graph-visualizer.cc
@@ -964,6 +964,118 @@ void PrintScheduledGraph(std::ostream& os, const Schedule* schedule) {
} // namespace
+std::ostream& operator<<(std::ostream& os,
+ const LiveRangeAsJSON& live_range_json) {
+ const LiveRange& range = live_range_json.range_;
+ os << "{\"id\":" << range.relative_id() << ",\"type\":";
+ if (range.HasRegisterAssigned()) {
+ const InstructionOperand op = range.GetAssignedOperand();
+ os << "\"assigned\",\"op\":"
+ << InstructionOperandAsJSON{&op, &(live_range_json.code_)};
+ } else if (range.spilled() && !range.TopLevel()->HasNoSpillType()) {
+ const TopLevelLiveRange* top = range.TopLevel();
+ if (top->HasSpillOperand()) {
+ os << "\"assigned\",\"op\":"
+ << InstructionOperandAsJSON{top->GetSpillOperand(),
+ &(live_range_json.code_)};
+ } else {
+ int index = top->GetSpillRange()->assigned_slot();
+ os << "\"spilled\",\"op\":";
+ if (IsFloatingPoint(top->representation())) {
+ os << "\"fp_stack:" << index << "\"";
+ } else {
+ os << "\"stack:" << index << "\"";
+ }
+ }
+ } else {
+ os << "\"none\"";
+ }
+
+ os << ",\"intervals\":[";
+ bool first = true;
+ for (const UseInterval* interval = range.first_interval();
+ interval != nullptr; interval = interval->next()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << "[" << interval->start().value() << "," << interval->end().value()
+ << "]";
+ }
+
+ os << "],\"uses\":[";
+ first = true;
+ for (UsePosition* current_pos = range.first_pos(); current_pos != nullptr;
+ current_pos = current_pos->next()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << current_pos->pos().value();
+ }
+
+ os << "]}";
+ return os;
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ const TopLevelLiveRangeAsJSON& top_level_live_range_json) {
+ int vreg = top_level_live_range_json.range_.vreg();
+ bool first = true;
+ os << "\"" << (vreg > 0 ? vreg : -vreg) << "\":{ \"child_ranges\":[";
+ for (const LiveRange* child = &(top_level_live_range_json.range_);
+ child != nullptr; child = child->next()) {
+ if (!top_level_live_range_json.range_.IsEmpty()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << LiveRangeAsJSON{*child, top_level_live_range_json.code_};
+ }
+ }
+ os << "]";
+ if (top_level_live_range_json.range_.IsFixed()) {
+ os << ", \"is_deferred\": "
+ << (top_level_live_range_json.range_.IsDeferredFixed() ? "true"
+ : "false");
+ }
+ os << "}";
+ return os;
+}
+
+void PrintTopLevelLiveRanges(std::ostream& os,
+ const ZoneVector<TopLevelLiveRange*> ranges,
+ const InstructionSequence& code) {
+ bool first = true;
+ os << "{";
+ for (const TopLevelLiveRange* range : ranges) {
+ if (range != nullptr && !range->IsEmpty()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << TopLevelLiveRangeAsJSON{*range, code};
+ }
+ }
+ os << "}";
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const RegisterAllocationDataAsJSON& ac) {
+ os << "\"fixed_double_live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.fixed_double_live_ranges(), ac.code_);
+ os << ",\"fixed_live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.fixed_live_ranges(), ac.code_);
+ os << ",\"live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.live_ranges(), ac.code_);
+ return os;
+}
+
std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled) {
PrintScheduledGraph(os, scheduled.schedule);
return os;
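
Taken together, the new operators serialize register-allocation state as JSON keyed by virtual register; the emitted shape looks roughly like this (values illustrative, operand object elided):

    "42": {"child_ranges": [
      {"id": 0, "type": "assigned", "op": {...},
       "intervals": [[10, 24], [30, 40]],
       "uses": [12, 22, 34]}
    ]}
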
@@ -1121,8 +1233,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i_json) {
bool first = true;
for (MoveOperands* move : *pm) {
if (move->IsEliminated()) continue;
- if (!first) os << ",";
- first = false;
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
os << "[" << InstructionOperandAsJSON{&move->destination(), i_json.code_}
<< "," << InstructionOperandAsJSON{&move->source(), i_json.code_}
<< "]";
@@ -1228,7 +1343,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b) {
std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s) {
const InstructionSequence* code = s.sequence_;
- os << "\"blocks\": [";
+ os << "[";
bool need_comma = false;
for (int i = 0; i < code->InstructionBlockCount(); i++) {
diff --git a/chromium/v8/src/compiler/graph-visualizer.h b/chromium/v8/src/compiler/graph-visualizer.h
index 05f522b6bca..55859330157 100644
--- a/chromium/v8/src/compiler/graph-visualizer.h
+++ b/chromium/v8/src/compiler/graph-visualizer.h
@@ -22,6 +22,8 @@ class SourcePosition;
namespace compiler {
class Graph;
+class LiveRange;
+class TopLevelLiveRange;
class Instruction;
class InstructionBlock;
class InstructionOperand;
@@ -155,6 +157,30 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac);
+struct LiveRangeAsJSON {
+ const LiveRange& range_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const LiveRangeAsJSON& live_range_json);
+
+struct TopLevelLiveRangeAsJSON {
+ const TopLevelLiveRange& range_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(
+ std::ostream& os, const TopLevelLiveRangeAsJSON& top_level_live_range_json);
+
+struct RegisterAllocationDataAsJSON {
+ const RegisterAllocationData& data_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const RegisterAllocationDataAsJSON& ac);
+
struct InstructionOperandAsJSON {
const InstructionOperand* op_;
const InstructionSequence* code_;
diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc
index 947f54c4109..a2f9aaeb6ff 100644
--- a/chromium/v8/src/compiler/js-call-reducer.cc
+++ b/chromium/v8/src/compiler/js-call-reducer.cc
@@ -65,7 +65,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
outermost_catch_scope_.set_has_handler(has_handler);
outermost_catch_scope_.set_gasm(this);
}
- virtual ~JSCallReducerAssembler() {}
TNode<Object> ReduceMathUnary(const Operator* op);
TNode<Object> ReduceMathBinary(const Operator* op);
@@ -793,11 +792,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
int ConstructArity() const {
DCHECK_EQ(IrOpcode::kJSConstruct, node_ptr()->opcode());
ConstructParameters const& p = ConstructParametersOf(node_ptr()->op());
- static constexpr int kTarget = 1; // The first input.
- static constexpr int kNewTarget = 1; // The last input.
- static constexpr int kExtraArgs = kTarget + kNewTarget;
- DCHECK_GE(p.arity(), kExtraArgs);
- return static_cast<int>(p.arity() - kExtraArgs);
+ return p.arity_without_implicit_args();
}
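The helper replaces the ad-hoc subtraction just removed. The convention it
encodes, as a sketch (the constants are introduced elsewhere in this patch;
the exact declarations live in the JS operator headers):

    // Call nodes carry two implicit value inputs ahead of the real arguments:
    //   JSCall:      target + receiver   (kTargetAndReceiver == 2)
    //   JSConstruct: target + new.target (kTargetAndNewTarget == 2)
    // e.g. `new Promise(executor)` has p.arity() == 3 and
    // p.arity_without_implicit_args() == 1.
    int arity_without_implicit_args(int raw_arity) {
      return raw_arity - 2;  // strip the two implicit inputs
    }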
TNode<Object> NewTargetInput() const {
@@ -846,7 +841,8 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
FeedbackSource no_feedback_source{};
MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(4, p.frequency(), no_feedback_source,
+ javascript()->Call(2 + kTargetAndReceiver, p.frequency(),
+ no_feedback_source,
ConvertReceiverMode::kNullOrUndefined),
executor, UndefinedConstant(), resolve, reject, ContextInput(),
frame_state, effect(), control()));
@@ -859,7 +855,8 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
FeedbackSource no_feedback_source{};
MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(3, p.frequency(), no_feedback_source,
+ javascript()->Call(1 + kTargetAndReceiver, p.frequency(),
+ no_feedback_source,
ConvertReceiverMode::kNullOrUndefined),
reject, UndefinedConstant(), exception, ContextInput(), frame_state,
effect(), control()));
@@ -1012,7 +1009,7 @@ TNode<Object> JSCallReducerAssembler::JSCall3(
CallParameters const& p = CallParametersOf(node_ptr()->op());
return MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(5, p.frequency(), p.feedback(),
+ javascript()->Call(3 + kTargetAndReceiver, p.frequency(), p.feedback(),
ConvertReceiverMode::kAny, p.speculation_mode(),
CallFeedbackRelation::kUnrelated),
function, this_arg, arg0, arg1, arg2, ContextInput(), frame_state,
@@ -1027,7 +1024,7 @@ TNode<Object> JSCallReducerAssembler::JSCall4(
CallParameters const& p = CallParametersOf(node_ptr()->op());
return MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(6, p.frequency(), p.feedback(),
+ javascript()->Call(4 + kTargetAndReceiver, p.frequency(), p.feedback(),
ConvertReceiverMode::kAny, p.speculation_mode(),
CallFeedbackRelation::kUnrelated),
function, this_arg, arg0, arg1, arg2, arg3, ContextInput(), frame_state,
@@ -2340,8 +2337,7 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
// Turn the {node} into a {JSCreateArray} call.
- DCHECK_LE(2u, p.arity());
- size_t const arity = p.arity() - 2;
+ size_t const arity = p.arity_without_implicit_args();
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
NodeProperties::ChangeOp(
@@ -2355,9 +2351,9 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
// Replace the {node} with a proper {ToBoolean} operator.
- DCHECK_LE(2u, p.arity());
- Node* value = (p.arity() == 2) ? jsgraph()->UndefinedConstant()
- : NodeProperties::GetValueInput(node, 2);
+ Node* value = (p.arity_without_implicit_args() == 0)
+ ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
value = graph()->NewNode(simplified()->ToBoolean(), value);
ReplaceWithValue(node, value);
return Replace(value);
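The hard-coded index 2 here (and throughout the reducers below) relies on the
value-input layout assumed in this file; schematically:

    // JSCall value inputs: 0 = target (callee), 1 = receiver (thisArg),
    // 2..n = JavaScript arguments, then context, frame state, effect, control.
    Node* first_arg = NodeProperties::GetValueInput(node, 2);  // argument #0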
@@ -2367,9 +2363,8 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- if (p.arity() < 3) return NoChange();
- Node* value = (p.arity() >= 3) ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
+ if (p.arity_without_implicit_args() < 1) return NoChange();
+ Node* value = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
// We can fold away the Object(x) call if |x| is definitely not a primitive.
@@ -2394,15 +2389,14 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
- if (arity == 2) {
+ if (arity == 0) {
// Neither thisArg nor argArray was provided.
convert_mode = ConvertReceiverMode::kNullOrUndefined;
node->ReplaceInput(0, node->InputAt(1));
node->ReplaceInput(1, jsgraph()->UndefinedConstant());
- } else if (arity == 3) {
+ } else if (arity == 1) {
// The argArray was not provided, just remove the {target}.
node->RemoveInput(0);
--arity;
@@ -2423,7 +2417,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
node->ReplaceInput(0, target);
node->ReplaceInput(1, this_argument);
node->ReplaceInput(2, arguments_list);
- while (arity-- > 3) node->RemoveInput(3);
+ while (arity-- > 1) node->RemoveInput(3);
// Morph the {node} to a {JSCallWithArrayLike}.
NodeProperties::ChangeOp(
@@ -2465,9 +2459,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
Node* effect1 = effect;
Node* control1 =
graph()->NewNode(common()->Merge(2), if_null, if_undefined);
- Node* value1 = effect1 = control1 =
- graph()->NewNode(javascript()->Call(2), target, this_argument,
- context, frame_state, effect1, control1);
+ Node* value1 = effect1 = control1 = graph()->NewNode(
+ javascript()->Call(0 + kTargetAndReceiver), target, this_argument,
+ context, frame_state, effect1, control1);
// Rewire potential exception edges.
Node* if_exception = nullptr;
@@ -2504,8 +2498,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
// Change {node} to the new {JSCall} operator.
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(), convert_mode,
- p.speculation_mode(),
+ node, javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -2625,17 +2619,19 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
control, p.feedback());
// Replace the {node} with a JSCreateBoundFunction.
- int const arity = std::max(0, node->op()->ValueInputCount() - 3);
- int const input_count = 2 + arity + 3;
+ static constexpr int kContextEffectAndControl = 3;
+ int const arity =
+ std::max(0, node->op()->ValueInputCount() - kContextEffectAndControl);
+ int const input_count = kTargetAndReceiver + arity + kContextEffectAndControl;
Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
inputs[0] = receiver;
inputs[1] = bound_this;
for (int i = 0; i < arity; ++i) {
- inputs[2 + i] = NodeProperties::GetValueInput(node, 3 + i);
+ inputs[kTargetAndReceiver + i] = NodeProperties::GetValueInput(node, 3 + i);
}
- inputs[2 + arity + 0] = context;
- inputs[2 + arity + 1] = effect;
- inputs[2 + arity + 2] = control;
+ inputs[kTargetAndReceiver + arity + 0] = context;
+ inputs[kTargetAndReceiver + arity + 1] = effect;
+ inputs[kTargetAndReceiver + arity + 2] = control;
Node* value = effect =
graph()->NewNode(javascript()->CreateBoundFunction(arity, map.object()),
input_count, inputs);
@@ -2675,10 +2671,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
// Remove the target from {node} and use the receiver as target instead, and
   // the thisArg becomes the new receiver. If thisArg was not provided, insert
// undefined instead.
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode;
- if (arity == 2) {
+ if (arity == 0) {
// The thisArg was not provided, use undefined as receiver.
convert_mode = ConvertReceiverMode::kNullOrUndefined;
node->ReplaceInput(0, node->InputAt(1));
@@ -2690,8 +2685,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
--arity;
}
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(), convert_mode,
- p.speculation_mode(),
+ node, javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -2780,7 +2775,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototypeOf(Node* node) {
Reduction JSCallReducer::ReduceObjectIs(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& params = CallParametersOf(node->op());
- int const argc = static_cast<int>(params.arity() - 2);
+ int const argc = params.arity_without_implicit_args();
Node* lhs = (argc >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* rhs = (argc >= 2) ? NodeProperties::GetValueInput(node, 3)
@@ -2801,7 +2796,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& params = CallParametersOf(node->op());
- int const argc = static_cast<int>(params.arity() - 2);
+ int const argc = params.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* name = (argc >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
@@ -2911,8 +2906,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
Reduction JSCallReducer::ReduceReflectApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
// Massage value inputs appropriately.
node->RemoveInput(0);
node->RemoveInput(0);
@@ -2933,8 +2927,7 @@ Reduction JSCallReducer::ReduceReflectApply(Node* node) {
Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
// Massage value inputs appropriately.
node->RemoveInput(0);
node->RemoveInput(0);
@@ -2947,8 +2940,8 @@ Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
while (arity-- > 3) {
node->RemoveInput(arity);
}
- NodeProperties::ChangeOp(node,
- javascript()->ConstructWithArrayLike(p.frequency()));
+ NodeProperties::ChangeOp(
+ node, javascript()->ConstructWithArrayLike(p.frequency(), p.feedback()));
return Changed(node).FollowedBy(ReduceJSConstructWithArrayLike(node));
}
@@ -2988,7 +2981,7 @@ Reduction JSCallReducer::ReduceObjectCreate(Node* node) {
Reduction JSCallReducer::ReduceReflectGet(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
if (arity != 2) return NoChange();
Node* target = NodeProperties::GetValueInput(node, 2);
Node* key = NodeProperties::GetValueInput(node, 3);
@@ -3063,8 +3056,7 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Reduction JSCallReducer::ReduceReflectHas(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
Node* target = (arity >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* key = (arity >= 2) ? NodeProperties::GetValueInput(node, 3)
@@ -3403,7 +3395,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int const argc = static_cast<int>(p.arity()) - 2;
+ int const argc = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* global_proxy =
jsgraph()->Constant(native_context().global_proxy_object());
@@ -3491,10 +3483,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(
function_template_info.LookupHolderOfExpectedType(receiver_map);
if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
- if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
- return inference.NoChange();
- if (!api_holder.holder->equals(*holder_i.holder))
- return inference.NoChange();
+ DCHECK(holder_i.lookup == CallOptimization::kHolderFound ||
+ holder_i.lookup == CallOptimization::kHolderIsReceiver);
+ if (holder_i.lookup == CallOptimization::kHolderFound) {
+ DCHECK(api_holder.holder.has_value() && holder_i.holder.has_value());
+ if (!api_holder.holder->equals(*holder_i.holder)) {
+ return inference.NoChange();
+ }
+ }
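For reference, the lookup states being distinguished (a sketch of the assumed
CallOptimization result type; only the two asserted values can reach here):

    enum HolderLookup {
      kHolderNotFound,    // ruled out before the DCHECK above
      kHolderIsReceiver,  // receiver is its own holder; nothing to compare
      kHolderFound,       // explicit holder: bail out unless both lookups agree
    };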
CHECK(receiver_map.IsJSReceiverMap());
CHECK(!receiver_map.is_access_check_needed() ||
@@ -3677,14 +3673,14 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
case IrOpcode::kJSCallWithSpread: {
// Ignore uses as spread input to calls with spread.
CallParameters p = CallParametersOf(user->op());
- int const arity = static_cast<int>(p.arity() - 1);
- if (user->InputAt(arity) == arguments_list) continue;
+ int const arity = p.arity_without_implicit_args();
+ if (user->InputAt(arity + 1) == arguments_list) continue;
break;
}
case IrOpcode::kJSConstructWithSpread: {
// Ignore uses as spread input to construct with spread.
ConstructParameters p = ConstructParametersOf(user->op());
- int const arity = static_cast<int>(p.arity() - 2);
+ int const arity = p.arity_without_implicit_args();
if (user->InputAt(arity) == arguments_list) continue;
break;
}
@@ -3775,7 +3771,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
return Changed(node).FollowedBy(ReduceJSCall(node));
} else {
NodeProperties::ChangeOp(
- node, javascript()->Construct(arity + 2, frequency, feedback));
+ node, javascript()->Construct(arity + kTargetAndNewTarget, frequency,
+ feedback));
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -3875,8 +3872,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
@@ -3923,9 +3919,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
}
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(),
- convert_mode, p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node,
+ javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -3976,9 +3973,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
? ConvertReceiverMode::kAny
: ConvertReceiverMode::kNotNullOrUndefined;
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(),
- convert_mode, p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node,
+ javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -4416,30 +4414,29 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
const CallParameters& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity());
- DCHECK_EQ(arity, 2);
+ int arity = p.arity_without_implicit_args();
+ DCHECK_EQ(arity, 0);
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, arity, p.frequency(), p.feedback(), p.speculation_mode(),
- p.feedback_relation());
+ node, arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ p.speculation_mode(), p.feedback_relation());
}
Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- DCHECK_LE(3u, p.arity());
- int arity = static_cast<int>(p.arity() - 1);
+ int arity = p.arity_without_implicit_args();
+  DCHECK_GE(arity, 1);
CallFrequency frequency = p.frequency();
FeedbackSource feedback = p.feedback();
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, arity, frequency, feedback, p.speculation_mode(),
- p.feedback_relation());
+ node, arity + kTargetAndReceiver - 1, frequency, feedback,
+ p.speculation_mode(), p.feedback_relation());
}
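The `- 1` deserves a worked example, assuming the input layout used throughout
this file:

    // f(...xs) as JSCallWithSpread has value inputs [f, receiver, xs], so
    // p.arity() == 3 and arity == arity_without_implicit_args() == 1.
    // The reduced operator is built with
    //   arity + kTargetAndReceiver - 1 == 2
    // because the spread input xs is consumed by the reduction rather than
    // forwarded as a regular argument.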
Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(2u, p.arity());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -4615,8 +4612,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Update the JSConstruct operator on {node}.
NodeProperties::ChangeOp(
- node,
- javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
+ node, javascript()->Construct(arity + kTargetAndNewTarget,
+ p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
return Changed(node).FollowedBy(ReduceJSConstruct(node));
@@ -4655,8 +4652,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Update the JSConstruct operator on {node}.
NodeProperties::ChangeOp(
- node,
- javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
+ node, javascript()->Construct(arity + kTargetAndNewTarget,
+ p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
return Changed(node).FollowedBy(ReduceJSConstruct(node));
@@ -4835,17 +4832,19 @@ Reduction JSCallReducer::ReduceStringPrototypeSubstr(Node* node) {
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
- CallFrequency frequency = CallFrequencyOf(node->op());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
+ const int arity = p.arity_without_implicit_args();
+ DCHECK_EQ(arity, 1);
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, 1, frequency, FeedbackSource(),
+ node, arity, p.frequency(), p.feedback(),
SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kRelated);
}
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(3u, p.arity());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
+ DCHECK_LE(1u, arity);
CallFrequency frequency = p.frequency();
FeedbackSource feedback = p.feedback();
return ReduceCallOrConstructWithArrayLikeOrSpread(
@@ -6094,6 +6093,10 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
}
Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
+ // TODO(jgruber): We could reduce here when generating native context
+ // independent code, if LowerJSCreateStringIterator were implemented in
+ // generic lowering.
+ if (broker()->is_native_context_independent()) return NoChange();
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
@@ -6219,6 +6222,11 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ // TODO(jgruber): We could reduce here when generating native context
+ // independent code, if LowerJSCreatePromise were implemented in generic
+ // lowering.
+ if (broker()->is_native_context_independent()) return NoChange();
+
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
PromiseBuiltinReducerAssembler a(jsgraph(), temp_zone(), node, broker());
@@ -6261,7 +6269,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -6285,10 +6293,10 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
node->InsertInput(graph()->zone(), 2, jsgraph()->UndefinedConstant());
}
NodeProperties::ChangeOp(
- node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
- ConvertReceiverMode::kNotNullOrUndefined,
- p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->Call(
+ arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
@@ -6309,7 +6317,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* on_finally = arity >= 1 ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
@@ -6414,10 +6422,10 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
node->ReplaceInput(2, then_finally);
node->ReplaceInput(3, catch_finally);
NodeProperties::ChangeOp(
- node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
- ConvertReceiverMode::kNotNullOrUndefined,
- p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->Call(
+ arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
@@ -6525,7 +6533,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
Node* node, const SharedFunctionInfoRef& shared) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* arg1 = (arity >= 1) ? NodeProperties::GetValueInput(node, 1)
: jsgraph()->UndefinedConstant();
diff --git a/chromium/v8/src/compiler/js-create-lowering.cc b/chromium/v8/src/compiler/js-create-lowering.cc
index d0059030d50..9674a436adb 100644
--- a/chromium/v8/src/compiler/js-create-lowering.cc
+++ b/chromium/v8/src/compiler/js-create-lowering.cc
@@ -28,6 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -1507,16 +1508,15 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
- a.AllocateArray(mapped_count + 2,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
- context);
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
- arguments);
+ a.AllocateSloppyArgumentElements(
+ mapped_count,
+ MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = shared.context_header_size() + parameter_count - 1 - i;
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
- jsgraph()->Constant(idx));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsMappedEntry(),
+ jsgraph()->Constant(i), jsgraph()->Constant(idx));
}
return a.Finish();
}
@@ -1553,12 +1553,11 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(mapped_count + 2,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
- context);
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
- arguments);
+ a.AllocateSloppyArgumentElements(
+ mapped_count,
+ MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = shared.context_header_size() + parameter_count - 1 - i;
Node* value = graph()->NewNode(
@@ -1566,8 +1565,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
graph()->NewNode(simplified()->NumberLessThan(), jsgraph()->Constant(i),
arguments_length),
jsgraph()->Constant(idx), jsgraph()->TheHoleConstant());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
- value);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsMappedEntry(),
+ jsgraph()->Constant(i), value);
}
return a.Finish();
}
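Both hunks swap indexed FixedArray stores for named accessors. The slot
mapping they encode, read off the old element indices (the authoritative
layout is the Torque-generated SloppyArgumentsElements class):

    // old FixedArray element   ->  new SloppyArgumentsElements field
    //   element 0              ->    context
    //   element 1              ->    arguments
    //   element 2 + i          ->    mapped_entries[i]
    // hence AllocateSloppyArgumentElements(mapped_count, map) replaces
    // AllocateArray(mapped_count + 2, map).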
diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc
index cedb5bc42d5..8dbb64fe662 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.cc
+++ b/chromium/v8/src/compiler/js-generic-lowering.cc
@@ -18,6 +18,7 @@
#include "src/objects/feedback-cell.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/scope-info.h"
+#include "src/objects/template-objects-inl.h"
namespace v8 {
namespace internal {
@@ -42,10 +43,10 @@ JSGenericLowering::~JSGenericLowering() = default;
Reduction JSGenericLowering::Reduce(Node* node) {
switch (node->opcode()) {
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- Lower##x(node); \
- break;
+#define DECLARE_CASE(x, ...) \
+ case IrOpcode::k##x: \
+ Lower##x(node); \
+ break;
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
default:
@@ -55,34 +56,11 @@ Reduction JSGenericLowering::Reduce(Node* node) {
return Changed(node);
}
-#define REPLACE_STUB_CALL(Name) \
- void JSGenericLowering::LowerJS##Name(Node* node) { \
- CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
- Callable callable = Builtins::CallableFor(isolate(), Builtins::k##Name); \
- ReplaceWithStubCall(node, callable, flags); \
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceWithBuiltinCall(node, Builtins::k##Name); \
}
-REPLACE_STUB_CALL(Add)
-REPLACE_STUB_CALL(Subtract)
-REPLACE_STUB_CALL(Multiply)
-REPLACE_STUB_CALL(Divide)
-REPLACE_STUB_CALL(Modulus)
-REPLACE_STUB_CALL(Exponentiate)
-REPLACE_STUB_CALL(BitwiseAnd)
-REPLACE_STUB_CALL(BitwiseOr)
-REPLACE_STUB_CALL(BitwiseXor)
-REPLACE_STUB_CALL(ShiftLeft)
-REPLACE_STUB_CALL(ShiftRight)
-REPLACE_STUB_CALL(ShiftRightLogical)
-REPLACE_STUB_CALL(LessThan)
-REPLACE_STUB_CALL(LessThanOrEqual)
-REPLACE_STUB_CALL(GreaterThan)
-REPLACE_STUB_CALL(GreaterThanOrEqual)
-REPLACE_STUB_CALL(BitwiseNot)
-REPLACE_STUB_CALL(Decrement)
-REPLACE_STUB_CALL(Increment)
-REPLACE_STUB_CALL(Negate)
REPLACE_STUB_CALL(HasProperty)
-REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToNumberConvertBigInt)
@@ -101,16 +79,21 @@ REPLACE_STUB_CALL(RejectPromise)
REPLACE_STUB_CALL(ResolvePromise)
#undef REPLACE_STUB_CALL
-void JSGenericLowering::ReplaceWithStubCall(Node* node,
- Callable callable,
- CallDescriptor::Flags flags) {
- ReplaceWithStubCall(node, callable, flags, node->op()->properties());
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+ Builtins::Name builtin) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = Builtins::CallableFor(isolate(), builtin);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
-void JSGenericLowering::ReplaceWithStubCall(Node* node,
- Callable callable,
- CallDescriptor::Flags flags,
- Operator::Properties properties) {
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node, Callable callable,
+ CallDescriptor::Flags flags) {
+ ReplaceWithBuiltinCall(node, callable, flags, node->op()->properties());
+}
+
+void JSGenericLowering::ReplaceWithBuiltinCall(
+ Node* node, Callable callable, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(), flags,
@@ -120,7 +103,6 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node,
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
-
void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Runtime::FunctionId f,
int nargs_override) {
@@ -138,13 +120,114 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
+void JSGenericLowering::ReplaceUnaryOpWithBuiltinCall(
+ Node* node, Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback) {
+ DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = Builtins::CallableFor(isolate(), builtin_with_feedback);
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+ node->op()->properties());
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ STATIC_ASSERT(JSUnaryOpNode::ValueIndex() == 0);
+ STATIC_ASSERT(JSUnaryOpNode::FeedbackVectorIndex() == 1);
+ DCHECK_EQ(node->op()->ValueInputCount(), 2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, slot);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ node->RemoveInput(JSUnaryOpNode::FeedbackVectorIndex());
+ ReplaceWithBuiltinCall(node, builtin_without_feedback);
+ }
+}
+
+#define DEF_UNARY_LOWERING(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceUnaryOpWithBuiltinCall(node, Builtins::k##Name, \
+ Builtins::k##Name##_WithFeedback); \
+ }
+DEF_UNARY_LOWERING(BitwiseNot)
+DEF_UNARY_LOWERING(Decrement)
+DEF_UNARY_LOWERING(Increment)
+DEF_UNARY_LOWERING(Negate)
+#undef DEF_UNARY_LOWERING
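For concreteness, DEF_UNARY_LOWERING(Negate) expands to:

    void JSGenericLowering::LowerJSNegate(Node* node) {
      ReplaceUnaryOpWithBuiltinCall(node, Builtins::kNegate,
                                    Builtins::kNegate_WithFeedback);
    }

The binary macro below follows the same pattern via
ReplaceBinaryOpWithBuiltinCall.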
+
+void JSGenericLowering::ReplaceBinaryOpWithBuiltinCall(
+ Node* node, Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
+ Builtins::Name builtin_id;
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ STATIC_ASSERT(JSBinaryOpNode::LeftIndex() == 0);
+ STATIC_ASSERT(JSBinaryOpNode::RightIndex() == 1);
+ STATIC_ASSERT(JSBinaryOpNode::FeedbackVectorIndex() == 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 3);
+ node->InsertInput(zone(), 2, slot);
+ builtin_id = builtin_with_feedback;
+ } else {
+ node->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ builtin_id = builtin_without_feedback;
+ }
+
+ ReplaceWithBuiltinCall(node, builtin_id);
+}
+
+#define DEF_BINARY_LOWERING(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceBinaryOpWithBuiltinCall(node, Builtins::k##Name, \
+ Builtins::k##Name##_WithFeedback); \
+ }
+// Binary ops.
+DEF_BINARY_LOWERING(Add)
+DEF_BINARY_LOWERING(BitwiseAnd)
+DEF_BINARY_LOWERING(BitwiseOr)
+DEF_BINARY_LOWERING(BitwiseXor)
+DEF_BINARY_LOWERING(Divide)
+DEF_BINARY_LOWERING(Exponentiate)
+DEF_BINARY_LOWERING(Modulus)
+DEF_BINARY_LOWERING(Multiply)
+DEF_BINARY_LOWERING(ShiftLeft)
+DEF_BINARY_LOWERING(ShiftRight)
+DEF_BINARY_LOWERING(ShiftRightLogical)
+DEF_BINARY_LOWERING(Subtract)
+// Compare ops.
+DEF_BINARY_LOWERING(Equal)
+DEF_BINARY_LOWERING(GreaterThan)
+DEF_BINARY_LOWERING(GreaterThanOrEqual)
+DEF_BINARY_LOWERING(LessThan)
+DEF_BINARY_LOWERING(LessThanOrEqual)
+#undef DEF_BINARY_LOWERING
+
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
// The === operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kStrictEqual);
- node->RemoveInput(4); // control
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
- Operator::kEliminatable);
+ node->RemoveInput(NodeProperties::FirstControlIndex(node));
+
+ Builtins::Name builtin_id;
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ STATIC_ASSERT(JSStrictEqualNode::LeftIndex() == 0);
+ STATIC_ASSERT(JSStrictEqualNode::RightIndex() == 1);
+ STATIC_ASSERT(JSStrictEqualNode::FeedbackVectorIndex() == 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 3);
+ node->InsertInput(zone(), 2, slot);
+ builtin_id = Builtins::kStrictEqual_WithFeedback;
+ } else {
+ node->RemoveInput(JSStrictEqualNode::FeedbackVectorIndex());
+ builtin_id = Builtins::kStrictEqual;
+ }
+
+ Callable callable = Builtins::CallableFor(isolate(), builtin_id);
+ ReplaceWithBuiltinCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
}
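The two branches yield differently shaped builtin calls; roughly, following
the input edits above:

    // feedback collected:  StrictEqual_WithFeedback(lhs, rhs, slot, vector)
    // otherwise:           StrictEqual(lhs, rhs)   (vector input dropped)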
namespace {
@@ -164,57 +247,49 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
} // namespace
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kKeyedLoadICTrampoline_Megamorphic
- : Builtins::kKeyedLoadICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kKeyedLoadICTrampoline_Megamorphic
+ : Builtins::kKeyedLoadICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kKeyedLoadIC_Megamorphic
- : Builtins::kKeyedLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kKeyedLoadIC_Megamorphic
+ : Builtins::kKeyedLoadIC);
}
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
if (!p.feedback().IsValid()) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetProperty);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kGetProperty);
return;
}
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kLoadICTrampoline_Megamorphic
- : Builtins::kLoadICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kLoadICTrampoline_Megamorphic
+ : Builtins::kLoadICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kLoadIC_Megamorphic
- : Builtins::kLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kLoadIC_Megamorphic
+ : Builtins::kLoadIC);
}
}
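The outer_state check is how these IC lowerings distinguish inlined frames
from top-level ones; schematically:

    // outer_state is not a FrameState => not inlined: the trampoline variant
    //   loads the feedback vector from the frame itself:
    //     LoadICTrampoline(receiver, name, slot)
    // outer_state is a FrameState => inlined: pass the vector explicitly:
    //     LoadIC(receiver, name, slot, vector)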
@@ -228,50 +303,56 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
Callable callable =
CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 2, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
}
void JSGenericLowering::LowerJSGetIterator(Node* node) {
// TODO(v8:9625): Currently, the GetIterator operator is desugared in the
// native context specialization phase. Thus, the following generic lowering
- // would never be reachable. We can add a check in native context
- // specialization to avoid desugaring the GetIterator operator when in the
- // case of megamorphic feedback and here, add a call to the
- // 'GetIteratorWithFeedback' builtin. This would reduce the size of the
- // compiled code as it would insert 1 call to the builtin instead of 2 calls
- // resulting from the generic lowering of the LoadNamed and Call operators.
- UNREACHABLE();
+ // is not reachable unless that phase is disabled (e.g. for
+ // native-context-independent code).
+ // We can add a check in native context specialization to avoid desugaring
+ // the GetIterator operator when feedback is megamorphic. This would reduce
+ // the size of the compiled code as it would insert 1 call to the builtin
+ // instead of 2 calls resulting from the generic lowering of the LoadNamed
+ // and Call operators.
+
+ GetIteratorParameters const& p = GetIteratorParametersOf(node->op());
+ Node* load_slot =
+ jsgraph()->TaggedIndexConstant(p.loadFeedback().slot.ToInt());
+ Node* call_slot =
+ jsgraph()->TaggedIndexConstant(p.callFeedback().slot.ToInt());
+ Node* feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
+ node->InsertInput(zone(), 1, load_slot);
+ node->InsertInput(zone(), 2, call_slot);
+ node->InsertInput(zone(), 3, feedback);
+
+ ReplaceWithBuiltinCall(node, Builtins::kGetIteratorWithFeedback);
}
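After the three InsertInput calls, the node's value inputs are, in order:

    // 0: receiver (unchanged), 1: load_slot, 2: call_slot, 3: feedback vector
    // i.e. the node becomes a call shaped as
    //   GetIteratorWithFeedback(receiver, load_slot, call_slot, vector)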
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedStoreICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kKeyedStoreICTrampoline);
} else {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kKeyedStoreIC);
}
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
@@ -283,14 +364,11 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreIC);
}
}
@@ -304,17 +382,16 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::StoreOwnIC(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
@@ -322,15 +399,11 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreGlobalICTrampoline);
} else {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreGlobalIC);
}
}
@@ -344,29 +417,20 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
}
void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreInArrayLiteralIC);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
RelaxControls(node);
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreInArrayLiteralIC);
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kDeleteProperty);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kDeleteProperty);
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetSuperConstructor);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kGetSuperConstructor);
}
void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
@@ -374,16 +438,12 @@ void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kInstanceOf);
- ReplaceWithStubCall(node, callable, flags);
+ // TODO(jgruber, v8:8888): Collect feedback.
+ ReplaceWithBuiltinCall(node, Builtins::kInstanceOf);
}
void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kOrdinaryHasInstance);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kOrdinaryHasInstance);
}
void JSGenericLowering::LowerJSHasContextExtension(Node* node) {
@@ -401,10 +461,7 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSCreate(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastNewObject);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kFastNewObject);
}
@@ -465,23 +522,15 @@ void JSGenericLowering::LowerJSObjectIsArray(Node* node) {
}
void JSGenericLowering::LowerJSCreateObject(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(
- isolate(), Builtins::kCreateObjectWithoutProperties);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateObjectWithoutProperties);
}
void JSGenericLowering::LowerJSParseInt(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kParseInt);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kParseInt);
}
void JSGenericLowering::LowerJSRegExpTest(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kRegExpPrototypeTestFast);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kRegExpPrototypeTestFast);
}
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
@@ -493,10 +542,7 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
// Use the FastNewClosure builtin only for functions allocated in new space.
if (p.allocation() == AllocationType::kYoung) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kFastNewClosure);
} else {
ReplaceWithRuntimeCall(node, Runtime::kNewClosure_Tenured);
}
@@ -516,7 +562,7 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
CodeFactory::FastNewFunctionContext(isolate(), scope_type);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
@@ -524,15 +570,12 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
}
void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateGeneratorObject);
node->RemoveInput(4); // control
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateGeneratorObject);
}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ ReplaceWithBuiltinCall(node, Builtins::kCreateIterResultObject);
}
void JSGenericLowering::LowerJSCreateStringIterator(Node* node) {
@@ -548,15 +591,11 @@ void JSGenericLowering::LowerJSCreatePromise(Node* node) {
}
void JSGenericLowering::LowerJSCreateTypedArray(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateTypedArray);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateTypedArray);
}
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
@@ -566,9 +605,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
// without properties up to the number of elements that the stubs can handle.
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateShallowArrayLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateShallowArrayLiteral);
} else {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
@@ -576,31 +613,36 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
}
void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
- UNREACHABLE(); // Eliminated in native context specialization.
+ GetTemplateObjectParameters const& p =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), p.shared());
+ TemplateObjectDescriptionRef description(broker(), p.description());
+
+ node->InsertInput(zone(), 0, jsgraph()->Constant(shared));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(description));
+ node->InsertInput(zone(), 2,
+ jsgraph()->UintPtrConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
+ node->RemoveInput(6); // control
+
+ ReplaceWithBuiltinCall(node, Builtins::kGetTemplateObject);
}
void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->RemoveInput(4); // control
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateEmptyArrayLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateEmptyArrayLiteral);
}
void JSGenericLowering::LowerJSCreateArrayFromIterable(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(
- isolate(), Builtins::kIterableToListWithSymbolLookup);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kIterableToListWithSymbolLookup);
}
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
@@ -612,9 +654,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateShallowObjectLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateShallowObjectLiteral);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
}
@@ -622,40 +662,38 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCloneObject(Node* node) {
CloneObjectParameters const& p = CloneObjectParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCloneObjectIC);
}
void JSGenericLowering::LowerJSCreateEmptyLiteralObject(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ ReplaceWithBuiltinCall(node, Builtins::kCreateEmptyLiteralObject);
}
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateRegExpLiteral);
}
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
@@ -688,64 +726,178 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
void JSGenericLowering::LowerJSConstruct(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::Construct(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* new_target = node->InputAt(arg_count + 1);
- Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(arg_count + 1); // Drop new target.
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ // TODO(jgruber): Understand and document how stack_argument_count is
+ // calculated. I've made some educated guesses below but they should be
+ // verified and documented in other lowerings as well.
+ static constexpr int kReceiver = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count + kReceiver + kMaybeFeedbackVector;
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstruct_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count + kReceiver;
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kConstruct);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
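// Note (editor's sketch, not part of the upstream diff): to make the input
// shuffling above concrete, consider a hypothetical two-argument site
// `new C(a, b)` lowered through the feedback branch. The node's value inputs
// end up ordered as the Construct_WithFeedback descriptor expects, register
// arguments first, then stack arguments:
//
//   0: stub_code (Construct_WithFeedback)  // register
//   1: target C                            // register
//   2: new_target                          // register
//   3: stub_arity = 2                      // register
//   4: slot                                // register
//   5: receiver = undefined                // stack (implicit)
//   6: a                                   // stack
//   7: b                                   // stack
//   8: feedback_vector                     // stack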
void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ ConstructParameters const& p = ConstructParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = jsgraph()->UndefinedConstant();
- Node* arguments_list = node->InputAt(1);
- Node* new_target = node->InputAt(2);
- node->InsertInput(zone(), 0, stub_code);
- node->ReplaceInput(2, new_target);
- node->ReplaceInput(3, arguments_list);
- node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ const int arg_count = p.arity_without_implicit_args();
+ DCHECK_EQ(arg_count, 1);
+
+ static constexpr int kReceiver = 1;
+ static constexpr int kArgumentList = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count - kArgumentList + kReceiver + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kConstructWithArrayLike_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ node->InsertInput(zone(), 6, feedback_vector);
+
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count - kArgumentList + kReceiver;
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
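// Note (editor's sketch, not part of the upstream diff): the feedback branch
// above reuses existing input slots. Inserting stub_code at index 0 shifts
// the original inputs right by one, so the two ReplaceInput calls land
// new_target at index 2 and arguments_list at index 3. Assuming a
// hypothetical `Reflect.construct(C, list)` site, the final layout is:
//
//   0: stub_code, 1: target C, 2: new_target, 3: arguments_list, 4: slot,
//   5: receiver = undefined (stack), 6: feedback_vector (stack)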
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
int const spread_index = arg_count;
int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ConstructWithSpread(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
- Node* new_target = node->InputAt(new_target_index);
- Node* spread = node->InputAt(spread_index);
- Node* receiver = jsgraph()->UndefinedConstant();
- DCHECK(new_target_index > spread_index);
- node->RemoveInput(new_target_index); // Drop new target.
- node->RemoveInput(spread_index);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stack_arg_count);
- node->InsertInput(zone(), 4, spread);
- node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ static constexpr int kReceiver = 1;
+ static constexpr int kTheSpread = 1; // Included in `arg_count`.
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count + kReceiver + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kConstructWithSpread_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ // The single available register is needed for `slot`, thus `spread` remains
+ // on the stack here.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(new_target_index);
+
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count + kReceiver - kTheSpread;
+ Callable callable = CodeFactory::ConstructWithSpread(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* spread = node->InputAt(spread_index);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ DCHECK(new_target_index > spread_index);
+ node->RemoveInput(new_target_index);
+ node->RemoveInput(spread_index);
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, spread);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
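// Note (editor's sketch, not part of the upstream diff): both branches above
// report stub_arity as arg_count - kTheSpread, but only the no-feedback
// branch moves the spread into a register; with feedback, `slot` takes that
// register and the spread stays among the stack arguments. For a
// hypothetical `new C(a, ...xs)` site with feedback:
//
//   0: stub_code, 1: target C, 2: new_target, 3: stub_arity = 1, 4: slot,
//   5: receiver (stack), 6: a (stack), 7: xs spread (stack),
//   8: feedback_vector (stack)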
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
@@ -766,49 +918,126 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
void JSGenericLowering::LowerJSCall(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
ConvertReceiverMode const mode = p.convert_mode();
- Callable callable = CodeFactory::Call(isolate(), mode);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = CodeFactory::Call_WithFeedback(isolate(), mode);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, slot);
+ node->InsertInput(zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ Callable callable = CodeFactory::Call(isolate(), mode);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
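// Note (editor's sketch, not part of the upstream diff): unlike the
// construct lowerings, Call_WithFeedback receives the feedback vector in a
// register rather than on the stack. For a hypothetical `f(x)` site with
// feedback, the inputs become:
//
//   0: stub_code (Call_WithFeedback), 1: target f, 2: stub_arity = 1,
//   3: slot, 4: feedback_vector, 5: receiver (stack), 6: x (stack)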
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
- Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ CallParameters const& p = CallParametersOf(node->op());
+ const int arg_count = p.arity_without_implicit_args();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = node->InputAt(1);
- Node* arguments_list = node->InputAt(2);
- node->InsertInput(zone(), 0, stub_code);
- node->ReplaceInput(3, receiver);
- node->ReplaceInput(2, arguments_list);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ DCHECK_EQ(arg_count, 0);
+ static constexpr int kReceiver = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kCallWithArrayLike_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + kReceiver, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, arguments_list);
+ node->ReplaceInput(3, receiver);
+ node->InsertInput(zone(), 3, slot);
+ node->InsertInput(zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + kReceiver, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, arguments_list);
+ node->ReplaceInput(3, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
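// Note (editor's sketch, not part of the upstream diff): the two
// ReplaceInput calls above swap receiver and arguments_list, since the
// JSCallWithArrayLike node and the builtin descriptor order them
// differently. Assuming a hypothetical `f.apply(recv, list)` site, the
// feedback layout is:
//
//   0: stub_code, 1: target f, 2: arguments_list, 3: slot,
//   4: feedback_vector, 5: receiver = recv (stack)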
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
- int const spread_index = static_cast<int>(p.arity() + 1);
+ int const arg_count = p.arity_without_implicit_args();
+ int const spread_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::CallWithSpread(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- // We pass the spread in a register, not on the stack.
- Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stack_arg_count);
- node->InsertInput(zone(), 3, node->InputAt(spread_index));
- node->RemoveInput(spread_index + 1);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ static constexpr int kTheSpread = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count = arg_count + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kCallWithSpread_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* spread = node->InputAt(spread_index);
+ node->RemoveInput(spread_index);
+
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, spread);
+ node->InsertInput(zone(), 4, slot);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count;
+ Callable callable = CodeFactory::CallWithSpread(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* spread = node->InputAt(spread_index);
+ node->RemoveInput(spread_index);
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, spread);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
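// Note (editor's sketch, not part of the upstream diff): for a hypothetical
// `f(a, ...xs)` site with feedback, the spread is removed from the input
// list and re-inserted as a register argument, while the feedback vector is
// appended as the last stack argument:
//
//   0: stub_code, 1: target f, 2: stub_arity = 1, 3: spread xs, 4: slot,
//   5: receiver (stack), 6: a (stack), 7: feedback_vector (stack)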
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
@@ -932,9 +1161,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
}
void JSGenericLowering::LowerJSDebugger(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::HandleDebuggerStatement(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kHandleDebuggerStatement);
}
Zone* JSGenericLowering::zone() const { return graph()->zone(); }
diff --git a/chromium/v8/src/compiler/js-generic-lowering.h b/chromium/v8/src/compiler/js-generic-lowering.h
index 2a4ac808b1a..2addadffab1 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.h
+++ b/chromium/v8/src/compiler/js-generic-lowering.h
@@ -31,17 +31,27 @@ class JSGenericLowering final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
protected:
-#define DECLARE_LOWER(x) void Lower##x(Node* node);
+#define DECLARE_LOWER(x, ...) void Lower##x(Node* node);
// Dispatched depending on opcode.
JS_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
// Helpers to replace existing nodes with a generic call.
- void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
- void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
- Operator::Properties properties);
+ void ReplaceWithBuiltinCall(Node* node, Builtins::Name builtin);
+ void ReplaceWithBuiltinCall(Node* node, Callable c,
+ CallDescriptor::Flags flags);
+ void ReplaceWithBuiltinCall(Node* node, Callable c,
+ CallDescriptor::Flags flags,
+ Operator::Properties properties);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+ void ReplaceUnaryOpWithBuiltinCall(Node* node,
+ Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback);
+ void ReplaceBinaryOpWithBuiltinCall(Node* node,
+ Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback);
+
Zone* zone() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
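// Note (editor's sketch): the definition of the new single-argument
// ReplaceWithBuiltinCall overload is not part of this diff. Judging from the
// call sites it replaces in js-generic-lowering.cc, it plausibly expands to
// something like:
//
//   void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
//                                                  Builtins::Name builtin) {
//     CallDescriptor::Flags flags = FrameStateFlagForCall(node);
//     Callable callable = Builtins::CallableFor(isolate(), builtin);
//     ReplaceWithBuiltinCall(node, callable, flags);
//   }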
diff --git a/chromium/v8/src/compiler/js-heap-broker.cc b/chromium/v8/src/compiler/js-heap-broker.cc
index 8ff520921f2..47bc291c8d3 100644
--- a/chromium/v8/src/compiler/js-heap-broker.cc
+++ b/chromium/v8/src/compiler/js-heap-broker.cc
@@ -2385,7 +2385,8 @@ base::Optional<ObjectRef> ContextRef::get(int index,
}
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
- bool tracing_enabled, bool is_concurrent_inlining)
+ bool tracing_enabled, bool is_concurrent_inlining,
+ bool is_native_context_independent)
: isolate_(isolate),
zone_(broker_zone),
refs_(new (zone())
@@ -2394,6 +2395,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
+ is_native_context_independent_(is_native_context_independent),
feedback_(zone()),
bytecode_analyses_(zone()),
property_access_infos_(zone()),
@@ -2407,9 +2409,11 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-std::ostream& JSHeapBroker::Trace() const {
- return trace_out_ << "[" << this << "] "
- << std::string(trace_indentation_ * 2, ' ');
+std::string JSHeapBroker::Trace() const {
+ std::ostringstream oss;
+ oss << "[" << this << "] ";
+ for (unsigned i = 0; i < trace_indentation_ * 2; ++i) oss.put(' ');
+ return oss.str();
}
void JSHeapBroker::StopSerializing() {
diff --git a/chromium/v8/src/compiler/js-heap-broker.h b/chromium/v8/src/compiler/js-heap-broker.h
index 424da1df55b..b3e256d6864 100644
--- a/chromium/v8/src/compiler/js-heap-broker.h
+++ b/chromium/v8/src/compiler/js-heap-broker.h
@@ -33,20 +33,20 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
#define TRACE_BROKER(broker, x) \
do { \
if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
- broker->Trace() << x << '\n'; \
+ StdoutStream{} << broker->Trace() << x << '\n'; \
} while (false)
#define TRACE_BROKER_MEMORY(broker, x) \
do { \
if (broker->tracing_enabled() && FLAG_trace_heap_broker_memory) \
- broker->Trace() << x << std::endl; \
+ StdoutStream{} << broker->Trace() << x << std::endl; \
} while (false)
-#define TRACE_BROKER_MISSING(broker, x) \
- do { \
- if (broker->tracing_enabled()) \
- broker->Trace() << "Missing " << x << " (" << __FILE__ << ":" \
- << __LINE__ << ")" << std::endl; \
+#define TRACE_BROKER_MISSING(broker, x) \
+ do { \
+ if (broker->tracing_enabled()) \
+ StdoutStream{} << broker->Trace() << "Missing " << x << " (" << __FILE__ \
+ << ":" << __LINE__ << ")" << std::endl; \
} while (false)
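// Note (editor's sketch, not part of the upstream diff): with Trace() now
// returning a prefix string instead of a reference to a long-lived member
// stream, each macro builds its output on a temporary stream, e.g.:
//
//   StdoutStream{} << broker->Trace() << "some message" << '\n';
//
// This is what allows the `mutable StdoutStream trace_out_` member to be
// dropped in the js-heap-broker.h hunk below.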
struct PropertyAccessTarget {
@@ -74,7 +74,13 @@ struct PropertyAccessTarget {
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
- bool is_concurrent_inlining);
+ bool is_concurrent_inlining, bool is_native_context_independent);
+
+ // For use only in tests, sets default values for some arguments. Avoids
+ // churn when new flags are added.
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+ : JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
+ false) {}
// The compilation target's native context. We need the setter because at
// broker construction time we don't yet have the canonical handle.
@@ -89,6 +95,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
+ bool is_native_context_independent() const {
+ return is_native_context_independent_;
+ }
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
@@ -193,7 +202,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
- std::ostream& Trace() const;
+ std::string Trace() const;
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -242,7 +251,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- mutable StdoutStream trace_out_;
+ bool const is_native_context_independent_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
diff --git a/chromium/v8/src/compiler/js-heap-copy-reducer.cc b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
index 820928ec8ca..689732eea15 100644
--- a/chromium/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
@@ -85,6 +85,50 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
}
break;
}
+ /* Unary ops. */
+ case IrOpcode::kJSBitwiseNot:
+ case IrOpcode::kJSDecrement:
+ case IrOpcode::kJSIncrement:
+ case IrOpcode::kJSNegate: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ // Unary ops are treated as binary ops with respect to feedback.
+ broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ }
+ break;
+ }
+ /* Binary ops. */
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+ case IrOpcode::kJSExponentiate:
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ }
+ break;
+ }
+ /* Compare ops. */
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSStrictEqual: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForCompareOperation(p.feedback());
+ }
+ break;
+ }
case IrOpcode::kJSCreateFunctionContext: {
CreateFunctionContextParameters const& p =
CreateFunctionContextParametersOf(node->op());
diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc
index 16a6fb2f0f5..64ed0ed0893 100644
--- a/chromium/v8/src/compiler/js-inlining.cc
+++ b/chromium/v8/src/compiler/js-inlining.cc
@@ -429,8 +429,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// always hold true.
CHECK(shared_info->is_compiled());
- if (!broker()->is_concurrent_inlining() &&
- info_->is_source_positions_enabled()) {
+ if (!broker()->is_concurrent_inlining() && info_->source_positions()) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
shared_info->object());
}
@@ -462,10 +461,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Graph::SubgraphScope scope(graph());
BytecodeGraphBuilderFlags flags(
BytecodeGraphBuilderFlag::kSkipFirstStackCheck);
- if (info_->is_analyze_environment_liveness()) {
+ if (info_->analyze_environment_liveness()) {
flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
}
- if (info_->is_bailout_on_uninitialized()) {
+ if (info_->bailout_on_uninitialized()) {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
{
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc
index 3283ebd0efc..73b10435e27 100644
--- a/chromium/v8/src/compiler/js-native-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-native-context-specialization.cc
@@ -488,7 +488,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
node->ReplaceInput(4, continuation_frame_state);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
- node, javascript()->Call(3, CallFrequency(), FeedbackSource(),
+ node, javascript()->Call(1 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
@@ -1428,10 +1429,10 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
SpeculationMode mode = feedback.IsInsufficient()
? SpeculationMode::kDisallowSpeculation
: feedback.AsCall().speculation_mode();
- const Operator* call_op =
- javascript()->Call(2, CallFrequency(), p.callFeedback(),
- ConvertReceiverMode::kNotNullOrUndefined, mode,
- CallFeedbackRelation::kRelated);
+ const Operator* call_op = javascript()->Call(
+ 0 + kTargetAndReceiver, CallFrequency(), p.callFeedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, mode,
+ CallFeedbackRelation::kRelated);
Node* call_property = graph()->NewNode(call_op, load_property, receiver,
context, frame_state, effect, control);
@@ -2048,7 +2049,8 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* value;
if (constant.IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
- jsgraph()->javascript()->Call(2, CallFrequency(), FeedbackSource(),
+ jsgraph()->javascript()->Call(0 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state, *effect, *control);
} else {
@@ -2085,7 +2087,8 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
*effect = *control = graph()->NewNode(
- jsgraph()->javascript()->Call(3, CallFrequency(), FeedbackSource(),
+ jsgraph()->javascript()->Call(1 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state, *effect, *control);
} else {
diff --git a/chromium/v8/src/compiler/js-operator.cc b/chromium/v8/src/compiler/js-operator.cc
index 45e144094b9..b152569ae1f 100644
--- a/chromium/v8/src/compiler/js-operator.cc
+++ b/chromium/v8/src/compiler/js-operator.cc
@@ -7,7 +7,6 @@
#include <limits>
#include "src/base/lazy-instance.h"
-#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
@@ -17,16 +16,22 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+// Returns properties for the given binary op.
+constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
+ CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
+ return opcode == IrOpcode::kJSStrictEqual ? Operator::kPure
+ : Operator::kNoProperties;
+}
+
+} // namespace
+
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
if (f.IsUnknown()) return os << "unknown";
return os << f.value();
}
-CallFrequency CallFrequencyOf(Operator const* op) {
- DCHECK_EQ(op->opcode(), IrOpcode::kJSConstructWithArrayLike);
- return OpParameter<CallFrequency>(op);
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -60,6 +65,7 @@ std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
ConstructParameters const& ConstructParametersOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
+ op->opcode() == IrOpcode::kJSConstructWithArrayLike ||
op->opcode() == IrOpcode::kJSConstructWithSpread);
return OpParameter<ConstructParameters>(op);
}
@@ -230,7 +236,9 @@ std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
}
FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
+ DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()) ||
+ JSOperator::IsBinaryWithFeedback(op->opcode()) ||
+ op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
op->opcode() == IrOpcode::kJSInstanceOf ||
op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
@@ -636,37 +644,7 @@ ForInMode ForInModeOf(Operator const* op) {
return OpParameter<ForInMode>(op);
}
-BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
- return OpParameter<BinaryOperationHint>(op);
-}
-
-CompareOperationHint CompareOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSEqual ||
- op->opcode() == IrOpcode::kJSStrictEqual ||
- op->opcode() == IrOpcode::kJSLessThan ||
- op->opcode() == IrOpcode::kJSGreaterThan ||
- op->opcode() == IrOpcode::kJSLessThanOrEqual ||
- op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
- return OpParameter<CompareOperationHint>(op);
-}
-
#define CACHED_OP_LIST(V) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(Exponentiate, Operator::kNoProperties, 2, 1) \
- V(BitwiseNot, Operator::kNoProperties, 1, 1) \
- V(Decrement, Operator::kNoProperties, 1, 1) \
- V(Increment, Operator::kNoProperties, 1, 1) \
- V(Negate, Operator::kNoProperties, 1, 1) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
@@ -703,16 +681,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ParseInt, Operator::kNoProperties, 2, 1) \
V(RegExpTest, Operator::kNoProperties, 2, 1)
-#define BINARY_OP_LIST(V) V(Add)
-
-#define COMPARE_OP_LIST(V) \
- V(Equal, Operator::kNoProperties) \
- V(StrictEqual, Operator::kPure) \
- V(LessThan, Operator::kNoProperties) \
- V(GreaterThan, Operator::kNoProperties) \
- V(LessThanOrEqual, Operator::kNoProperties) \
- V(GreaterThanOrEqual, Operator::kNoProperties)
-
struct JSOperatorGlobalCache final {
#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
struct Name##Operator final : public Operator { \
@@ -726,55 +694,6 @@ struct JSOperatorGlobalCache final {
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED_OP)
#undef CACHED_OP
-
-#define BINARY_OP(Name) \
- template <BinaryOperationHint kHint> \
- struct Name##Operator final : public Operator1<BinaryOperationHint> { \
- Name##Operator() \
- : Operator1<BinaryOperationHint>(IrOpcode::kJS##Name, \
- Operator::kNoProperties, "JS" #Name, \
- 2, 1, 1, 1, 1, 2, kHint) {} \
- }; \
- Name##Operator<BinaryOperationHint::kNone> k##Name##NoneOperator; \
- Name##Operator<BinaryOperationHint::kSignedSmall> \
- k##Name##SignedSmallOperator; \
- Name##Operator<BinaryOperationHint::kSignedSmallInputs> \
- k##Name##SignedSmallInputsOperator; \
- Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
- Name##Operator<BinaryOperationHint::kNumber> k##Name##NumberOperator; \
- Name##Operator<BinaryOperationHint::kNumberOrOddball> \
- k##Name##NumberOrOddballOperator; \
- Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
- Name##Operator<BinaryOperationHint::kBigInt> k##Name##BigIntOperator; \
- Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
- BINARY_OP_LIST(BINARY_OP)
-#undef BINARY_OP
-
-#define COMPARE_OP(Name, properties) \
- template <CompareOperationHint kHint> \
- struct Name##Operator final : public Operator1<CompareOperationHint> { \
- Name##Operator() \
- : Operator1<CompareOperationHint>( \
- IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
- Operator::ZeroIfNoThrow(properties), kHint) {} \
- }; \
- Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator; \
- Name##Operator<CompareOperationHint::kSignedSmall> \
- k##Name##SignedSmallOperator; \
- Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
- Name##Operator<CompareOperationHint::kNumberOrOddball> \
- k##Name##NumberOrOddballOperator; \
- Name##Operator<CompareOperationHint::kInternalizedString> \
- k##Name##InternalizedStringOperator; \
- Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
- Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
- Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
- Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
- Name##Operator<CompareOperationHint::kReceiverOrNullOrUndefined> \
- k##Name##ReceiverOrNullOrUndefinedOperator; \
- Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
- COMPARE_OP_LIST(COMPARE_OP)
-#undef COMPARE_OP
};
namespace {
@@ -791,65 +710,26 @@ JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
CACHED_OP_LIST(CACHED_OP)
#undef CACHED_OP
-#define BINARY_OP(Name) \
- const Operator* JSOperatorBuilder::Name(BinaryOperationHint hint) { \
- switch (hint) { \
- case BinaryOperationHint::kNone: \
- return &cache_.k##Name##NoneOperator; \
- case BinaryOperationHint::kSignedSmall: \
- return &cache_.k##Name##SignedSmallOperator; \
- case BinaryOperationHint::kSignedSmallInputs: \
- return &cache_.k##Name##SignedSmallInputsOperator; \
- case BinaryOperationHint::kSigned32: \
- return &cache_.k##Name##Signed32Operator; \
- case BinaryOperationHint::kNumber: \
- return &cache_.k##Name##NumberOperator; \
- case BinaryOperationHint::kNumberOrOddball: \
- return &cache_.k##Name##NumberOrOddballOperator; \
- case BinaryOperationHint::kString: \
- return &cache_.k##Name##StringOperator; \
- case BinaryOperationHint::kBigInt: \
- return &cache_.k##Name##BigIntOperator; \
- case BinaryOperationHint::kAny: \
- return &cache_.k##Name##AnyOperator; \
- } \
- UNREACHABLE(); \
- return nullptr; \
+#define UNARY_OP(JSName, Name) \
+ const Operator* JSOperatorBuilder::Name(FeedbackSource const& feedback) { \
+ FeedbackParameter parameters(feedback); \
+ return new (zone()) Operator1<FeedbackParameter>( \
+ IrOpcode::k##JSName, Operator::kNoProperties, #JSName, 2, 1, 1, 1, 1, \
+ 2, parameters); \
}
-BINARY_OP_LIST(BINARY_OP)
-#undef BINARY_OP
-
-#define COMPARE_OP(Name, ...) \
- const Operator* JSOperatorBuilder::Name(CompareOperationHint hint) { \
- switch (hint) { \
- case CompareOperationHint::kNone: \
- return &cache_.k##Name##NoneOperator; \
- case CompareOperationHint::kSignedSmall: \
- return &cache_.k##Name##SignedSmallOperator; \
- case CompareOperationHint::kNumber: \
- return &cache_.k##Name##NumberOperator; \
- case CompareOperationHint::kNumberOrOddball: \
- return &cache_.k##Name##NumberOrOddballOperator; \
- case CompareOperationHint::kInternalizedString: \
- return &cache_.k##Name##InternalizedStringOperator; \
- case CompareOperationHint::kString: \
- return &cache_.k##Name##StringOperator; \
- case CompareOperationHint::kSymbol: \
- return &cache_.k##Name##SymbolOperator; \
- case CompareOperationHint::kBigInt: \
- return &cache_.k##Name##BigIntOperator; \
- case CompareOperationHint::kReceiver: \
- return &cache_.k##Name##ReceiverOperator; \
- case CompareOperationHint::kReceiverOrNullOrUndefined: \
- return &cache_.k##Name##ReceiverOrNullOrUndefinedOperator; \
- case CompareOperationHint::kAny: \
- return &cache_.k##Name##AnyOperator; \
- } \
- UNREACHABLE(); \
- return nullptr; \
+JS_UNOP_WITH_FEEDBACK(UNARY_OP)
+#undef UNARY_OP
+
+#define BINARY_OP(JSName, Name) \
+ const Operator* JSOperatorBuilder::Name(FeedbackSource const& feedback) { \
+ static constexpr auto kProperties = BinopProperties(IrOpcode::k##JSName); \
+ FeedbackParameter parameters(feedback); \
+ return new (zone()) Operator1<FeedbackParameter>( \
+ IrOpcode::k##JSName, kProperties, #JSName, 3, 1, 1, 1, 1, \
+ Operator::ZeroIfNoThrow(kProperties), parameters); \
}
-COMPARE_OP_LIST(COMPARE_OP)
-#undef COMPARE_OP
+JS_BINOP_WITH_FEEDBACK(BINARY_OP)
+#undef BINARY_OP
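// Note (editor's aid, not part of the upstream diff): for reference,
// BINARY_OP(JSAdd, Add) above expands to (whitespace adjusted):
//
//   const Operator* JSOperatorBuilder::Add(FeedbackSource const& feedback) {
//     static constexpr auto kProperties = BinopProperties(IrOpcode::kJSAdd);
//     FeedbackParameter parameters(feedback);
//     return new (zone()) Operator1<FeedbackParameter>(
//         IrOpcode::kJSAdd, kProperties, "JSAdd", 3, 1, 1, 1, 1,
//         Operator::ZeroIfNoThrow(kProperties), parameters);
//   }
//
// That is, every binary op now takes three value inputs (left, right,
// feedback vector) and carries its feedback slot as an operator parameter.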
const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
const FeedbackSource& feedback) {
@@ -972,13 +852,15 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
}
const Operator* JSOperatorBuilder::ConstructWithArrayLike(
- CallFrequency const& frequency) {
- return new (zone()) Operator1<CallFrequency>( // --
- IrOpcode::kJSConstructWithArrayLike, // opcode
- Operator::kNoProperties, // properties
- "JSConstructWithArrayLike", // name
- 3, 1, 1, 1, 1, 2, // counts
- frequency); // parameter
+ CallFrequency const& frequency, FeedbackSource const& feedback) {
+ static constexpr uint32_t arity = 3;
+ ConstructParameters parameters(arity, frequency, feedback);
+ return new (zone()) Operator1<ConstructParameters>( // --
+ IrOpcode::kJSConstructWithArrayLike, // opcode
+ Operator::kNoProperties, // properties
+ "JSConstructWithArrayLike", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::ConstructWithSpread(
@@ -1359,7 +1241,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
IrOpcode::kJSCreateEmptyLiteralObject, // opcode
Operator::kNoProperties, // properties
"JSCreateEmptyLiteralObject", // name
- 1, 1, 1, 1, 1, 2); // counts
+ 0, 1, 1, 1, 1, 2); // counts
}
const Operator* JSOperatorBuilder::CreateLiteralRegExp(
@@ -1420,9 +1302,7 @@ Handle<ScopeInfo> ScopeInfoOf(const Operator* op) {
return OpParameter<Handle<ScopeInfo>>(op);
}
-#undef BINARY_OP_LIST
#undef CACHED_OP_LIST
-#undef COMPARE_OP_LIST
} // namespace compiler
} // namespace internal
diff --git a/chromium/v8/src/compiler/js-operator.h b/chromium/v8/src/compiler/js-operator.h
index 1f9230d22b6..ad9365b4b59 100644
--- a/chromium/v8/src/compiler/js-operator.h
+++ b/chromium/v8/src/compiler/js-operator.h
@@ -8,6 +8,8 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/type-hints.h"
#include "src/runtime/runtime.h"
@@ -27,6 +29,77 @@ namespace compiler {
class Operator;
struct JSOperatorGlobalCache;
+// Macro lists.
+#define JS_UNOP_WITH_FEEDBACK(V) \
+ JS_BITWISE_UNOP_LIST(V) \
+ JS_ARITH_UNOP_LIST(V)
+
+#define JS_BINOP_WITH_FEEDBACK(V) \
+ JS_ARITH_BINOP_LIST(V) \
+ JS_BITWISE_BINOP_LIST(V) \
+ JS_COMPARE_BINOP_LIST(V)
+
+// Predicates.
+class JSOperator final : public AllStatic {
+ public:
+ static constexpr bool IsUnaryWithFeedback(Operator::Opcode opcode) {
+#define CASE(Name, ...) \
+ case IrOpcode::k##Name: \
+ return true;
+ switch (opcode) {
+ JS_UNOP_WITH_FEEDBACK(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ return false;
+ }
+
+ static constexpr bool IsBinaryWithFeedback(Operator::Opcode opcode) {
+#define CASE(Name, ...) \
+ case IrOpcode::k##Name: \
+ return true;
+ switch (opcode) {
+ JS_BINOP_WITH_FEEDBACK(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ return false;
+ }
+};
+
+// Node wrappers.
+
+class JSUnaryOpNode final : public NodeWrapper {
+ public:
+ explicit constexpr JSUnaryOpNode(Node* node) : NodeWrapper(node) {
+ CONSTEXPR_DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
+ }
+
+ static constexpr int ValueIndex() { return 0; }
+ static constexpr int FeedbackVectorIndex() { return 1; }
+};
+
+#define V(JSName, ...) using JSName##Node = JSUnaryOpNode;
+JS_UNOP_WITH_FEEDBACK(V)
+#undef V
+
+class JSBinaryOpNode final : public NodeWrapper {
+ public:
+ explicit constexpr JSBinaryOpNode(Node* node) : NodeWrapper(node) {
+ CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
+ }
+
+ static constexpr int LeftIndex() { return 0; }
+ static constexpr int RightIndex() { return 1; }
+ static constexpr int FeedbackVectorIndex() { return 2; }
+};
+
+#define V(JSName, ...) using JSName##Node = JSBinaryOpNode;
+JS_BINOP_WITH_FEEDBACK(V)
+#undef V
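// Note (editor's sketch; concrete call sites are outside this diff): the
// wrappers give checked, named access to a node's inputs, e.g.:
//
//   JSBinaryOpNode n(node);  // DCHECKs IsBinaryWithFeedback(node->opcode())
//   Node* left = node->InputAt(JSBinaryOpNode::LeftIndex());           // 0
//   Node* vec = node->InputAt(JSBinaryOpNode::FeedbackVectorIndex());  // 2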
+
// Defines the frequency a given Call/Construct site was executed. For some
// call sites the frequency is not known.
class CallFrequency final {
@@ -60,8 +133,6 @@ class CallFrequency final {
std::ostream& operator<<(std::ostream&, CallFrequency const&);
-CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
-
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -97,15 +168,32 @@ std::ostream& operator<<(std::ostream&,
ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
-// Defines the arity and the feedback for a JavaScript constructor call. This is
-// used as a parameter by JSConstruct and JSConstructWithSpread operators.
+// Part of ConstructParameters::arity.
+static constexpr int kTargetAndNewTarget = 2;
+
+// Defines the arity (parameters plus the target and new target) and the
+// feedback for a JavaScript constructor call. This is used as a parameter by
+// JSConstruct, JSConstructWithArrayLike, and JSConstructWithSpread operators.
class ConstructParameters final {
public:
ConstructParameters(uint32_t arity, CallFrequency const& frequency,
FeedbackSource const& feedback)
- : arity_(arity), frequency_(frequency), feedback_(feedback) {}
+ : arity_(arity), frequency_(frequency), feedback_(feedback) {
+ DCHECK_GE(arity, kTargetAndNewTarget);
+ DCHECK(is_int32(arity));
+ }
+ // TODO(jgruber): Consider removing `arity()` and just storing the arity
+ // without extra args in ConstructParameters. Every spot that creates
+ // ConstructParameters artificially adds the extra args. Every spot that uses
+ // ConstructParameters artificially subtracts the extra args.
+ // We keep them for now for consistency with other spots
+ // that expect `arity()` to include extra args.
uint32_t arity() const { return arity_; }
+ int arity_without_implicit_args() const {
+ return static_cast<int>(arity_ - kTargetAndNewTarget);
+ }
+
CallFrequency const& frequency() const { return frequency_; }
FeedbackSource const& feedback() const { return feedback_; }
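// Note (editor's example, not part of the upstream diff): for a hypothetical
// site `new C(a, b)`, arity() == 4 (a and b plus kTargetAndNewTarget), while
// arity_without_implicit_args() == 2.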
@@ -158,8 +246,12 @@ std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
-// Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCall and JSCallWithSpread operators.
+// Part of CallParameters::arity.
+static constexpr int kTargetAndReceiver = 2;
+
+// Defines the arity (parameters plus the target and receiver) and the call
+// flags for a JavaScript function call. This is used as a parameter by JSCall,
+// JSCallWithArrayLike and JSCallWithSpread operators.
class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency const& frequency,
@@ -178,9 +270,17 @@ class CallParameters final {
feedback.IsValid());
DCHECK_IMPLIES(!feedback.IsValid(),
feedback_relation == CallFeedbackRelation::kUnrelated);
+ DCHECK_GE(arity, kTargetAndReceiver);
+ DCHECK(is_int32(arity));
}
+ // TODO(jgruber): Consider removing `arity()` and just storing the arity
+ // without extra args in CallParameters.
size_t arity() const { return ArityField::decode(bit_field_); }
+ int arity_without_implicit_args() const {
+ return static_cast<int>(arity() - kTargetAndReceiver);
+ }
+
CallFrequency const& frequency() const { return frequency_; }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
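// Note (editor's example, not part of the upstream diff): analogously, for a
// hypothetical call `f(x, y)`, CallParameters::arity() == 4 (x and y plus
// kTargetAndReceiver) and arity_without_implicit_args() == 2.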
@@ -733,10 +833,6 @@ std::ostream& operator<<(std::ostream&, ForInMode);
ForInMode ForInModeOf(Operator const* op) V8_WARN_UNUSED_RESULT;
-BinaryOperationHint BinaryOperationHintOf(const Operator* op);
-
-CompareOperationHint CompareOperationHintOf(const Operator* op);
-
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -752,30 +848,30 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
public:
explicit JSOperatorBuilder(Zone* zone);
- const Operator* Equal(CompareOperationHint hint);
- const Operator* StrictEqual(CompareOperationHint hint);
- const Operator* LessThan(CompareOperationHint hint);
- const Operator* GreaterThan(CompareOperationHint hint);
- const Operator* LessThanOrEqual(CompareOperationHint hint);
- const Operator* GreaterThanOrEqual(CompareOperationHint hint);
-
- const Operator* BitwiseOr();
- const Operator* BitwiseXor();
- const Operator* BitwiseAnd();
- const Operator* ShiftLeft();
- const Operator* ShiftRight();
- const Operator* ShiftRightLogical();
- const Operator* Add(BinaryOperationHint hint);
- const Operator* Subtract();
- const Operator* Multiply();
- const Operator* Divide();
- const Operator* Modulus();
- const Operator* Exponentiate();
-
- const Operator* BitwiseNot();
- const Operator* Decrement();
- const Operator* Increment();
- const Operator* Negate();
+ const Operator* Equal(FeedbackSource const& feedback);
+ const Operator* StrictEqual(FeedbackSource const& feedback);
+ const Operator* LessThan(FeedbackSource const& feedback);
+ const Operator* GreaterThan(FeedbackSource const& feedback);
+ const Operator* LessThanOrEqual(FeedbackSource const& feedback);
+ const Operator* GreaterThanOrEqual(FeedbackSource const& feedback);
+
+ const Operator* BitwiseOr(FeedbackSource const& feedback);
+ const Operator* BitwiseXor(FeedbackSource const& feedback);
+ const Operator* BitwiseAnd(FeedbackSource const& feedback);
+ const Operator* ShiftLeft(FeedbackSource const& feedback);
+ const Operator* ShiftRight(FeedbackSource const& feedback);
+ const Operator* ShiftRightLogical(FeedbackSource const& feedback);
+ const Operator* Add(FeedbackSource const& feedback);
+ const Operator* Subtract(FeedbackSource const& feedback);
+ const Operator* Multiply(FeedbackSource const& feedback);
+ const Operator* Divide(FeedbackSource const& feedback);
+ const Operator* Modulus(FeedbackSource const& feedback);
+ const Operator* Exponentiate(FeedbackSource const& feedback);
+
+ const Operator* BitwiseNot(FeedbackSource const& feedback);
+ const Operator* Decrement(FeedbackSource const& feedback);
+ const Operator* Increment(FeedbackSource const& feedback);
+ const Operator* Negate(FeedbackSource const& feedback);
const Operator* ToLength();
const Operator* ToName();
@@ -849,7 +945,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Construct(uint32_t arity,
CallFrequency const& frequency = CallFrequency(),
FeedbackSource const& feedback = FeedbackSource());
- const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
+ const Operator* ConstructWithArrayLike(CallFrequency const& frequency,
+ FeedbackSource const& feedback);
const Operator* ConstructWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
FeedbackSource const& feedback = FeedbackSource());
diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.cc b/chromium/v8/src/compiler/js-type-hint-lowering.cc
index 5c9a287bccc..808c59a65e2 100644
--- a/chromium/v8/src/compiler/js-type-hint-lowering.cc
+++ b/chromium/v8/src/compiler/js-type-hint-lowering.cc
@@ -97,6 +97,9 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kNumber:
*hint = NumberOperationHint::kNumber;
return true;
+ case CompareOperationHint::kNumberOrBoolean:
+ *hint = NumberOperationHint::kNumberOrBoolean;
+ return true;
case CompareOperationHint::kNumberOrOddball:
*hint = NumberOperationHint::kNumberOrOddball;
return true;
@@ -282,31 +285,33 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
return LoweringResult::Exit(node);
}
+ // Note: Unary and binary operations collect the same kind of feedback.
+ FeedbackSource feedback(feedback_vector(), slot);
+
Node* node;
switch (op->opcode()) {
case IrOpcode::kJSBitwiseNot: {
// Lower to a speculative xor with -1 if we have some kind of Number
// feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->BitwiseXor(),
- operand, jsgraph()->SmiConstant(-1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->BitwiseXor(feedback), operand,
+ jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
break;
}
case IrOpcode::kJSDecrement: {
// Lower to a speculative subtraction of 1 if we have some kind of Number
// feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Subtract(),
- operand, jsgraph()->SmiConstant(1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->Subtract(feedback), operand,
+ jsgraph()->SmiConstant(1), effect, control, slot);
node = b.TryBuildNumberBinop();
break;
}
case IrOpcode::kJSIncrement: {
// Lower to a speculative addition of 1 if we have some kind of Number
// feedback.
- BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Add(hint),
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Add(feedback),
operand, jsgraph()->SmiConstant(1), effect,
control, slot);
node = b.TryBuildNumberBinop();
@@ -315,9 +320,9 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
case IrOpcode::kJSNegate: {
// Lower to a speculative multiplication with -1 if we have some kind of
// Number feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Multiply(),
- operand, jsgraph()->SmiConstant(-1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->Multiply(feedback), operand,
+ jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
if (!node) {
if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.h b/chromium/v8/src/compiler/js-type-hint-lowering.h
index 303e2f8dcfa..256858c1c69 100644
--- a/chromium/v8/src/compiler/js-type-hint-lowering.h
+++ b/chromium/v8/src/compiler/js-type-hint-lowering.h
@@ -72,6 +72,7 @@ class JSTypeHintLowering {
Node* control) {
DCHECK_NOT_NULL(effect);
DCHECK_NOT_NULL(control);
+ DCHECK(value->op()->HasProperty(Operator::kNoThrow));
return LoweringResult(LoweringResultKind::kSideEffectFree, value, effect,
control);
}
diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc
index 69ca3e62e7a..8e03fc2f435 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.cc
+++ b/chromium/v8/src/compiler/js-typed-lowering.cc
@@ -38,13 +38,16 @@ class JSBinopReduction final {
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- switch (CompareOperationHintOf(node_->op())) {
+ switch (GetCompareOperationHint(node_)) {
case CompareOperationHint::kSignedSmall:
*hint = NumberOperationHint::kSignedSmall;
return true;
case CompareOperationHint::kNumber:
*hint = NumberOperationHint::kNumber;
return true;
+ case CompareOperationHint::kNumberOrBoolean:
+ *hint = NumberOperationHint::kNumberOrBoolean;
+ return true;
case CompareOperationHint::kNumberOrOddball:
*hint = NumberOperationHint::kNumberOrOddball;
return true;
@@ -63,36 +66,34 @@ class JSBinopReduction final {
bool IsInternalizedStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kInternalizedString) &&
BothInputsMaybe(Type::InternalizedString());
}
bool IsReceiverCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kReceiver) &&
BothInputsMaybe(Type::Receiver());
}
bool IsReceiverOrNullOrUndefinedCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kReceiverOrNullOrUndefined) &&
BothInputsMaybe(Type::ReceiverOrNullOrUndefined());
}
bool IsStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
- CompareOperationHint::kString) &&
+ return (GetCompareOperationHint(node_) == CompareOperationHint::kString) &&
BothInputsMaybe(Type::String());
}
bool IsSymbolCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
- CompareOperationHint::kSymbol) &&
+ return (GetCompareOperationHint(node_) == CompareOperationHint::kSymbol) &&
BothInputsMaybe(Type::Symbol());
}
@@ -103,7 +104,7 @@ class JSBinopReduction final {
DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
DCHECK(OneInputIs(Type::String()));
if (BothInputsAre(Type::String()) ||
- BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
+ GetBinaryOperationHint(node_) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
JSHeapBroker* broker = lowering_->broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
@@ -269,6 +270,10 @@ class JSBinopReduction final {
}
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
+ // Remove the feedback vector input, if applicable.
+ if (JSOperator::IsBinaryWithFeedback(node_->opcode())) {
+ node_->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ }
// Finally, update the operator to the new one.
NodeProperties::ChangeOp(node_, op);
@@ -292,7 +297,6 @@ class JSBinopReduction final {
DCHECK_EQ(1, node_->op()->EffectInputCount());
DCHECK_EQ(1, node_->op()->EffectOutputCount());
DCHECK_EQ(1, node_->op()->ControlInputCount());
- DCHECK_EQ(2, node_->op()->ValueInputCount());
// Reconnect the control output to bypass the IfSuccess node and
// possibly disconnect from the IfException node.
@@ -304,6 +308,11 @@ class JSBinopReduction final {
}
node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
+ // Remove the feedback vector input, if applicable.
+ if (JSOperator::IsBinaryWithFeedback(node_->opcode())) {
+ node_->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ }
+ // Finally, update the operator to the new one.
NodeProperties::ChangeOp(node_, op);
// Update the type to number.
@@ -366,6 +375,11 @@ class JSBinopReduction final {
return !left_type().Maybe(t) && !right_type().Maybe(t);
}
+ BinaryOperationHint GetBinaryOperationHint(Node* node) const {
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ return lowering_->broker()->GetFeedbackForBinaryOperation(p.feedback());
+ }
+
Node* effect() { return NodeProperties::GetEffectInput(node_); }
Node* control() { return NodeProperties::GetControlInput(node_); }
Node* context() { return NodeProperties::GetContextInput(node_); }
@@ -414,6 +428,11 @@ class JSBinopReduction final {
return node;
}
+ CompareOperationHint GetCompareOperationHint(Node* node) const {
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ return lowering_->broker()->GetFeedbackForCompareOperation(p.feedback());
+ }
+
void update_effect(Node* effect) {
NodeProperties::ReplaceEffectInput(node_, effect);
}
@@ -443,8 +462,9 @@ Reduction JSTypedLowering::ReduceJSBitwiseNot(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSBitwiseNot(x) => NumberBitwiseXor(ToInt32(x), -1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
- NodeProperties::ChangeOp(node, javascript()->BitwiseXor());
+ NodeProperties::ChangeOp(node, javascript()->BitwiseXor(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
r.ConvertInputsToUI32(kSigned, kSigned);
@@ -458,8 +478,9 @@ Reduction JSTypedLowering::ReduceJSDecrement(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSDecrement(x) => NumberSubtract(ToNumber(x), 1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
- NodeProperties::ChangeOp(node, javascript()->Subtract());
+ NodeProperties::ChangeOp(node, javascript()->Subtract(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
DCHECK_EQ(simplified()->NumberSubtract(), r.NumberOp());
@@ -473,9 +494,9 @@ Reduction JSTypedLowering::ReduceJSIncrement(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSIncrement(x) => NumberAdd(ToNumber(x), 1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
- BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
- NodeProperties::ChangeOp(node, javascript()->Add(hint));
+ NodeProperties::ChangeOp(node, javascript()->Add(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
DCHECK_EQ(simplified()->NumberAdd(), r.NumberOp());
@@ -489,8 +510,9 @@ Reduction JSTypedLowering::ReduceJSNegate(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSNegate(x) => NumberMultiply(ToNumber(x), -1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
- NodeProperties::ChangeOp(node, javascript()->Multiply());
+ NodeProperties::ChangeOp(node, javascript()->Multiply(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
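
[Editorial note: the four unary lowerings above each rewrite to a binary numeric operation plus a constant. The underlying scalar identities can be sanity-checked in isolation; this is a standalone sketch, not part of the patch.]

    #include <cassert>

    int main() {
      int x = 5;
      assert(~x == (x ^ -1));  // JSBitwiseNot(x) => NumberBitwiseXor(ToInt32(x), -1)
      assert(-x == x * -1);    // JSNegate(x)     => NumberMultiply(ToNumber(x), -1)
      assert(x - 1 == 4);      // JSDecrement(x)  => NumberSubtract(ToNumber(x), 1)
      assert(x + 1 == 6);      // JSIncrement(x)  => NumberAdd(ToNumber(x), 1)
      return 0;
    }

In each case the patch now also threads the feedback slot through, so the replacement JS operator carries the same FeedbackParameter as the original unary op.
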
@@ -527,7 +549,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
// Always bake in String feedback into the graph.
- if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ if (r.GetBinaryOperationHint(node) == BinaryOperationHint::kString) {
r.CheckInputsToString();
}
@@ -630,7 +652,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
// We never get here when we had String feedback.
- DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
+ DCHECK_NE(BinaryOperationHint::kString, r.GetBinaryOperationHint(node));
if (r.OneInputIs(Type::String())) {
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
@@ -654,6 +676,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(JSAddNode::FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -887,7 +910,14 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
return r.ChangeToPureOperator(simplified()->NumberEqual());
- } else if (r.GetCompareNumberOperationHint(&hint)) {
+ } else if (r.GetCompareNumberOperationHint(&hint) &&
+ hint != NumberOperationHint::kNumberOrOddball &&
+ hint != NumberOperationHint::kNumberOrBoolean) {
+ // SpeculativeNumberEqual performs implicit conversion of oddballs to
+      // numbers, so we must not generate it for strict equality with these
+      // hints.
+ DCHECK(hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kSignedSmall);
return r.ChangeToSpeculativeOperator(
simplified()->SpeculativeNumberEqual(hint), Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
@@ -1463,17 +1493,34 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
//
+ // When V8_REVERSE_JSARGS is set:
+ // ----------- A r g u m e n t s -----------
+ // -- 0: CEntry
+ // --- Stack args ---
+ // -- 1: new_target
+ // -- 2: target
+ // -- 3: argc, including the receiver and implicit args (Smi)
+ // -- 4: padding
+ // -- 5: receiver
+ // -- [6, 6 + n[: the n actual arguments passed to the builtin
+ // --- Register args ---
+ // -- 6 + n: the C entry point
+ // -- 6 + n + 1: argc (Int32)
+ // -----------------------------------
+ //
+ // Otherwise:
// ----------- A r g u m e n t s -----------
// -- 0: CEntry
// --- Stack args ---
// -- 1: receiver
// -- [2, 2 + n[: the n actual arguments passed to the builtin
- // -- 2 + n: argc, including the receiver and implicit args (Smi)
- // -- 2 + n + 1: target
- // -- 2 + n + 2: new_target
+ // -- 2 + n: padding
+ // -- 2 + n + 1: argc, including the receiver and implicit args (Smi)
+ // -- 2 + n + 2: target
+ // -- 2 + n + 3: new_target
// --- Register args ---
- // -- 2 + n + 3: the C entry point
- // -- 2 + n + 4: argc (Int32)
+ // -- 2 + n + 4: the C entry point
+ // -- 2 + n + 5: argc (Int32)
// -----------------------------------
// The logic contained here is mirrored in Builtins::Generate_Adaptor.
@@ -1496,6 +1543,25 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
node->ReplaceInput(0, stub);
Zone* zone = jsgraph->zone();
+ const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
+ Node* argc_node = jsgraph->Constant(argc);
+
+ static const int kStubAndReceiver = 2;
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone, 1, new_target);
+ node->InsertInput(zone, 2, target);
+ node->InsertInput(zone, 3, argc_node);
+ node->InsertInput(zone, 4, jsgraph->PaddingConstant());
+
+ if (is_construct) {
+ // Unify representations between construct and call nodes.
+ // Remove new target and add receiver as a stack parameter.
+ Node* receiver = jsgraph->UndefinedConstant();
+ node->RemoveInput(argc);
+ node->InsertInput(zone, 5, receiver);
+ }
+ int cursor = arity + kStubAndReceiver + BuiltinArguments::kNumExtraArgs;
+#else
if (is_construct) {
// Unify representations between construct and call nodes.
// Remove new target and add receiver as a stack parameter.
@@ -1504,15 +1570,12 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
node->InsertInput(zone, 1, receiver);
}
- const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
- Node* argc_node = jsgraph->Constant(argc);
-
- static const int kStubAndReceiver = 2;
int cursor = arity + kStubAndReceiver;
node->InsertInput(zone, cursor++, jsgraph->PaddingConstant());
node->InsertInput(zone, cursor++, argc_node);
node->InsertInput(zone, cursor++, target);
node->InsertInput(zone, cursor++, new_target);
+#endif
Address entry = Builtins::CppEntryOf(builtin_index);
ExternalReference entry_ref = ExternalReference::Create(entry);
@@ -1525,7 +1588,8 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
const char* debug_name = Builtins::name(builtin_index);
Operator::Properties properties = node->op()->properties();
auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
- zone, kReturnCount, argc, debug_name, properties, flags);
+ zone, kReturnCount, argc, debug_name, properties, flags,
+ StackArgumentOrder::kJS);
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
@@ -1577,8 +1641,7 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(2u, p.arity());
- int const arity = static_cast<int>(p.arity() - 2);
+ int const arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
@@ -1649,7 +1712,7 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Reduction JSTypedLowering::ReduceJSCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode = p.convert_mode();
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
diff --git a/chromium/v8/src/compiler/linkage.cc b/chromium/v8/src/compiler/linkage.cc
index e16290f2a11..08ab34a892e 100644
--- a/chromium/v8/src/compiler/linkage.cc
+++ b/chromium/v8/src/compiler/linkage.cc
@@ -180,7 +180,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
switch (function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
// not to call into arbitrary JavaScript, not to throw, and not to lazily
- // deoptimize are whitelisted here and can be called without a FrameState.
+ // deoptimize are allowlisted here and can be called without a FrameState.
case Runtime::kAbort:
case Runtime::kAllocateInOldGeneration:
case Runtime::kCreateIterResultObject:
@@ -218,7 +218,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
break;
}
- // For safety, default to needing a FrameState unless whitelisted.
+ // For safety, default to needing a FrameState unless allowlisted.
return true;
}
@@ -253,7 +253,7 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
Zone* zone, int return_count, int js_parameter_count,
const char* debug_name, Operator::Properties properties,
- CallDescriptor::Flags flags) {
+ CallDescriptor::Flags flags, StackArgumentOrder stack_order) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -305,7 +305,8 @@ CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
flags, // flags
- debug_name); // debug name
+ debug_name, // debug name
+ stack_order); // stack order
}
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
@@ -325,7 +326,11 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
+#ifdef V8_REVERSE_JSARGS
+ int spill_slot_index = -i - 1;
+#else
int spill_slot_index = i - js_parameter_count;
+#endif
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
spill_slot_index, MachineType::AnyTagged()));
}
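
[Editorial note: to see what the V8_REVERSE_JSARGS branch changes, here is the slot assignment both formulas produce for a call with three JS parameters. Illustrative only; the two formulas are copied from the hunk above.]

    #include <cstdio>

    int main() {
      const int js_parameter_count = 3;
      for (int i = 0; i < js_parameter_count; i++) {
        int reversed = -i - 1;                 // V8_REVERSE_JSARGS: -1, -2, -3
        int classic = i - js_parameter_count;  // default order:     -3, -2, -1
        std::printf("param %d -> reversed slot %d, default slot %d\n", i,
                    reversed, classic);
      }
      return 0;
    }

Both orders use the same set of caller frame slots; only the mapping from parameter index to slot is mirrored.
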
@@ -358,7 +363,8 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
flags, // flags
- "js-call");
+ "js-call", // debug name
+ StackArgumentOrder::kJS); // stack order
}
// TODO(turbofan): cache call descriptors for code stub calls.
@@ -458,6 +464,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
kNoCalleeSaved, // callee-saved fp
CallDescriptor::kCanUseRoots | flags, // flags
descriptor.DebugName(), // debug name
+ descriptor.GetStackArgumentOrder(), // stack order
descriptor.allocatable_registers());
}
diff --git a/chromium/v8/src/compiler/linkage.h b/chromium/v8/src/compiler/linkage.h
index b55f3cdcb7c..346e9bda0cc 100644
--- a/chromium/v8/src/compiler/linkage.h
+++ b/chromium/v8/src/compiler/linkage.h
@@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
+ StackArgumentOrder stack_order = StackArgumentOrder::kDefault,
const RegList allocatable_registers = 0,
size_t stack_return_count = 0)
: kind_(kind),
@@ -250,6 +251,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
callee_saved_fp_registers_(callee_saved_fp_registers),
allocatable_registers_(allocatable_registers),
flags_(flags),
+ stack_order_(stack_order),
debug_name_(debug_name) {}
// Returns the kind of this call.
@@ -292,6 +294,19 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return stack_param_count_;
}
+ int GetStackIndexFromSlot(int slot_index) const {
+#ifdef V8_REVERSE_JSARGS
+ switch (GetStackArgumentOrder()) {
+ case StackArgumentOrder::kDefault:
+ return -slot_index - 1;
+ case StackArgumentOrder::kJS:
+ return slot_index + static_cast<int>(StackParameterCount());
+ }
+#else
+ return -slot_index - 1;
+#endif
+ }
+
// The total number of inputs to this call, which includes the target,
// receiver, context, etc.
// TODO(titzer): this should input the framestate input too.
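
[Editorial note: GetStackIndexFromSlot is the inverse mapping used when walking stack parameters. Restated standalone for both orders, assuming V8_REVERSE_JSARGS is defined and StackParameterCount() == 3; not part of the patch.]

    #include <cassert>

    // Mirrors CallDescriptor::GetStackIndexFromSlot under V8_REVERSE_JSARGS.
    int StackIndexFromSlot(int slot_index, bool js_order, int stack_param_count) {
      return js_order ? slot_index + stack_param_count : -slot_index - 1;
    }

    int main() {
      assert(StackIndexFromSlot(-1, /*js_order=*/false, 3) == 0);
      assert(StackIndexFromSlot(-3, /*js_order=*/false, 3) == 2);
      assert(StackIndexFromSlot(-1, /*js_order=*/true, 3) == 2);  // mirrored
      assert(StackIndexFromSlot(-3, /*js_order=*/true, 3) == 0);
      return 0;
    }
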
@@ -338,6 +353,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return location_sig_->GetParam(index).GetType();
}
+ StackArgumentOrder GetStackArgumentOrder() const { return stack_order_; }
+
// Operator properties describe how this call can be optimized, if at all.
Operator::Properties properties() const { return properties_; }
@@ -391,6 +408,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// register allocator to use.
const RegList allocatable_registers_;
const Flags flags_;
+ const StackArgumentOrder stack_order_;
const char* const debug_name_;
const CFunctionInfo* c_function_info_ = nullptr;
@@ -438,7 +456,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
static CallDescriptor* GetCEntryStubCallDescriptor(
Zone* zone, int return_count, int js_parameter_count,
const char* debug_name, Operator::Properties properties,
- CallDescriptor::Flags flags);
+ CallDescriptor::Flags flags,
+ StackArgumentOrder stack_order = StackArgumentOrder::kDefault);
static CallDescriptor* GetStubCallDescriptor(
Zone* zone, const CallInterfaceDescriptor& descriptor,
diff --git a/chromium/v8/src/compiler/load-elimination.h b/chromium/v8/src/compiler/load-elimination.h
index b97fd7b8834..fb5aee23aa0 100644
--- a/chromium/v8/src/compiler/load-elimination.h
+++ b/chromium/v8/src/compiler/load-elimination.h
@@ -228,8 +228,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
class AbstractState final : public ZoneObject {
public:
- AbstractState() {}
-
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
diff --git a/chromium/v8/src/compiler/machine-graph-verifier.cc b/chromium/v8/src/compiler/machine-graph-verifier.cc
index 8b318d1430b..5eeb5dc2486 100644
--- a/chromium/v8/src/compiler/machine-graph-verifier.cc
+++ b/chromium/v8/src/compiler/machine-graph-verifier.cc
@@ -735,23 +735,6 @@ class MachineRepresentationChecker {
}
}
- void CheckValueInputIsCompressed(Node const* node, int index) {
- Node const* input = node->InputAt(index);
- switch (inferrer_->GetRepresentation(input)) {
- case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCompressedPointer:
- return;
- default:
- break;
- }
- std::ostringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " uses node #" << input->id() << ":" << *input->op()
- << " which doesn't have a compressed representation.";
- PrintDebugHelp(str, node);
- FATAL("%s", str.str().c_str());
- }
-
void CheckValueInputIsTagged(Node const* node, int index) {
Node const* input = node->InputAt(index);
switch (inferrer_->GetRepresentation(input)) {
@@ -985,35 +968,6 @@ class MachineRepresentationChecker {
}
}
- bool Intersect(MachineRepresentation lhs, MachineRepresentation rhs) {
- return (GetRepresentationProperties(lhs) &
- GetRepresentationProperties(rhs)) != 0;
- }
-
- enum RepresentationProperties { kIsPointer = 1, kIsTagged = 2 };
-
- int GetRepresentationProperties(MachineRepresentation representation) {
- switch (representation) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedPointer:
- return kIsPointer | kIsTagged;
- case MachineRepresentation::kTaggedSigned:
- return kIsTagged;
- case MachineRepresentation::kWord32:
- return MachineRepresentation::kWord32 ==
- MachineType::PointerRepresentation()
- ? kIsPointer
- : 0;
- case MachineRepresentation::kWord64:
- return MachineRepresentation::kWord64 ==
- MachineType::PointerRepresentation()
- ? kIsPointer
- : 0;
- default:
- return 0;
- }
- }
-
bool IsCompatible(MachineRepresentation expected,
MachineRepresentation actual) {
switch (expected) {
diff --git a/chromium/v8/src/compiler/machine-graph.cc b/chromium/v8/src/compiler/machine-graph.cc
index 0a00392f4b2..34464cfb052 100644
--- a/chromium/v8/src/compiler/machine-graph.cc
+++ b/chromium/v8/src/compiler/machine-graph.cc
@@ -32,6 +32,11 @@ Node* MachineGraph::IntPtrConstant(intptr_t value) {
: Int64Constant(static_cast<int64_t>(value));
}
+Node* MachineGraph::UintPtrConstant(uintptr_t value) {
+ return machine()->Is32() ? Uint32Constant(static_cast<uint32_t>(value))
+ : Uint64Constant(static_cast<uint64_t>(value));
+}
+
Node* MachineGraph::TaggedIndexConstant(intptr_t value) {
int32_t value32 = static_cast<int32_t>(value);
Node** loc = cache_.FindTaggedIndexConstant(value32);
diff --git a/chromium/v8/src/compiler/machine-graph.h b/chromium/v8/src/compiler/machine-graph.h
index 9eb5998dfc7..87175847f54 100644
--- a/chromium/v8/src/compiler/machine-graph.h
+++ b/chromium/v8/src/compiler/machine-graph.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
// TODO(turbofan): Code using Int32Constant/Int64Constant to store pointer
// constants is probably not serializable.
Node* IntPtrConstant(intptr_t value);
+ Node* UintPtrConstant(uintptr_t value);
Node* TaggedIndexConstant(intptr_t value);
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc
index 1b600291691..127c7681099 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.cc
+++ b/chromium/v8/src/compiler/machine-operator-reducer.cc
@@ -311,35 +311,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kWord32Equal: {
- Int32BinopMatcher m(node);
- if (m.IsFoldable()) { // K == K => K
- return ReplaceBool(m.left().Value() == m.right().Value());
- }
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
- if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
- if (m.right().HasValue()) {
- base::Optional<std::pair<Node*, uint32_t>> replacements;
- if (m.left().IsTruncateInt64ToInt32()) {
- replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
- NodeProperties::GetValueInput(m.left().node(), 0),
- static_cast<uint32_t>(m.right().Value()));
- } else {
- replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
- m.left().node(), static_cast<uint32_t>(m.right().Value()));
- }
- if (replacements) {
- node->ReplaceInput(0, replacements->first);
- node->ReplaceInput(1, Uint32Constant(replacements->second));
- return Changed(node);
- }
- }
- break;
+ return ReduceWord32Equal(node);
}
case IrOpcode::kWord64Equal: {
Int64BinopMatcher m(node);
@@ -1623,9 +1595,117 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
return NoChange();
}
+namespace {
+
+// Represents an operation of the form `(source & mask) == masked_value`.
+struct BitfieldCheck {
+ Node* source;
+ uint32_t mask;
+ uint32_t masked_value;
+ bool truncate_from_64_bit;
+
+ static base::Optional<BitfieldCheck> Detect(Node* node) {
+ // There are two patterns to check for here:
+ // 1. Single-bit checks: `(val >> shift) & 1`, where:
+ // - the shift may be omitted, and/or
+ // - the result may be truncated from 64 to 32
+ // 2. Equality checks: `(val & mask) == expected`, where:
+ // - val may be truncated from 64 to 32 before masking (see
+ // ReduceWord32EqualForConstantRhs)
+ if (node->opcode() == IrOpcode::kWord32Equal) {
+ Uint32BinopMatcher eq(node);
+ if (eq.left().IsWord32And()) {
+ Uint32BinopMatcher mand(eq.left().node());
+ if (mand.right().HasValue()) {
+ BitfieldCheck result{mand.left().node(), mand.right().Value(),
+ eq.right().Value(), false};
+ if (mand.left().IsTruncateInt64ToInt32()) {
+ result.truncate_from_64_bit = true;
+ result.source =
+ NodeProperties::GetValueInput(mand.left().node(), 0);
+ }
+ return result;
+ }
+ }
+ } else {
+ if (node->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ return TryDetectShiftAndMaskOneBit<Word64Adapter>(
+ NodeProperties::GetValueInput(node, 0));
+ } else {
+ return TryDetectShiftAndMaskOneBit<Word32Adapter>(node);
+ }
+ }
+ return {};
+ }
+
+ base::Optional<BitfieldCheck> TryCombine(const BitfieldCheck& other) {
+ if (source != other.source ||
+ truncate_from_64_bit != other.truncate_from_64_bit)
+ return {};
+ uint32_t overlapping_bits = mask & other.mask;
+ // It would be kind of strange to have any overlapping bits, but they can be
+ // allowed as long as they don't require opposite values in the same
+ // positions.
+ if ((masked_value & overlapping_bits) !=
+ (other.masked_value & overlapping_bits))
+ return {};
+ return BitfieldCheck{source, mask | other.mask,
+ masked_value | other.masked_value,
+ truncate_from_64_bit};
+ }
+
+ private:
+ template <typename WordNAdapter>
+ static base::Optional<BitfieldCheck> TryDetectShiftAndMaskOneBit(Node* node) {
+ // Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
+ if (WordNAdapter::IsWordNAnd(NodeMatcher(node))) {
+ typename WordNAdapter::IntNBinopMatcher mand(node);
+ if (mand.right().HasValue() && mand.right().Value() == 1) {
+ if (WordNAdapter::IsWordNShr(mand.left()) ||
+ WordNAdapter::IsWordNSar(mand.left())) {
+ typename WordNAdapter::UintNBinopMatcher shift(mand.left().node());
+ if (shift.right().HasValue() && shift.right().Value() < 32u) {
+ uint32_t mask = 1 << shift.right().Value();
+ return BitfieldCheck{shift.left().node(), mask, mask,
+ WordNAdapter::WORD_SIZE == 64};
+ }
+ }
+ return BitfieldCheck{mand.left().node(), 1, 1,
+ WordNAdapter::WORD_SIZE == 64};
+ }
+ }
+ return {};
+ }
+};
+
+} // namespace
+
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
- return ReduceWordNAnd<Word32Adapter>(node);
+ Reduction reduction = ReduceWordNAnd<Word32Adapter>(node);
+ if (reduction.Changed()) {
+ return reduction;
+ }
+
+ // Attempt to detect multiple bitfield checks from the same bitfield struct
+ // and fold them into a single check.
+ Int32BinopMatcher m(node);
+ if (auto right_bitfield = BitfieldCheck::Detect(m.right().node())) {
+ if (auto left_bitfield = BitfieldCheck::Detect(m.left().node())) {
+ if (auto combined_bitfield = left_bitfield->TryCombine(*right_bitfield)) {
+ Node* source = combined_bitfield->source;
+ if (combined_bitfield->truncate_from_64_bit) {
+ source = TruncateInt64ToInt32(source);
+ }
+ node->ReplaceInput(0, Word32And(source, combined_bitfield->mask));
+ node->ReplaceInput(1, Int32Constant(combined_bitfield->masked_value));
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ return Changed(node).FollowedBy(ReduceWord32Equal(node));
+ }
+ }
+ }
+
+ return NoChange();
}
Reduction MachineOperatorReducer::ReduceWord64And(Node* node) {
@@ -1756,6 +1836,39 @@ Reduction MachineOperatorReducer::ReduceWord64Xor(Node* node) {
return ReduceWordNXor<Word64Adapter>(node);
}
+Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) { // K == K => K
+ return ReplaceBool(m.left().Value() == m.right().Value());
+ }
+ if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
+ Int32BinopMatcher msub(m.left().node());
+ node->ReplaceInput(0, msub.left().node());
+ node->ReplaceInput(1, msub.right().node());
+ return Changed(node);
+ }
+ // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+ if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
+ if (m.right().HasValue()) {
+ base::Optional<std::pair<Node*, uint32_t>> replacements;
+ if (m.left().IsTruncateInt64ToInt32()) {
+ replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
+ NodeProperties::GetValueInput(m.left().node(), 0),
+ static_cast<uint32_t>(m.right().Value()));
+ } else {
+ replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
+ m.left().node(), static_cast<uint32_t>(m.right().Value()));
+ }
+ if (replacements) {
+ node->ReplaceInput(0, replacements->first);
+ node->ReplaceInput(1, Uint32Constant(replacements->second));
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
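
[Editorial note: for intuition about the BitfieldCheck fold added in this file, two checks against the same source word collapse into a single mask-and-compare. A standalone sketch on plain integers, with masks chosen arbitrarily for the demo; not part of the patch.]

    #include <cassert>
    #include <cstdint>

    // Before: one single-bit pattern `(val >> shift) & 1` and one equality
    // pattern `(val & mask) == expected`, combined with Word32And.
    bool CheckSeparately(uint32_t flags) {
      bool bit1 = ((flags >> 1) & 1) != 0;
      bool bit2 = (flags & 4) == 4;
      return bit1 && bit2;
    }

    // After: one Word32Equal(Word32And(source, mask | other.mask),
    // masked_value | other.masked_value), as ReduceWord32And now emits
    // via BitfieldCheck::TryCombine.
    bool CheckCombined(uint32_t flags) {
      return (flags & 6) == 6;
    }

    int main() {
      for (uint32_t f = 0; f < 16; ++f) {
        assert(CheckSeparately(f) == CheckCombined(f));
      }
      return 0;
    }
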
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.h b/chromium/v8/src/compiler/machine-operator-reducer.h
index 7970daefce9..9f12f818374 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.h
+++ b/chromium/v8/src/compiler/machine-operator-reducer.h
@@ -109,6 +109,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceWord64Or(Node* node);
Reduction ReduceWord32Xor(Node* node);
Reduction ReduceWord64Xor(Node* node);
+ Reduction ReduceWord32Equal(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc
index 9a985eb5fa4..ed180283fed 100644
--- a/chromium/v8/src/compiler/machine-operator.cc
+++ b/chromium/v8/src/compiler/machine-operator.cc
@@ -339,6 +339,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F64x2Pmin, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Pmax, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Ceil, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Trunc, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2NearestInt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -362,6 +366,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F32x4Pmin, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Pmax, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Ceil, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Trunc, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4NearestInt, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
@@ -408,6 +416,7 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4GeU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(I32x4BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4DotI16x8S, Operator::kCommutative, 2, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -476,14 +485,14 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
// The format is:
diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h
index aa4f2dcf2ca..f013337478e 100644
--- a/chromium/v8/src/compiler/machine-operator.h
+++ b/chromium/v8/src/compiler/machine-operator.h
@@ -576,6 +576,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Qfms();
const Operator* F64x2Pmin();
const Operator* F64x2Pmax();
+ const Operator* F64x2Ceil();
+ const Operator* F64x2Floor();
+ const Operator* F64x2Trunc();
+ const Operator* F64x2NearestInt();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -602,6 +606,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Qfms();
const Operator* F32x4Pmin();
const Operator* F32x4Pmax();
+ const Operator* F32x4Ceil();
+ const Operator* F32x4Floor();
+ const Operator* F32x4Trunc();
+ const Operator* F32x4NearestInt();
const Operator* I64x2Splat();
const Operator* I64x2SplatI32Pair();
@@ -656,6 +664,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4GeU();
const Operator* I32x4Abs();
const Operator* I32x4BitMask();
+ const Operator* I32x4DotI16x8S();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -740,14 +749,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S8x16Swizzle();
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* S1x2AnyTrue();
- const Operator* S1x2AllTrue();
- const Operator* S1x4AnyTrue();
- const Operator* S1x4AllTrue();
- const Operator* S1x8AnyTrue();
- const Operator* S1x8AllTrue();
- const Operator* S1x16AnyTrue();
- const Operator* S1x16AllTrue();
+ const Operator* V64x2AnyTrue();
+ const Operator* V64x2AllTrue();
+ const Operator* V32x4AnyTrue();
+ const Operator* V32x4AllTrue();
+ const Operator* V16x8AnyTrue();
+ const Operator* V16x8AllTrue();
+ const Operator* V8x16AnyTrue();
+ const Operator* V8x16AllTrue();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
diff --git a/chromium/v8/src/compiler/memory-lowering.h b/chromium/v8/src/compiler/memory-lowering.h
index 45015e98bbf..1c6ef8a3722 100644
--- a/chromium/v8/src/compiler/memory-lowering.h
+++ b/chromium/v8/src/compiler/memory-lowering.h
@@ -78,7 +78,6 @@ class MemoryLowering final : public Reducer {
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
Zone*) { UNREACHABLE(); },
const char* function_debug_name = nullptr);
- ~MemoryLowering() = default;
const char* reducer_name() const override { return "MemoryReducer"; }
diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h
index cf0df2d6360..bd93b545e12 100644
--- a/chromium/v8/src/compiler/node-matchers.h
+++ b/chromium/v8/src/compiler/node-matchers.h
@@ -39,7 +39,7 @@ struct NodeMatcher {
bool IsComparison() const;
-#define DEFINE_IS_OPCODE(Opcode) \
+#define DEFINE_IS_OPCODE(Opcode, ...) \
bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
ALL_OP_LIST(DEFINE_IS_OPCODE)
#undef DEFINE_IS_OPCODE
diff --git a/chromium/v8/src/compiler/node.h b/chromium/v8/src/compiler/node.h
index 8072bab46eb..add4116dac9 100644
--- a/chromium/v8/src/compiler/node.h
+++ b/chromium/v8/src/compiler/node.h
@@ -303,6 +303,16 @@ Node** Node::OutOfLineInputs::inputs() {
std::ostream& operator<<(std::ostream& os, const Node& n);
+// Base class for node wrappers.
+class NodeWrapper {
+ public:
+ explicit constexpr NodeWrapper(Node* node) : node_(node) {}
+ operator Node*() const { return node_; }
+ Node* operator->() const { return node_; }
+
+ private:
+ Node* node_;
+};
// Typedefs to shorten commonly used Node containers.
using NodeDeque = ZoneDeque<Node*>;
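
[Editorial note: NodeWrapper underlies the typed node views referenced earlier in this patch (JSBinaryOpNode, JSAddNode). A minimal sketch of the pattern with Node stubbed out; DemoBinopNode and its index value are invented for the illustration.]

    #include <cassert>

    struct Node {};  // stand-in for v8::internal::compiler::Node

    class NodeWrapper {
     public:
      explicit constexpr NodeWrapper(Node* node) : node_(node) {}
      operator Node*() const { return node_; }
      Node* operator->() const { return node_; }

     private:
      Node* node_;
    };

    // A typed view over one operator's node, in the style of JSAddNode.
    class DemoBinopNode : public NodeWrapper {
     public:
      explicit constexpr DemoBinopNode(Node* node) : NodeWrapper(node) {}
      static constexpr int FeedbackVectorIndex() { return 2; }
    };

    int main() {
      Node n;
      DemoBinopNode wrapped(&n);
      Node* raw = wrapped;  // converts back to the underlying Node*
      assert(raw == &n);
      return 0;
    }
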
diff --git a/chromium/v8/src/compiler/opcodes.cc b/chromium/v8/src/compiler/opcodes.cc
index c465422d346..3cd464d6b1b 100644
--- a/chromium/v8/src/compiler/opcodes.cc
+++ b/chromium/v8/src/compiler/opcodes.cc
@@ -16,7 +16,7 @@ namespace compiler {
namespace {
char const* const kMnemonics[] = {
-#define DECLARE_MNEMONIC(x) #x,
+#define DECLARE_MNEMONIC(x, ...) #x,
ALL_OP_LIST(DECLARE_MNEMONIC)
#undef DECLARE_MNEMONIC
"UnknownOpcode"};
diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h
index f3b3ff8c8eb..9db2a912a2a 100644
--- a/chromium/v8/src/compiler/opcodes.h
+++ b/chromium/v8/src/compiler/opcodes.h
@@ -85,29 +85,30 @@
V(StaticAssert)
// Opcodes for JavaScript operators.
-#define JS_COMPARE_BINOP_LIST(V) \
- V(JSEqual) \
- V(JSStrictEqual) \
- V(JSLessThan) \
- V(JSGreaterThan) \
- V(JSLessThanOrEqual) \
- V(JSGreaterThanOrEqual)
+// Arguments are JSName (the name with a 'JS' prefix) and Name.
+#define JS_COMPARE_BINOP_LIST(V) \
+ V(JSEqual, Equal) \
+ V(JSStrictEqual, StrictEqual) \
+ V(JSLessThan, LessThan) \
+ V(JSGreaterThan, GreaterThan) \
+ V(JSLessThanOrEqual, LessThanOrEqual) \
+ V(JSGreaterThanOrEqual, GreaterThanOrEqual)
#define JS_BITWISE_BINOP_LIST(V) \
- V(JSBitwiseOr) \
- V(JSBitwiseXor) \
- V(JSBitwiseAnd) \
- V(JSShiftLeft) \
- V(JSShiftRight) \
- V(JSShiftRightLogical)
+ V(JSBitwiseOr, BitwiseOr) \
+ V(JSBitwiseXor, BitwiseXor) \
+ V(JSBitwiseAnd, BitwiseAnd) \
+ V(JSShiftLeft, ShiftLeft) \
+ V(JSShiftRight, ShiftRight) \
+ V(JSShiftRightLogical, ShiftRightLogical)
#define JS_ARITH_BINOP_LIST(V) \
- V(JSAdd) \
- V(JSSubtract) \
- V(JSMultiply) \
- V(JSDivide) \
- V(JSModulus) \
- V(JSExponentiate)
+ V(JSAdd, Add) \
+ V(JSSubtract, Subtract) \
+ V(JSMultiply, Multiply) \
+ V(JSDivide, Divide) \
+ V(JSModulus, Modulus) \
+ V(JSExponentiate, Exponentiate)
#define JS_SIMPLE_BINOP_LIST(V) \
JS_COMPARE_BINOP_LIST(V) \
@@ -127,12 +128,18 @@
V(JSToString) \
V(JSParseInt)
+#define JS_BITWISE_UNOP_LIST(V) \
+ V(JSBitwiseNot, BitwiseNot) \
+ V(JSNegate, Negate)
+
+#define JS_ARITH_UNOP_LIST(V) \
+ V(JSDecrement, Decrement) \
+ V(JSIncrement, Increment)
+
#define JS_SIMPLE_UNOP_LIST(V) \
- JS_CONVERSION_UNOP_LIST(V) \
- V(JSBitwiseNot) \
- V(JSDecrement) \
- V(JSIncrement) \
- V(JSNegate)
+ JS_ARITH_UNOP_LIST(V) \
+ JS_BITWISE_UNOP_LIST(V) \
+ JS_CONVERSION_UNOP_LIST(V)
#define JS_CREATE_OP_LIST(V) \
V(JSCloneObject) \
@@ -765,6 +772,10 @@
V(F64x2Qfms) \
V(F64x2Pmin) \
V(F64x2Pmax) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -792,6 +803,10 @@
V(F32x4Qfms) \
V(F32x4Pmin) \
V(F32x4Pmax) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
V(I64x2Splat) \
V(I64x2SplatI32Pair) \
V(I64x2ExtractLane) \
@@ -847,6 +862,7 @@
V(I32x4GeU) \
V(I32x4Abs) \
V(I32x4BitMask) \
+ V(I32x4DotI16x8S) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -931,14 +947,14 @@
V(S128AndNot) \
V(S8x16Swizzle) \
V(S8x16Shuffle) \
- V(S1x2AnyTrue) \
- V(S1x2AllTrue) \
- V(S1x4AnyTrue) \
- V(S1x4AllTrue) \
- V(S1x8AnyTrue) \
- V(S1x8AllTrue) \
- V(S1x16AnyTrue) \
- V(S1x16AllTrue) \
+ V(V64x2AnyTrue) \
+ V(V64x2AllTrue) \
+ V(V32x4AnyTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AnyTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AnyTrue) \
+ V(V8x16AllTrue) \
V(LoadTransform)
#define VALUE_OP_LIST(V) \
@@ -962,12 +978,12 @@ namespace compiler {
class V8_EXPORT_PRIVATE IrOpcode {
public:
enum Value {
-#define DECLARE_OPCODE(x) k##x,
+#define DECLARE_OPCODE(x, ...) k##x,
ALL_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
- kLast = -1
-#define COUNT_OPCODE(x) +1
- ALL_OP_LIST(COUNT_OPCODE)
+ kLast = -1
+#define COUNT_OPCODE(...) +1
+ ALL_OP_LIST(COUNT_OPCODE)
#undef COUNT_OPCODE
};
@@ -991,7 +1007,16 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for constant operator.
static bool IsConstantOpcode(Value value) {
- return kInt32Constant <= value && value <= kRelocatableInt64Constant;
+#define CASE(Name) \
+ case k##Name: \
+ return true;
+ switch (value) {
+ CONSTANT_OP_LIST(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
}
static bool IsPhiOpcode(Value value) {
@@ -1006,8 +1031,9 @@ class V8_EXPORT_PRIVATE IrOpcode {
return kIfTrue <= value && value <= kIfDefault;
}
- // Returns true if opcode terminates control flow in a graph (i.e. respective
- // nodes are expected to have control uses by the graphs {End} node only).
+ // Returns true if opcode terminates control flow in a graph (i.e.
+ // respective nodes are expected to have control uses by the graph's {End}
+ // node only).
static bool IsGraphTerminator(Value value) {
return value == kDeoptimize || value == kReturn || value == kTailCall ||
value == kTerminate || value == kThrow;
@@ -1020,9 +1046,18 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for comparison operator.
static bool IsComparisonOpcode(Value value) {
- return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
- (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
- (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+ switch (value) {
+ JS_COMPARE_BINOP_LIST(CASE);
+ SIMPLIFIED_COMPARE_BINOP_LIST(CASE);
+ MACHINE_COMPARE_BINOP_LIST(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
}
static bool IsContextChainExtendingOpcode(Value value) {
diff --git a/chromium/v8/src/compiler/operator-properties.cc b/chromium/v8/src/compiler/operator-properties.cc
index a4892cdb2a2..bf0f724a993 100644
--- a/chromium/v8/src/compiler/operator-properties.cc
+++ b/chromium/v8/src/compiler/operator-properties.cc
@@ -24,7 +24,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
DCHECK(HasContextInput(op));
IrOpcode::Value const opcode = static_cast<IrOpcode::Value>(op->opcode());
switch (opcode) {
-#define CASE(Name) case IrOpcode::k##Name:
+#define CASE(Name, ...) case IrOpcode::k##Name:
// Binary/unary operators, calls and constructor calls only
// need the context to generate exceptions or lookup fields
// on the native context, so passing any context is fine.
diff --git a/chromium/v8/src/compiler/pipeline-statistics.cc b/chromium/v8/src/compiler/pipeline-statistics.cc
index 5e2c7feffd1..7989cfacfb6 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.cc
+++ b/chromium/v8/src/compiler/pipeline-statistics.cc
@@ -18,10 +18,10 @@ namespace compiler {
namespace {
// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm categories.
+// in both the v8.turbofan and the v8.wasm.detailed categories.
constexpr const char kTraceCategory[] = // --
TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm");
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed");
} // namespace
diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc
index ee6609cfa69..6f3b8923764 100644
--- a/chromium/v8/src/compiler/pipeline.cc
+++ b/chromium/v8/src/compiler/pipeline.cc
@@ -150,9 +150,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone(),
- info_->trace_heap_broker_enabled(),
- is_concurrent_inlining)),
+ broker_(new JSHeapBroker(
+ isolate_, info_->zone(), info_->trace_heap_broker(),
+ is_concurrent_inlining, info->native_context_independent())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -160,9 +160,9 @@ class PipelineData {
PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
graph_ = new (graph_zone_) Graph(graph_zone_);
source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
- node_origins_ = info->trace_turbo_json_enabled()
- ? new (graph_zone_) NodeOriginTable(graph_)
- : nullptr;
+ node_origins_ = info->trace_turbo_json() ? new (graph_zone_)
+ NodeOriginTable(graph_)
+ : nullptr;
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, MachineType::PointerRepresentation(),
@@ -349,11 +349,6 @@ class PipelineData {
return register_allocation_data_;
}
- BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
- void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
- profiler_data_ = profiler_data;
- }
-
std::string const& source_position_output() const {
return source_position_output_;
}
@@ -370,7 +365,7 @@ class PipelineData {
}
void ChooseSpecializationContext() {
- if (info()->is_function_context_specializing()) {
+ if (info()->function_context_specializing()) {
DCHECK(info()->has_context());
specialization_context_ =
Just(OuterContext(handle(info()->context(), isolate()), 0));
@@ -599,9 +594,6 @@ class PipelineData {
Zone* register_allocation_zone_;
RegisterAllocationData* register_allocation_data_ = nullptr;
- // Basic block profiling support.
- BasicBlockProfiler::Data* profiler_data_ = nullptr;
-
// Source position output for --trace-turbo.
std::string source_position_output_;
@@ -680,9 +672,9 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
Handle<Script> script(Script::cast(shared->script()), isolate);
if (!script->source().IsUndefined(isolate)) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
Object source_name = script->name();
- OFStream os(tracing_scope.file());
+ auto& os = tracing_scope.stream();
os << "--- FUNCTION SOURCE (";
if (source_name.IsString()) {
os << String::cast(source_name).ToCString().get() << ":";
@@ -711,8 +703,8 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
void PrintInlinedFunctionInfo(
OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ auto& os = tracing_scope.stream();
os << "INLINE (" << h.shared_info->DebugName().ToCString().get() << ") id{"
<< info->optimization_id() << "," << source_id << "} AS " << inlining_id
<< " AT ";
@@ -753,8 +745,8 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ auto& os = tracing_scope.stream();
// Print the source code if available.
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
@@ -795,7 +787,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
@@ -808,11 +800,12 @@ void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
}
json_of << "\"},\n";
}
- if (info->trace_turbo_graph_enabled() || FLAG_trace_turbo_scheduler) {
+ if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Schedule --------------------------------------\n" << *schedule;
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Schedule --------------------------------------\n"
+ << *schedule;
}
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
@@ -865,13 +858,13 @@ class NodeOriginsWrapper final : public Reducer {
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
Reducer* reducer) {
- if (data->info()->is_source_positions_enabled()) {
+ if (data->info()->source_positions()) {
void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
SourcePositionWrapper* const wrapper =
new (buffer) SourcePositionWrapper(reducer, data->source_positions());
reducer = wrapper;
}
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
void* const buffer = data->graph_zone()->New(sizeof(NodeOriginsWrapper));
NodeOriginsWrapper* const wrapper =
new (buffer) NodeOriginsWrapper(reducer, data->node_origins());
@@ -919,7 +912,7 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
json_of << "{\"function\" : ";
JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
@@ -937,15 +930,15 @@ PipelineStatistics* CreatePipelineStatistics(
PipelineStatistics* pipeline_statistics = nullptr;
bool tracing_enabled;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- &tracing_enabled);
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), &tracing_enabled);
if (tracing_enabled || FLAG_turbo_stats_wasm) {
pipeline_statistics = new PipelineStatistics(
info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
std::unique_ptr<char[]> function_name = info->GetDebugName();
json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
@@ -1015,7 +1008,8 @@ PipelineCompilationJob::PipelineCompilationJob(
zone_(function->GetIsolate()->allocator(),
kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
- compilation_info_(&zone_, function->GetIsolate(), shared_info, function),
+ compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
+ FLAG_turbo_nci),
pipeline_statistics_(CreatePipelineStatistics(
handle(Script::cast(shared_info->script()), isolate),
compilation_info(), function->GetIsolate(), &zone_stats_)),
@@ -1027,7 +1021,7 @@ PipelineCompilationJob::PipelineCompilationJob(
compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
}
-PipelineCompilationJob::~PipelineCompilationJob() {}
+PipelineCompilationJob::~PipelineCompilationJob() = default;
namespace {
// Ensure that the RuntimeStats table is set on the PipelineData for
@@ -1058,14 +1052,15 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
return AbortOptimization(BailoutReason::kFunctionTooBig);
}
- if (!FLAG_always_opt) {
- compilation_info()->MarkAsBailoutOnUninitialized();
+ if (!FLAG_always_opt && !compilation_info()->native_context_independent()) {
+ compilation_info()->set_bailout_on_uninitialized();
}
if (FLAG_turbo_loop_peeling) {
- compilation_info()->MarkAsLoopPeelingEnabled();
+ compilation_info()->set_loop_peeling();
}
- if (FLAG_turbo_inlining) {
- compilation_info()->MarkAsInliningEnabled();
+ if (FLAG_turbo_inlining &&
+ !compilation_info()->native_context_independent()) {
+ compilation_info()->set_inlining();
}
// This is the bottleneck for computing and setting poisoning level in the
@@ -1080,7 +1075,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
if (FLAG_turbo_allocation_folding) {
- compilation_info()->MarkAsAllocationFoldingEnabled();
+ compilation_info()->set_allocation_folding();
}
// Determine whether to specialize the code for the function's context.
@@ -1091,11 +1086,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr()) {
- compilation_info()->MarkAsFunctionContextSpecializing();
+ compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
- if (compilation_info()->is_source_positions_enabled()) {
+ if (compilation_info()->source_positions()) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(
isolate, compilation_info()->shared_info());
}
@@ -1269,20 +1264,20 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
&info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
}
- if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info_.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ if (info_.trace_turbo_graph()) { // Simple textual RPO.
StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
<< " graph -- " << std::endl
<< AsRPO(*data_.graph());
}
- if (info_.trace_turbo_json_enabled()) {
+ if (info_.trace_turbo_json()) {
TurboJsonFile json_of(&info_, std::ios_base::trunc);
json_of << "{\"function\":\"" << info_.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -1306,9 +1301,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
info_.SetCode(code);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(compilation_info()->GetDebugName().get(), os, isolate);
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ code->Disassemble(compilation_info()->GetDebugName().get(),
+ tracing_scope.stream(), isolate);
}
#endif
return SUCCEEDED;
@@ -1341,12 +1336,15 @@ struct GraphBuilderPhase {
void Run(PipelineData* data, Zone* temp_zone) {
BytecodeGraphBuilderFlags flags;
- if (data->info()->is_analyze_environment_liveness()) {
+ if (data->info()->analyze_environment_liveness()) {
flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
}
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
+ if (data->info()->native_context_independent()) {
+ flags |= BytecodeGraphBuilderFlag::kNativeContextIndependent;
+ }
JSFunctionRef closure(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
@@ -1372,7 +1370,7 @@ struct InliningPhase {
data->broker(), data->common(),
data->machine(), temp_zone);
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
}
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
@@ -1381,12 +1379,12 @@ struct InliningPhase {
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(), data->broker(),
data->specialization_context(),
- data->info()->is_function_context_specializing()
+ data->info()->function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
JSNativeContextSpecialization::Flags flags =
JSNativeContextSpecialization::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
// Passing the OptimizedCompilationInfo's shared zone here as
@@ -1404,11 +1402,13 @@ struct InliningPhase {
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &native_context_specialization);
- AddReducer(data, &graph_reducer, &context_specialization);
+ if (!data->info()->native_context_independent()) {
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ AddReducer(data, &graph_reducer, &context_specialization);
+ }
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &call_reducer);
- if (data->info()->is_inlining_enabled()) {
+ if (data->info()->inlining()) {
AddReducer(data, &graph_reducer, &inlining);
}
graph_reducer.ReduceGraph();
@@ -1497,17 +1497,17 @@ struct SerializationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
SerializerForBackgroundCompilationFlags flags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
}
- if (data->info()->is_source_positions_enabled()) {
+ if (data->info()->source_positions()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_analyze_environment_liveness()) {
+ if (data->info()->analyze_environment_liveness()) {
flags |=
SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- if (data->info()->is_inlining_enabled()) {
+ if (data->info()->inlining()) {
flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
}
RunSerializerForBackgroundCompilation(
@@ -1545,7 +1545,9 @@ struct TypedLoweringPhase {
data->broker(), data->common(),
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &create_lowering);
+ if (!data->info()->native_context_independent()) {
+ AddReducer(data, &graph_reducer, &create_lowering);
+ }
AddReducer(data, &graph_reducer, &constant_folding_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &typed_optimization);
@@ -1805,7 +1807,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
- data->info()->is_allocation_folding_enabled()
+ data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
@@ -1997,8 +1999,8 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
temp_zone, data->graph(),
- data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags,
+ data->info()->splitting() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
&data->info()->tick_counter());
data->set_schedule(schedule);
}
@@ -2043,13 +2045,13 @@ struct InstructionSelectionPhase {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
- data->info()->switch_jump_table_enabled()
+ data->info()->switch_jump_table()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
&data->info()->tick_counter(),
data->address_of_max_unoptimized_frame_height(),
data->address_of_max_pushed_argument_count(),
- data->info()->is_source_positions_enabled()
+ data->info()->source_positions()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
InstructionSelector::SupportedFeatures(),
@@ -2060,13 +2062,13 @@ struct InstructionSelectionPhase {
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
- data->info()->trace_turbo_json_enabled()
+ data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
json_of << "{\"name\":\"" << phase_name()
<< "\",\"type\":\"instructions\""
@@ -2283,7 +2285,7 @@ struct PrintGraphPhase {
OptimizedCompilationInfo* info = data->info();
Graph* graph = data->graph();
- if (info->trace_turbo_json_enabled()) { // Print JSON.
+ if (info->trace_turbo_json()) { // Print JSON.
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
@@ -2292,7 +2294,7 @@ struct PrintGraphPhase {
<< "},\n";
}
- if (info->trace_turbo_scheduled_enabled()) {
+ if (info->trace_turbo_scheduled()) {
AccountingAllocator allocator;
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
@@ -2302,16 +2304,16 @@ struct PrintGraphPhase {
}
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Graph after " << phase << " -- " << std::endl;
- os << AsScheduledGraph(schedule);
- } else if (info->trace_turbo_graph_enabled()) { // Simple textual RPO.
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Graph after " << phase << " -- " << std::endl
+ << AsScheduledGraph(schedule);
+ } else if (info->trace_turbo_graph()) { // Simple textual RPO.
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Graph after " << phase << " -- " << std::endl;
- os << AsRPO(*graph);
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Graph after " << phase << " -- " << std::endl
+ << AsRPO(*graph);
}
}
};
@@ -2345,8 +2347,7 @@ struct VerifyGraphPhase {
#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
Run<PrintGraphPhase>(phase);
}
if (FLAG_turbo_verify) {
@@ -2359,21 +2360,20 @@ void PipelineImpl::Serialize() {
data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VCompilation(info());
}
data->source_positions()->AddDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->AddDecorator();
}
@@ -2442,7 +2442,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<TypedLoweringPhase>();
RunPrintAndVerify(TypedLoweringPhase::phase_name());
- if (data->info()->is_loop_peeling_enabled()) {
+ if (data->info()->loop_peeling()) {
Run<LoopPeelingPhase>();
RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
} else {
@@ -2531,7 +2531,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
data->source_positions()->RemoveDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
}
@@ -2598,7 +2598,7 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);
data->source_positions()->RemoveDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
}
@@ -2621,8 +2621,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
JumpOptimizationInfo jump_opt;
- bool should_optimize_jumps =
- isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
+ bool should_optimize_jumps = isolate->serializer_enabled() &&
+ FLAG_turbo_rewrite_far_jumps &&
+ !FLAG_turbo_profiling;
PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
jsgraph, nullptr, source_positions, &node_origins,
should_optimize_jumps ? &jump_opt : nullptr, options);
@@ -2639,12 +2640,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
PipelineImpl pipeline(&data);
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\" : ";
JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
@@ -2741,21 +2742,21 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
PipelineImpl pipeline(&data);
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ if (info.trace_turbo_graph()) { // Simple textual RPO.
StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
<< std::endl
<< AsRPO(*graph);
}
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -2783,7 +2784,7 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
DCHECK(result.succeeded());
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
<< BlockStartsAsJSON{&code_generator->block_starts()}
@@ -2802,12 +2803,12 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
json_of << "\n}";
}
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << info.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
return result;
@@ -2862,7 +2863,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info->GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -2915,13 +2916,12 @@ void Pipeline::GenerateCodeForWasmFunction(
PipelineImpl pipeline(&data);
- if (data.info()->trace_turbo_json_enabled() ||
- data.info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
@@ -2929,7 +2929,7 @@ void Pipeline::GenerateCodeForWasmFunction(
data.BeginPhaseKind("V8.WasmOptimization");
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
- data.info()->MarkAsSplittingEnabled();
+ data.info()->set_splitting();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
@@ -2987,7 +2987,7 @@ void Pipeline::GenerateCodeForWasmFunction(
code_generator->GetProtectedInstructionsData();
result->result_tier = wasm::ExecutionTier::kTurbofan;
- if (data.info()->trace_turbo_json_enabled()) {
+ if (data.info()->trace_turbo_json()) {
TurboJsonFile json_of(data.info(), std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
<< BlockStartsAsJSON{&code_generator->block_starts()}
@@ -3006,13 +3006,12 @@ void Pipeline::GenerateCodeForWasmFunction(
json_of << "\n}";
}
- if (data.info()->trace_turbo_json_enabled() ||
- data.info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
DCHECK(result->succeeded());
@@ -3054,7 +3053,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
DCHECK_NOT_NULL(data->schedule());
if (FLAG_turbo_profiling) {
- data->set_profiler_data(BasicBlockInstrumentor::Instrument(
+ data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
info(), data->graph(), data->schedule(), data->isolate()));
}
@@ -3074,15 +3073,16 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
if (verify_stub_graph) {
if (FLAG_trace_verify_csa) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "--------------------------------------------------\n"
- << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
- << "--------------------------------------------------\n"
- << *data->schedule()
- << "--------------------------------------------------\n"
- << "--- End of " << data->debug_name() << " generated by TurboFan\n"
- << "--------------------------------------------------\n";
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "--------------------------------------------------\n"
+ << "--- Verifying " << data->debug_name()
+ << " generated by TurboFan\n"
+ << "--------------------------------------------------\n"
+ << *data->schedule()
+ << "--------------------------------------------------\n"
+ << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n";
}
Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
MachineGraphVerifier::Run(
@@ -3102,14 +3102,14 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
return false;
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
AllowHandleDereference allow_deref;
TurboCfgFile tcf(isolate());
tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
data->sequence());
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
std::ostringstream source_position_output;
// Output source position information before the graph is deleted.
if (data_->source_positions() != nullptr) {
@@ -3244,7 +3244,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
data->InitializeCodeGenerator(linkage, std::move(buffer));
Run<AssembleCodePhase>();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
json_of << "{\"name\":\"code generation\""
<< ", \"type\":\"instructions\""
@@ -3271,18 +3271,10 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
return maybe_code;
}
- if (data->profiler_data()) {
-#ifdef ENABLE_DISASSEMBLER
- std::ostringstream os;
- code->Disassemble(nullptr, os, isolate());
- data->profiler_data()->SetCode(&os);
-#endif // ENABLE_DISASSEMBLER
- }
-
info()->SetCode(code);
PrintCode(isolate(), code, info());
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
TurboJsonFile json_of(info(), std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
@@ -3302,13 +3294,12 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
json_of << "\n}";
}
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
data->EndPhaseKind();
return code;
@@ -3342,19 +3333,22 @@ namespace {
void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
const char* phase_name) {
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
- json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\",";
- json_of << InstructionSequenceAsJSON{data->sequence()};
- json_of << "},\n";
- }
- if (info->trace_turbo_graph_enabled()) {
+ json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
+ << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
+ << ",\"register_allocation\":{"
+ << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
+ *(data->sequence())}
+ << "}},\n";
+ }
+ if (info->trace_turbo_graph()) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "----- Instruction sequence " << phase_name << " -----\n"
- << *data->sequence();
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream() << "----- Instruction sequence " << phase_name
+ << " -----\n"
+ << *data->sequence();
}
}
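
TraceSequence now folds the register allocation data into the same JSON record as the instruction blocks. Both rely on the wrapper idiom used across these tracing changes: a small struct captures references and an operator<< renders them, so output composes inline. A minimal sketch with a hypothetical Sequence type standing in for InstructionSequence:

#include <iostream>

struct Sequence { int instruction_count; };
struct SequenceAsJSON { const Sequence& sequence; };

std::ostream& operator<<(std::ostream& os, const SequenceAsJSON& s) {
  return os << "{\"instructions\":" << s.sequence.instruction_count << "}";
}

int main() {
  Sequence seq{42};
  std::cout << "{\"name\":\"demo\",\"type\":\"sequence\",\"blocks\":"
            << SequenceAsJSON{seq} << "},\n";
}
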
@@ -3381,13 +3375,13 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
#endif
RegisterAllocationFlags flags;
- if (data->info()->is_turbo_control_flow_aware_allocation()) {
+ if (data->info()->turbo_control_flow_aware_allocation()) {
flags |= RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
}
- if (data->info()->is_turbo_preprocess_ranges()) {
+ if (data->info()->turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
- if (data->info()->trace_turbo_allocation_enabled()) {
+ if (data->info()->trace_turbo_allocation()) {
flags |= RegisterAllocationFlag::kTraceAllocation;
}
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
@@ -3405,16 +3399,15 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
->RangesDefinedInDeferredStayInDeferred());
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PreAllocation",
data->register_allocation_data());
}
- if (info()->is_turbo_preprocess_ranges()) {
+ if (info()->turbo_preprocess_ranges()) {
Run<SplinterLiveRangesPhase>();
- if (info()->trace_turbo_json_enabled() &&
- !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PostSplinter",
data->register_allocation_data());
@@ -3427,7 +3420,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
- if (info()->is_turbo_preprocess_ranges()) {
+ if (info()->turbo_preprocess_ranges()) {
Run<MergeSplintersPhase>();
}
@@ -3459,7 +3452,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
verifier->VerifyGapMoves();
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("CodeGen",
data->register_allocation_data());
diff --git a/chromium/v8/src/compiler/representation-change.cc b/chromium/v8/src/compiler/representation-change.cc
index 7077f7d643f..5967d1005e5 100644
--- a/chromium/v8/src/compiler/representation-change.cc
+++ b/chromium/v8/src/compiler/representation-change.cc
@@ -11,6 +11,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/heap/factory-inl.h"
@@ -210,6 +211,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetFloat32RepresentationFor(node, output_rep, output_type,
use_info.truncation());
case MachineRepresentation::kFloat64:
+ DCHECK_NE(TypeCheckKind::kBigInt, use_info.type_check());
return GetFloat64RepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kBit:
@@ -402,7 +404,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
node);
- } else if (output_rep == MachineRepresentation::kBit) {
+ }
+
+ if (use_info.type_check() == TypeCheckKind::kBigInt &&
+ !output_type.Is(Type::BigInt())) {
+ // BigInt checks can only be performed on tagged representations. Note that
+ // a corresponding check is inserted down below.
+ if (!CanBeTaggedPointer(output_rep)) {
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
+ unreachable);
+ }
+ }
+
+ if (output_rep == MachineRepresentation::kBit) {
if (output_type.Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
} else {
@@ -427,7 +444,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
- } else if (output_type.Is(Type::BigInt())) {
+ } else if (output_type.Is(Type::BigInt()) &&
+ use_info.type_check() == TypeCheckKind::kBigInt) {
op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
@@ -662,6 +680,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
switch (use_info.type_check()) {
case TypeCheckKind::kNone:
case TypeCheckKind::kNumber:
+ case TypeCheckKind::kNumberOrBoolean:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
case TypeCheckKind::kBigInt:
@@ -695,6 +714,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
if (use_info.truncation().TruncatesOddballAndBigIntToNumber() ||
+ use_info.type_check() == TypeCheckKind::kNumberOrBoolean ||
use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -707,9 +727,16 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (IsAnyTagged(output_rep)) {
if (output_type.Is(Type::Undefined())) {
- return jsgraph()->Float64Constant(
- std::numeric_limits<double>::quiet_NaN());
-
+ if (use_info.type_check() == TypeCheckKind::kNumberOrBoolean) {
+ Node* unreachable = InsertUnconditionalDeopt(
+ use_node, DeoptimizeReason::kNotANumberOrBoolean);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64),
+ unreachable);
+ } else {
+ return jsgraph()->Float64Constant(
+ std::numeric_limits<double>::quiet_NaN());
+ }
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
node = InsertChangeTaggedSignedToInt32(node);
op = machine()->ChangeInt32ToFloat64();
@@ -732,6 +759,9 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
!output_type.Maybe(Type::BooleanOrNullOrNumber()))) {
op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber,
use_info.feedback());
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrBoolean) {
+ op = simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumberOrBoolean, use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTaggedToFloat64(
CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
@@ -1045,12 +1075,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
case IrOpcode::kFloat64Constant:
UNREACHABLE();
case IrOpcode::kNumberConstant: {
- double const fv = OpParameter<double>(node->op());
- using limits = std::numeric_limits<int64_t>;
- if (fv <= limits::max() && fv >= limits::min()) {
- int64_t const iv = static_cast<int64_t>(fv);
- if (static_cast<double>(iv) == fv) {
- return jsgraph()->Int64Constant(iv);
+ if (use_info.type_check() != TypeCheckKind::kBigInt) {
+ double const fv = OpParameter<double>(node->op());
+ using limits = std::numeric_limits<int64_t>;
+ if (fv <= limits::max() && fv >= limits::min()) {
+ int64_t const iv = static_cast<int64_t>(fv);
+ if (static_cast<double>(iv) == fv) {
+ return jsgraph()->Int64Constant(iv);
+ }
}
}
break;
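
The fold of a NumberConstant into an Int64Constant is now skipped for BigInt-checked uses, so a plain number can no longer slip into a word64 slot guarded by a BigInt check. For reference, a standalone sketch of the round-trip test itself, with the 2^63 boundary made explicit rather than relying on the int64-to-double comparison used above:

#include <cstdint>
#include <limits>

// Returns true and sets *out iff fv is an integer exactly representable as
// int64_t. NaN and out-of-range values fail the first check; the upper bound
// is strict because int64 max rounds up to 2^63 when converted to double.
bool FitsInt64Exactly(double fv, int64_t* out) {
  using limits = std::numeric_limits<int64_t>;
  if (!(fv >= static_cast<double>(limits::min()) &&
        fv < 9223372036854775808.0 /* 2^63 */)) {
    return false;
  }
  int64_t const iv = static_cast<int64_t>(fv);
  if (static_cast<double>(iv) != fv) return false;  // fractional part lost
  *out = iv;
  return true;
}
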
@@ -1069,6 +1101,19 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
break;
}
+ if (use_info.type_check() == TypeCheckKind::kBigInt) {
+ // BigInts are only represented as tagged pointer and word64.
+ if (!CanBeTaggedPointer(output_rep) &&
+ output_rep != MachineRepresentation::kWord64) {
+ DCHECK(!output_type.Is(Type::BigInt()));
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
+ unreachable);
+ }
+ }
+
// Select the correct X -> Word64 operator.
const Operator* op;
if (output_type.Is(Type::None())) {
@@ -1079,6 +1124,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
CHECK(output_type.Is(Type::Boolean()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kBigInt);
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
diff --git a/chromium/v8/src/compiler/representation-change.h b/chromium/v8/src/compiler/representation-change.h
index 78fa1fbe9dc..3654b089fca 100644
--- a/chromium/v8/src/compiler/representation-change.h
+++ b/chromium/v8/src/compiler/representation-change.h
@@ -119,6 +119,7 @@ enum class TypeCheckKind : uint8_t {
kSigned32,
kSigned64,
kNumber,
+ kNumberOrBoolean,
kNumberOrOddball,
kHeapObject,
kBigInt,
@@ -137,6 +138,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "Signed64";
case TypeCheckKind::kNumber:
return os << "Number";
+ case TypeCheckKind::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case TypeCheckKind::kNumberOrOddball:
return os << "NumberOrOddball";
case TypeCheckKind::kHeapObject:
@@ -266,6 +269,12 @@ class UseInfo {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
TypeCheckKind::kNumber, feedback);
}
+ static UseInfo CheckedNumberOrBooleanAsFloat64(
+ IdentifyZeros identify_zeros, const FeedbackSource& feedback) {
+ return UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Any(identify_zeros),
+ TypeCheckKind::kNumberOrBoolean, feedback);
+ }
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const FeedbackSource& feedback) {
return UseInfo(MachineRepresentation::kFloat64,
diff --git a/chromium/v8/src/compiler/schedule.cc b/chromium/v8/src/compiler/schedule.cc
index cc3243cb2e1..1b0caa7567f 100644
--- a/chromium/v8/src/compiler/schedule.cc
+++ b/chromium/v8/src/compiler/schedule.cc
@@ -228,7 +228,7 @@ namespace {
bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
switch (opcode) {
-#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(BUILD_BLOCK_JS_CASE)
#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
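
The (Name) to (Name, ...) macro change repeats in schedule.cc, scheduler.cc, and simplified-lowering.cc: the JS_OP_LIST entries evidently carry extra per-operator arguments now, and consumers that only need the opcode name swallow the rest with a variadic parameter. A self-contained sketch of the idiom (demo names, not the real list):

// A list macro whose entries carry extra data per operator.
#define DEMO_JS_OP_LIST(V) \
  V(JSAdd, 2)              \
  V(JSCall, 3)

enum Opcode { kJSAdd, kJSCall, kInt32Add };

// Consumers needing only the name ignore trailing arguments via "...".
#define BUILD_CASE(Name, ...) case k##Name:
bool IsJSOp(Opcode opcode) {
  switch (opcode) {
    DEMO_JS_OP_LIST(BUILD_CASE)
    return true;
    default:
      return false;
  }
}
#undef BUILD_CASE
#undef DEMO_JS_OP_LIST
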
diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc
index 0b0a5484117..ddd97f3e1e0 100644
--- a/chromium/v8/src/compiler/scheduler.cc
+++ b/chromium/v8/src/compiler/scheduler.cc
@@ -354,7 +354,7 @@ class CFGBuilder : public ZoneObject {
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
-#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(BUILD_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef BUILD_BLOCK_JS_CASE
@@ -398,7 +398,7 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectThrow(node);
break;
-#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define CONNECT_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef CONNECT_BLOCK_JS_CASE
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.cc b/chromium/v8/src/compiler/simd-scalar-lowering.cc
index 21d34b21d25..82ccc463261 100644
--- a/chromium/v8/src/compiler/simd-scalar-lowering.cc
+++ b/chromium/v8/src/compiler/simd-scalar-lowering.cc
@@ -142,12 +142,13 @@ void SimdScalarLowering::LowerGraph() {
V(S128Or) \
V(S128Xor) \
V(S128Not) \
- V(S1x4AnyTrue) \
- V(S1x4AllTrue) \
- V(S1x8AnyTrue) \
- V(S1x8AllTrue) \
- V(S1x16AnyTrue) \
- V(S1x16AllTrue)
+ V(V32x4AnyTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AnyTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AnyTrue) \
+ V(V8x16AllTrue) \
+ V(I32x4BitMask)
#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
@@ -212,7 +213,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8LtU) \
V(I16x8LeU) \
V(I16x8RoundingAverageU) \
- V(I16x8Abs)
+ V(I16x8Abs) \
+ V(I16x8BitMask)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -245,7 +247,8 @@ void SimdScalarLowering::LowerGraph() {
V(S8x16Swizzle) \
V(S8x16Shuffle) \
V(I8x16RoundingAverageU) \
- V(I8x16Abs)
+ V(I8x16Abs) \
+ V(I8x16BitMask)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
@@ -1025,6 +1028,44 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
ReplaceNode(node, rep_node, num_lanes);
}
+void SimdScalarLowering::LowerBitMaskOp(Node* node, SimdType rep_type,
+ int msb_index) {
+ Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(1);
+ Node* result = mcgraph_->Int32Constant(0);
+ uint32_t mask = 1 << msb_index;
+
+ for (int i = 0; i < num_lanes; ++i) {
+ // Lane i should end up at bit i in the final result.
+ // +-----------------------------------------------------------------+
+ // | | msb_index | (i < msb_index) | (i > msb_index) |
+ // +-------+-----------+----------------------+----------------------+
+ // | i8x16 | 7 | msb >> (msb_index-i) | msb << (i-msb_index) |
+ // | i16x8 | 15 | msb >> (msb_index-i) | n/a |
+ // | i32x4 | 31 | msb >> (msb_index-i) | n/a |
+ // +-------+-----------+----------------------+----------------------+
+ Node* msb = Mask(reps[i], mask);
+
+ if (i < msb_index) {
+ int shift = msb_index - i;
+ Node* shifted = graph()->NewNode(machine()->Word32Shr(), msb,
+ mcgraph_->Int32Constant(shift));
+ result = graph()->NewNode(machine()->Word32Or(), shifted, result);
+ } else if (i > msb_index) {
+ int shift = i - msb_index;
+ Node* shifted = graph()->NewNode(machine()->Word32Shl(), msb,
+ mcgraph_->Int32Constant(shift));
+ result = graph()->NewNode(machine()->Word32Or(), shifted, result);
+ } else {
+ result = graph()->NewNode(machine()->Word32Or(), msb, result);
+ }
+ }
+
+ rep_node[0] = result;
+ ReplaceNode(node, rep_node, 1);
+}
+
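
Per the table above, LowerBitMaskOp routes each lane's most significant bit to bit i of a single word32 result. A standalone C++ model of the same computation on plain integers (graph-node plumbing elided); ScalarBitMask(lanes, 4, 31) corresponds to I32x4BitMask:

#include <cstdint>

uint32_t ScalarBitMask(const uint32_t* lanes, int num_lanes, int msb_index) {
  uint32_t const mask = 1u << msb_index;   // selects each lane's MSB
  uint32_t result = 0;
  for (int i = 0; i < num_lanes; ++i) {
    uint32_t const msb = lanes[i] & mask;  // Mask(reps[i], mask)
    if (i < msb_index) {
      result |= msb >> (msb_index - i);    // Word32Shr
    } else if (i > msb_index) {
      result |= msb << (i - msb_index);    // Word32Shl
    } else {
      result |= msb;
    }
  }
  return result;
}
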
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
int num_lanes = NumLanes(rep_type);
@@ -1627,12 +1668,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 16);
break;
}
- case IrOpcode::kS1x4AnyTrue:
- case IrOpcode::kS1x4AllTrue:
- case IrOpcode::kS1x8AnyTrue:
- case IrOpcode::kS1x8AllTrue:
- case IrOpcode::kS1x16AnyTrue:
- case IrOpcode::kS1x16AllTrue: {
+ case IrOpcode::kV32x4AnyTrue:
+ case IrOpcode::kV32x4AllTrue:
+ case IrOpcode::kV16x8AnyTrue:
+ case IrOpcode::kV16x8AllTrue:
+ case IrOpcode::kV8x16AnyTrue:
+ case IrOpcode::kV8x16AllTrue: {
DCHECK_EQ(1, node->InputCount());
SimdType input_rep_type = ReplacementType(node->InputAt(0));
Node** rep;
@@ -1649,18 +1690,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node* true_node = mcgraph_->Int32Constant(1);
Node* false_node = mcgraph_->Int32Constant(0);
Node* tmp_result = false_node;
- if (node->opcode() == IrOpcode::kS1x4AllTrue ||
- node->opcode() == IrOpcode::kS1x8AllTrue ||
- node->opcode() == IrOpcode::kS1x16AllTrue) {
+ if (node->opcode() == IrOpcode::kV32x4AllTrue ||
+ node->opcode() == IrOpcode::kV16x8AllTrue ||
+ node->opcode() == IrOpcode::kV8x16AllTrue) {
tmp_result = true_node;
}
for (int i = 0; i < input_num_lanes; ++i) {
Diamond is_false(
graph(), common(),
graph()->NewNode(machine()->Word32Equal(), rep[i], false_node));
- if (node->opcode() == IrOpcode::kS1x4AllTrue ||
- node->opcode() == IrOpcode::kS1x8AllTrue ||
- node->opcode() == IrOpcode::kS1x16AllTrue) {
+ if (node->opcode() == IrOpcode::kV32x4AllTrue ||
+ node->opcode() == IrOpcode::kV16x8AllTrue ||
+ node->opcode() == IrOpcode::kV8x16AllTrue) {
tmp_result = is_false.Phi(MachineRepresentation::kWord32, false_node,
tmp_result);
} else {
@@ -1675,6 +1716,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kI8x16BitMask: {
+ LowerBitMaskOp(node, rep_type, 7);
+ break;
+ }
+ case IrOpcode::kI16x8BitMask: {
+ LowerBitMaskOp(node, rep_type, 15);
+ break;
+ }
+ case IrOpcode::kI32x4BitMask: {
+ LowerBitMaskOp(node, rep_type, 31);
+ break;
+ }
case IrOpcode::kI8x16RoundingAverageU:
case IrOpcode::kI16x8RoundingAverageU: {
DCHECK_EQ(2, node->InputCount());
@@ -1707,7 +1760,7 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
something_changed = true;
node->ReplaceInput(i, GetReplacements(input)[0]);
}
- if (HasReplacement(1, input)) {
+ if (ReplacementCount(input) > 1 && HasReplacement(1, input)) {
something_changed = true;
for (int j = 1; j < ReplacementCount(input); ++j) {
node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.h b/chromium/v8/src/compiler/simd-scalar-lowering.h
index d91e6285f4e..a852f94c7c7 100644
--- a/chromium/v8/src/compiler/simd-scalar-lowering.h
+++ b/chromium/v8/src/compiler/simd-scalar-lowering.h
@@ -110,6 +110,7 @@ class SimdScalarLowering {
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
MachineType MachineTypeFrom(SimdType simdType);
+ void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc
index d00acefc39c..dd297d0b121 100644
--- a/chromium/v8/src/compiler/simplified-lowering.cc
+++ b/chromium/v8/src/compiler/simplified-lowering.cc
@@ -105,6 +105,9 @@ UseInfo CheckedUseInfoAsWord32FromHint(
return UseInfo::CheckedSigned32AsWord32(identify_zeros, feedback);
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsWord32(feedback);
+ case NumberOperationHint::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case NumberOperationHint::kNumberOrOddball:
return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
}
@@ -122,6 +125,8 @@ UseInfo CheckedUseInfoAsFloat64FromHint(
UNREACHABLE();
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback);
+ case NumberOperationHint::kNumberOrBoolean:
+ return UseInfo::CheckedNumberOrBooleanAsFloat64(identify_zeros, feedback);
case NumberOperationHint::kNumberOrOddball:
return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback);
}
@@ -178,10 +183,16 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
}
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
- Zone* type_zone) {
- // We assume the inputs are checked Signed32 (or known statically
- // to be Signed32). Technically, the inputs could also be minus zero, but
- // that cannot cause overflow.
+ TypeCache const* type_cache, Zone* type_zone) {
+ // We assume the inputs are checked Signed32 (or known statically to be
+ // Signed32). Technically, the inputs could also be minus zero, which we treat
+ // as 0 for the purpose of this function.
+ if (left.Maybe(Type::MinusZero())) {
+ left = Type::Union(left, type_cache->kSingletonZero, type_zone);
+ }
+ if (right.Maybe(Type::MinusZero())) {
+ right = Type::Union(right, type_cache->kSingletonZero, type_zone);
+ }
left = Type::Intersect(left, Type::Signed32(), type_zone);
right = Type::Intersect(right, Type::Signed32(), type_zone);
if (left.IsNone() || right.IsNone()) return false;
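
CanOverflowSigned32 now widens a possibly-minus-zero input with the singleton zero type before intersecting with Signed32, since minus zero acts as 0 for int32 overflow purposes. A rough standalone model of the range test for the add case (the real function works on V8's Type lattice and also handles subtraction):

#include <cstdint>
#include <limits>

// With both inputs confined to [min, max] subranges of int32 (minus zero
// already folded into 0), addition can only overflow if an extreme sum
// escapes the int32 range.
bool CanOverflowSigned32Add(int32_t left_min, int32_t left_max,
                            int32_t right_min, int32_t right_max) {
  int64_t const lo = static_cast<int64_t>(left_min) + right_min;
  int64_t const hi = static_cast<int64_t>(left_max) + right_max;
  return lo < std::numeric_limits<int32_t>::min() ||
         hi > std::numeric_limits<int32_t>::max();
}
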
@@ -291,11 +302,10 @@ class RepresentationSelector {
#ifdef DEBUG
node_input_use_infos_(count_, InputUseInfos(zone), zone),
#endif
- nodes_(zone),
replacements_(zone),
changer_(changer),
- queue_(zone),
- typing_stack_(zone),
+ revisit_queue_(zone),
+ traversal_nodes_(zone),
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
@@ -303,90 +313,6 @@ class RepresentationSelector {
tick_counter_(tick_counter) {
}
- // Forward propagation of types from type feedback.
- void RunTypePropagationPhase() {
- // Run type propagation.
- TRACE("--{Type propagation phase}--\n");
- ResetNodeInfoState();
-
- DCHECK(typing_stack_.empty());
- typing_stack_.push({graph()->end(), 0});
- GetInfo(graph()->end())->set_pushed();
- while (!typing_stack_.empty()) {
- NodeState& current = typing_stack_.top();
-
- // If there is an unvisited input, push it and continue.
- bool pushed_unvisited = false;
- while (current.input_index < current.node->InputCount()) {
- Node* input = current.node->InputAt(current.input_index);
- NodeInfo* input_info = GetInfo(input);
- current.input_index++;
- if (input_info->unvisited()) {
- input_info->set_pushed();
- typing_stack_.push({input, 0});
- pushed_unvisited = true;
- break;
- } else if (input_info->pushed()) {
- // If we had already pushed (and not visited) an input, it means that
- // the current node will be visited before one of its inputs. If this
- // happens, the current node might need to be revisited.
- MarkAsPossibleRevisit(current.node, input);
- }
- }
- if (pushed_unvisited) continue;
-
- // Process the top of the stack.
- Node* node = current.node;
- typing_stack_.pop();
- NodeInfo* info = GetInfo(node);
- info->set_visited();
- bool updated = UpdateFeedbackType(node);
- TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode<RETYPE>(node, info->truncation(), nullptr);
- TRACE(" ==> output ");
- PrintOutputInfo(info);
- TRACE("\n");
- if (updated) {
- auto it = might_need_revisit_.find(node);
- if (it == might_need_revisit_.end()) continue;
-
- for (Node* const user : it->second) {
- if (GetInfo(user)->visited()) {
- TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
- GetInfo(user)->set_queued();
- queue_.push(user);
- }
- }
- }
- }
-
- // Process the revisit queue.
- while (!queue_.empty()) {
- Node* node = queue_.front();
- queue_.pop();
- NodeInfo* info = GetInfo(node);
- info->set_visited();
- bool updated = UpdateFeedbackType(node);
- TRACE(" revisit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode<RETYPE>(node, info->truncation(), nullptr);
- TRACE(" ==> output ");
- PrintOutputInfo(info);
- TRACE("\n");
- if (updated) {
- // Here we need to check all uses since we can't easily know which nodes
- // will need to be revisited due to having an input which was a
- // revisited node.
- for (Node* const user : node->uses()) {
- if (GetInfo(user)->visited()) {
- TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
- GetInfo(user)->set_queued();
- queue_.push(user);
- }
- }
- }
- }
- }
-
void ResetNodeInfoState() {
// Clean up for the next phase.
for (NodeInfo& info : info_) {
@@ -421,10 +347,6 @@ class RepresentationSelector {
bool UpdateFeedbackType(Node* node) {
if (node->op()->ValueOutputCount() == 0) return false;
- NodeInfo* info = GetInfo(node);
- Type type = info->feedback_type();
- Type new_type = type;
-
// For any non-phi node just wait until we get all inputs typed. We only
// allow untyped inputs for phi nodes because phis are the only places
// where cycles need to be broken.
@@ -436,6 +358,10 @@ class RepresentationSelector {
}
}
+ NodeInfo* info = GetInfo(node);
+ Type type = info->feedback_type();
+ Type new_type = NodeProperties::GetType(node);
+
// We preload these values here to avoid increasing the binary size too
// much, which happens if we inline the calls into the macros below.
Type input0_type;
@@ -604,33 +530,140 @@ class RepresentationSelector {
graph_zone());
}
- // Backward propagation of truncations.
- void RunTruncationPropagationPhase() {
- // Run propagation phase to a fixpoint.
- TRACE("--{Propagation phase}--\n");
- EnqueueInitial(jsgraph_->graph()->end());
- // Process nodes from the queue until it is empty.
- while (!queue_.empty()) {
- Node* node = queue_.front();
+ // Generates a post-order traversal of the nodes (each node after its
+ // inputs), rooted at End.
+ void GenerateTraversal() {
+ ZoneStack<NodeState> stack(zone_);
+
+ stack.push({graph()->end(), 0});
+ GetInfo(graph()->end())->set_pushed();
+ while (!stack.empty()) {
+ NodeState& current = stack.top();
+ Node* node = current.node;
+
+ // If there is an unvisited input, push it and continue with that node.
+ bool pushed_unvisited = false;
+ while (current.input_index < node->InputCount()) {
+ Node* input = node->InputAt(current.input_index);
+ NodeInfo* input_info = GetInfo(input);
+ current.input_index++;
+ if (input_info->unvisited()) {
+ input_info->set_pushed();
+ stack.push({input, 0});
+ pushed_unvisited = true;
+ break;
+ } else if (input_info->pushed()) {
+ // Optimization for the Retype phase.
+ // If we had already pushed (and not visited) an input, it means that
+ // the current node will be visited in the Retype phase before one of
+ // its inputs. If this happens, the current node might need to be
+ // revisited.
+ MarkAsPossibleRevisit(node, input);
+ }
+ }
+
+ if (pushed_unvisited) continue;
+
+ stack.pop();
NodeInfo* info = GetInfo(node);
- queue_.pop();
info->set_visited();
- TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
- info->truncation().description());
- VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
+
+ // Append the node to the traversal order.
+ traversal_nodes_.push_back(node);
}
}
- void Run(SimplifiedLowering* lowering) {
- RunTruncationPropagationPhase();
+ void PushNodeToRevisitIfVisited(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ if (info->visited()) {
+ TRACE(" QUEUEING #%d: %s\n", node->id(), node->op()->mnemonic());
+ info->set_queued();
+ revisit_queue_.push(node);
+ }
+ }
- RunTypePropagationPhase();
+ // Tries to update the feedback type of the node, as well as setting its
+ // machine representation (in VisitNode). Returns true iff the feedback
+ // type was changed.
+ bool RetypeNode(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode<RETYPE>(node, info->truncation(), nullptr);
+ TRACE(" ==> output %s\n", MachineReprToString(info->representation()));
+ return updated;
+ }
- // Run lowering and change insertion phase.
- TRACE("--{Simplified lowering phase}--\n");
- // Process nodes from the collected {nodes_} vector.
- for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
- Node* node = *i;
+ // Visits the node and marks it as visited. Inside of VisitNode, we might
+ // change the truncation of one of our inputs (see EnqueueInput<PROPAGATE> for
+ // this). If we change the truncation of an already visited node, we will add
+ // it to the revisit queue.
+ void PropagateTruncation(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
+ info->truncation().description());
+ VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
+ }
+
+ // Backward propagation of truncations to a fixpoint.
+ void RunPropagatePhase() {
+ TRACE("--{Propagate phase}--\n");
+ ResetNodeInfoState();
+ DCHECK(revisit_queue_.empty());
+
+ // Process nodes in reverse post order, with End as the root.
+ for (auto it = traversal_nodes_.crbegin(); it != traversal_nodes_.crend();
+ ++it) {
+ PropagateTruncation(*it);
+
+ while (!revisit_queue_.empty()) {
+ Node* node = revisit_queue_.front();
+ revisit_queue_.pop();
+ PropagateTruncation(node);
+ }
+ }
+ }
+
+ // Forward propagation of types from type feedback to a fixpoint.
+ void RunRetypePhase() {
+ TRACE("--{Retype phase}--\n");
+ ResetNodeInfoState();
+ DCHECK(revisit_queue_.empty());
+
+ for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
+ ++it) {
+ Node* node = *it;
+ if (!RetypeNode(node)) continue;
+
+ auto revisit_it = might_need_revisit_.find(node);
+ if (revisit_it == might_need_revisit_.end()) continue;
+
+ for (Node* const user : revisit_it->second) {
+ PushNodeToRevisitIfVisited(user);
+ }
+
+ // Process the revisit queue.
+ while (!revisit_queue_.empty()) {
+ Node* revisit_node = revisit_queue_.front();
+ revisit_queue_.pop();
+ if (!RetypeNode(revisit_node)) continue;
+ // Here we need to check all uses since we can't easily know which
+ // nodes will need to be revisited due to having an input which was
+ // a revisited node.
+ for (Node* const user : revisit_node->uses()) {
+ PushNodeToRevisitIfVisited(user);
+ }
+ }
+ }
+ }
+
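
Propagate and Retype share the same revisit discipline: walk the precomputed order once and, whenever visiting a node changes information that an already-visited node depends on, push that node onto a queue and drain it before advancing. A compact model (the real phases additionally gate queueing on the visited flag):

#include <queue>
#include <vector>

// visit(node) does the phase's work and returns true iff it changed state
// that dependents may rely on; users_of(node) yields the nodes to reconsider.
template <typename Node, typename VisitFn, typename UsersFn>
void RunToFixpoint(const std::vector<Node*>& order, VisitFn visit,
                   UsersFn users_of) {
  std::queue<Node*> revisit;
  for (Node* node : order) {
    if (!visit(node)) continue;
    for (Node* user : users_of(node)) revisit.push(user);
    while (!revisit.empty()) {
      Node* n = revisit.front();
      revisit.pop();
      if (!visit(n)) continue;
      for (Node* user : users_of(n)) revisit.push(user);
    }
  }
}
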
+ // Lowering and change insertion phase.
+ void RunLowerPhase(SimplifiedLowering* lowering) {
+ TRACE("--{Lower phase}--\n");
+ for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
+ ++it) {
+ Node* node = *it;
NodeInfo* info = GetInfo(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
@@ -656,11 +689,11 @@ class RepresentationSelector {
}
}
- void EnqueueInitial(Node* node) {
- NodeInfo* info = GetInfo(node);
- info->set_queued();
- nodes_.push_back(node);
- queue_.push(node);
+ void Run(SimplifiedLowering* lowering) {
+ GenerateTraversal();
+ RunPropagatePhase();
+ RunRetypePhase();
+ RunLowerPhase(lowering);
}
// Just assert for Retype and Lower. Propagate specialized below.
@@ -793,10 +826,10 @@ class RepresentationSelector {
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
void ConvertInput(Node* node, int index, UseInfo use,
Type input_type = Type::Invalid()) {
- Node* input = node->InputAt(index);
// In the change phase, insert a change before the use if necessary.
if (use.representation() == MachineRepresentation::kNone)
return; // No input requirement on the use.
+ Node* input = node->InputAt(index);
DCHECK_NOT_NULL(input);
NodeInfo* input_info = GetInfo(input);
MachineRepresentation input_rep = input_info->representation();
@@ -805,16 +838,15 @@ class RepresentationSelector {
// Output representation doesn't match usage.
TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
index, input->id(), input->op()->mnemonic());
- TRACE(" from ");
- PrintOutputInfo(input_info);
- TRACE(" to ");
- PrintUseInfo(use);
- TRACE("\n");
+ TRACE("from %s to %s:%s\n",
+ MachineReprToString(input_info->representation()),
+ MachineReprToString(use.representation()),
+ use.truncation().description());
if (input_type.IsInvalid()) {
input_type = TypeOf(input);
}
- Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), input_type, node, use);
+ Node* n = changer_->GetRepresentationFor(input, input_rep, input_type,
+ node, use);
node->ReplaceInput(index, n);
}
}
@@ -854,18 +886,16 @@ class RepresentationSelector {
template <Phase T>
void VisitReturn(Node* node) {
- int tagged_limit = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit integer slot count to pop
ProcessInput<T>(node, 0, UseInfo::TruncatingWord32());
// Visit value, context and frame state inputs as tagged.
- for (int i = 1; i < tagged_limit; i++) {
+ for (int i = 1; i < first_effect_index; i++) {
ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (effects, control).
- for (int i = tagged_limit; i < node->InputCount(); i++) {
+ for (int i = first_effect_index; i < node->InputCount(); i++) {
EnqueueInput<T>(node, i);
}
}
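
This and the following visitors replace the hand-summed value/context/frame-state count with NodeProperties::FirstEffectIndex, leaning on TurboFan's fixed input ordering. The layout assumed, as a sketch:

// TurboFan node inputs come in fixed groups:
//   [ value inputs | context | frame state | effect inputs | control inputs ]
// so the first effect index equals the sum the old code computed by hand.
int FirstEffectIndex(int value_count, int context_count,
                     int frame_state_count) {
  return value_count + context_count + frame_state_count;
}
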
@@ -873,13 +903,11 @@ class RepresentationSelector {
// Helper for an unused node.
template <Phase T>
void VisitUnused(Node* node) {
- int value_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
- for (int i = 0; i < value_count; i++) {
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<T>(node, i, UseInfo::None());
}
- ProcessRemainingInputs<T>(node, value_count);
+ ProcessRemainingInputs<T>(node, first_effect_index);
if (lower<T>()) Kill(node);
}
@@ -1083,19 +1111,27 @@ class RepresentationSelector {
auto call_descriptor = CallDescriptorOf(node->op());
int params = static_cast<int>(call_descriptor->ParameterCount());
int value_input_count = node->op()->ValueInputCount();
- // Propagate representation information from call descriptor.
- for (int i = 0; i < value_input_count; i++) {
- if (i == 0) {
- // The target of the call.
- ProcessInput<T>(node, i, UseInfo::Any());
- } else if ((i - 1) < params) {
- ProcessInput<T>(node, i,
- TruncatingUseInfoFromRepresentation(
- call_descriptor->GetInputType(i).representation()));
- } else {
- ProcessInput<T>(node, i, UseInfo::AnyTagged());
- }
+
+ DCHECK_GT(value_input_count, 0);
+ DCHECK_GE(value_input_count, params);
+
+ // The target of the call.
+ ProcessInput<T>(node, 0, UseInfo::Any());
+
+ // For the parameters (indexes [1, ..., params]), propagate representation
+ // information from call descriptor.
+ for (int i = 1; i <= params; i++) {
+ ProcessInput<T>(node, i,
+ TruncatingUseInfoFromRepresentation(
+ call_descriptor->GetInputType(i).representation()));
}
+
+ // Rest of the value inputs.
+ for (int i = params + 1; i < value_input_count; i++) {
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
+ }
+
+ // Effect and Control.
ProcessRemainingInputs<T>(node, value_input_count);
if (call_descriptor->ReturnCount() > 0) {
@@ -1457,7 +1493,8 @@ class RepresentationSelector {
if (lower<T>()) {
if (truncation.IsUsedAsWord32() ||
!CanOverflowSigned32(node->op(), left_feedback_type,
- right_feedback_type, graph_zone())) {
+ right_feedback_type, type_cache_,
+ graph_zone())) {
ChangeToPureOp(node, Int32Op(node));
} else {
@@ -1789,9 +1826,8 @@ class RepresentationSelector {
// Note: We must not do this for constants, as they are cached and we
// would thus kill the cached {node} during lowering (i.e. replace all
// uses with Dead), but at that point some node lowering might have
- // already taken the constant {node} from the cache (while it was in
- // a sane state still) and we would afterwards replace that use with
- // Dead as well.
+ // already taken the constant {node} from the cache (while it was not
+ // yet killed) and we would afterwards replace that use with Dead as well.
if (node->op()->ValueInputCount() > 0 &&
node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) {
return VisitUnused<T>(node);
@@ -2059,6 +2095,7 @@ class RepresentationSelector {
// hint with Oddball feedback here.
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
V8_FALLTHROUGH;
+ case NumberOperationHint::kNumberOrBoolean:
case NumberOperationHint::kNumber:
VisitBinop<T>(node,
CheckedUseInfoAsFloat64FromHint(
@@ -2806,6 +2843,11 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeBigIntAdd: {
+ // TODO(nicohartmann@, chromium:1073440): There should be special
+ // handling for truncation.IsUnused() that correctly propagates deadness,
+ // but preserves type checking which may throw exceptions. Until this
+ // is fully supported, we lower to int64 operations but keep pushing
+ // type constraints.
if (truncation.IsUsedAsWord64()) {
VisitBinop<T>(
node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
@@ -3278,6 +3320,7 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
+ case NumberOperationHint::kNumberOrBoolean:
case NumberOperationHint::kNumberOrOddball:
VisitUnop<T>(
node, CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
@@ -3681,7 +3724,7 @@ class RepresentationSelector {
case IrOpcode::kUnreachable:
case IrOpcode::kRuntimeAbort:
// All JavaScript operators except JSToNumber have uniform handling.
-#define OPCODE_CASE(name) case IrOpcode::k##name:
+#define OPCODE_CASE(name, ...) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
JS_OBJECT_OP_LIST(OPCODE_CASE)
JS_CONTEXT_OP_LIST(OPCODE_CASE)
@@ -3757,31 +3800,6 @@ class RepresentationSelector {
node->NullAllInputs(); // The {node} is now dead.
}
- void PrintOutputInfo(NodeInfo* info) {
- if (FLAG_trace_representation) {
- StdoutStream{} << info->representation();
- }
- }
-
- void PrintRepresentation(MachineRepresentation rep) {
- if (FLAG_trace_representation) {
- StdoutStream{} << rep;
- }
- }
-
- void PrintTruncation(Truncation truncation) {
- if (FLAG_trace_representation) {
- StdoutStream{} << truncation.description() << std::endl;
- }
- }
-
- void PrintUseInfo(UseInfo info) {
- if (FLAG_trace_representation) {
- StdoutStream{} << info.representation() << ":"
- << info.truncation().description();
- }
- }
-
private:
JSGraph* jsgraph_;
Zone* zone_; // Temporary zone.
@@ -3793,16 +3811,15 @@ class RepresentationSelector {
ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
// requirements on inputs.
#endif // DEBUG
- NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
RepresentationChanger* changer_; // for inserting representation changes
- ZoneQueue<Node*> queue_; // queue for traversing the graph
+ ZoneQueue<Node*> revisit_queue_; // Queue for revisiting nodes.
struct NodeState {
Node* node;
int input_index;
};
- ZoneStack<NodeState> typing_stack_; // stack for graph typing.
+ NodeVector traversal_nodes_; // Order in which to traverse the nodes.
// TODO(danno): RepresentationSelector shouldn't know anything about the
// source positions table, but must for now since there currently is no other
// way to pass down source position information to nodes created during
@@ -3825,8 +3842,7 @@ class RepresentationSelector {
// Template specializations
// Enqueue {use_node}'s {index} input if the {use_info} contains new information
-// for that input node. Add the input to {nodes_} if this is the first time it's
-// been visited.
+// for that input node.
template <>
void RepresentationSelector::EnqueueInput<PROPAGATE>(Node* use_node, int index,
UseInfo use_info) {
@@ -3838,28 +3854,21 @@ void RepresentationSelector::EnqueueInput<PROPAGATE>(Node* use_node, int index,
use_info);
#endif // DEBUG
if (info->unvisited()) {
- // First visit of this node.
- info->set_queued();
- nodes_.push_back(node);
- queue_.push(node);
- TRACE(" initial #%i: ", node->id());
info->AddUse(use_info);
- PrintTruncation(info->truncation());
+ TRACE(" initial #%i: %s\n", node->id(), info->truncation().description());
return;
}
- TRACE(" queue #%i?: ", node->id());
- PrintTruncation(info->truncation());
+ TRACE(" queue #%i?: %s\n", node->id(), info->truncation().description());
if (info->AddUse(use_info)) {
// New usage information for the node is available.
if (!info->queued()) {
DCHECK(info->visited());
- queue_.push(node);
+ revisit_queue_.push(node);
info->set_queued();
- TRACE(" added: ");
+ TRACE(" added: %s\n", info->truncation().description());
} else {
- TRACE(" inqueue: ");
+ TRACE(" inqueue: %s\n", info->truncation().description());
}
- PrintTruncation(info->truncation());
}
}
@@ -3918,15 +3927,12 @@ void RepresentationSelector::ProcessInput<LOWER>(Node* node, int index,
template <>
void RepresentationSelector::ProcessRemainingInputs<PROPAGATE>(Node* node,
int index) {
- DCHECK_GE(index, NodeProperties::PastValueIndex(node));
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+
+ // Enqueue other inputs (effects, control).
for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
- i < NodeProperties::PastEffectIndex(node); ++i) {
- EnqueueInput<PROPAGATE>(node, i); // Effect inputs: just visit
- }
- for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
- i < NodeProperties::PastControlIndex(node); ++i) {
- EnqueueInput<PROPAGATE>(node, i); // Control inputs: just visit
+ i < node->InputCount(); ++i) {
+ EnqueueInput<PROPAGATE>(node, i);
}
}
@@ -3936,26 +3942,22 @@ void RepresentationSelector::ProcessRemainingInputs<PROPAGATE>(Node* node,
// values {kTypeAny}.
template <>
void RepresentationSelector::VisitInputs<PROPAGATE>(Node* node) {
- int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit value, context and frame state inputs as tagged.
- for (int i = 0; i < tagged_count; i++) {
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<PROPAGATE>(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (effects, control).
- for (int i = tagged_count; i < node->InputCount(); i++) {
+ for (int i = first_effect_index; i < node->InputCount(); i++) {
EnqueueInput<PROPAGATE>(node, i);
}
}
template <>
void RepresentationSelector::VisitInputs<LOWER>(Node* node) {
- int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit value, context and frame state inputs as tagged.
- for (int i = 0; i < tagged_count; i++) {
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<LOWER>(node, i, UseInfo::AnyTagged());
}
}
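
Both VisitInputs hunks lean on the same layout invariant: a node's inputs are ordered as values, then context and frame state, then effects, then controls, so FirstEffectIndex marks the end of the tagged prefix and a single tail loop up to InputCount covers effects and controls together. A standalone toy model of that bookkeeping; ToyNode is illustrative, not the real Node API:

#include <cstdio>

// Toy model of the input layout assumed above; not the real Node API.
// Inputs: [ values | context | frame state | effects | controls ]
struct ToyNode {
  int values, contexts, frame_states, effects, controls;
  int FirstEffectIndex() const { return values + contexts + frame_states; }
  int InputCount() const { return FirstEffectIndex() + effects + controls; }
};

int main() {
  ToyNode n{2, 1, 1, 1, 1};
  for (int i = 0; i < n.FirstEffectIndex(); ++i)
    std::printf("input %d: process as tagged\n", i);
  for (int i = n.FirstEffectIndex(); i < n.InputCount(); ++i)
    std::printf("input %d: enqueue (effect or control)\n", i);
}
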
diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc
index 1be2bed0013..ff5d69cd669 100644
--- a/chromium/v8/src/compiler/simplified-operator.cc
+++ b/chromium/v8/src/compiler/simplified-operator.cc
@@ -49,7 +49,7 @@ bool operator==(ConstFieldInfo const& lhs, ConstFieldInfo const& rhs) {
}
size_t hash_value(ConstFieldInfo const& const_field_info) {
- return (size_t)const_field_info.owner_map.address();
+ return static_cast<size_t>(const_field_info.owner_map.address());
}
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
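
The replacement is behavior-preserving; static_cast expresses the same truncation as the old C-style cast but is explicit and searchable. A minimal standalone equivalent:

#include <cstddef>
#include <cstdint>

// Same truncation as (size_t)address, stated explicitly.
size_t HashAddress(uintptr_t address) {
  return static_cast<size_t>(address);
}

int main() { return HashAddress(0x1234) == 0x1234 ? 0 : 1; }
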
@@ -304,6 +304,8 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
switch (mode) {
case CheckTaggedInputMode::kNumber:
return os << "Number";
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case CheckTaggedInputMode::kNumberOrOddball:
return os << "NumberOrOddball";
}
@@ -532,6 +534,8 @@ std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
return os << "Signed32";
case NumberOperationHint::kNumber:
return os << "Number";
+ case NumberOperationHint::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case NumberOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
}
@@ -1045,6 +1049,8 @@ struct SimplifiedOperatorGlobalCache final {
};
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
kCheckedTaggedToFloat64NumberOperator;
+ CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrBoolean>
+ kCheckedTaggedToFloat64NumberOrBooleanOperator;
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTaggedToFloat64NumberOrOddballOperator;
@@ -1157,6 +1163,8 @@ struct SimplifiedOperatorGlobalCache final {
k##Name##NumberOrOddballOperator;
SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
#undef SPECULATIVE_NUMBER_BINOP
+ SpeculativeNumberEqualOperator<NumberOperationHint::kNumberOrBoolean>
+ kSpeculativeNumberEqualNumberOrBooleanOperator;
template <NumberOperationHint kHint>
struct SpeculativeToNumberOperator final
@@ -1402,6 +1410,8 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
switch (mode) {
case CheckTaggedInputMode::kNumber:
return &cache_.kCheckedTaggedToFloat64NumberOperator;
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ return &cache_.kCheckedTaggedToFloat64NumberOrBooleanOperator;
case CheckTaggedInputMode::kNumberOrOddball:
return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
}
@@ -1418,6 +1428,9 @@ const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
switch (mode) {
case CheckTaggedInputMode::kNumber:
return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case CheckTaggedInputMode::kNumberOrOddball:
return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
}
@@ -1541,6 +1554,9 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
return &cache_.kSpeculativeToNumberSigned32Operator;
case NumberOperationHint::kNumber:
return &cache_.kSpeculativeToNumberNumberOperator;
+ case NumberOperationHint::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case NumberOperationHint::kNumberOrOddball:
return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
}
@@ -1778,14 +1794,38 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
return &cache_.k##Name##Signed32Operator; \
case NumberOperationHint::kNumber: \
return &cache_.k##Name##NumberOperator; \
+ case NumberOperationHint::kNumberOrBoolean: \
+ /* Not used currently. */ \
+ UNREACHABLE(); \
case NumberOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
} \
UNREACHABLE(); \
return nullptr; \
}
-SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+SPECULATIVE_NUMBER_BINOP(SpeculativeNumberLessThan)
+SPECULATIVE_NUMBER_BINOP(SpeculativeNumberLessThanOrEqual)
#undef SPECULATIVE_NUMBER_BINOP
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
+ NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return &cache_.kSpeculativeNumberEqualSignedSmallOperator;
+ case NumberOperationHint::kSignedSmallInputs:
+ return &cache_.kSpeculativeNumberEqualSignedSmallInputsOperator;
+ case NumberOperationHint::kSigned32:
+ return &cache_.kSpeculativeNumberEqualSigned32Operator;
+ case NumberOperationHint::kNumber:
+ return &cache_.kSpeculativeNumberEqualNumberOperator;
+ case NumberOperationHint::kNumberOrBoolean:
+ return &cache_.kSpeculativeNumberEqualNumberOrBooleanOperator;
+ case NumberOperationHint::kNumberOrOddball:
+ return &cache_.kSpeculativeNumberEqualNumberOrOddballOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h
index df2516646b2..649aea0d0b9 100644
--- a/chromium/v8/src/compiler/simplified-operator.h
+++ b/chromium/v8/src/compiler/simplified-operator.h
@@ -312,6 +312,7 @@ Handle<FeedbackCell> FeedbackCellOf(const Operator* op);
enum class CheckTaggedInputMode : uint8_t {
kNumber,
+ kNumberOrBoolean,
kNumberOrOddball,
};
@@ -507,6 +508,7 @@ enum class NumberOperationHint : uint8_t {
kSignedSmallInputs, // Inputs were Smi, output was Number.
kSigned32, // Inputs were Signed32, output was Number.
kNumber, // Inputs were Number, output was Number.
+ kNumberOrBoolean, // Inputs were Number or Boolean, output was Number.
kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
};
diff --git a/chromium/v8/src/compiler/typed-optimization.cc b/chromium/v8/src/compiler/typed-optimization.cc
index c8c422f66bf..abc88c4b8ed 100644
--- a/chromium/v8/src/compiler/typed-optimization.cc
+++ b/chromium/v8/src/compiler/typed-optimization.cc
@@ -181,7 +181,7 @@ Reduction TypedOptimization::ReduceMaybeGrowFastElements(Node* node) {
simplified()->CheckBounds(FeedbackSource{},
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, effect, control);
- ReplaceWithValue(node, elements);
+ ReplaceWithValue(node, elements, check_bounds);
return Replace(check_bounds);
}
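
A hedged note on why the extra argument matters: AdvancedReducer::ReplaceWithValue accepts optional effect and control replacements, and when the effect argument is omitted, effect uses of the dead node fall back to its incoming effect, bypassing the freshly inserted CheckBounds. The sketch below paraphrases that contract; the default-resolution details are simplified:

// Sketch of the reducer contract assumed above (paraphrased and simplified
// from V8's graph-reducer.h):
//
//   ReplaceWithValue(Node* node, Node* value,
//                    Node* effect = nullptr, Node* control = nullptr);
//
// Value uses of {node} are redirected to {value} and effect uses to
// {effect}. With {effect} omitted they fall back to {node}'s incoming
// effect, which would skip the new CheckBounds; passing {check_bounds}
// keeps the aborting bounds check on the effect chain.
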
diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc
index 6d53531f1cb..94e2c9a1e06 100644
--- a/chromium/v8/src/compiler/typer.cc
+++ b/chromium/v8/src/compiler/typer.cc
@@ -61,8 +61,7 @@ class Typer::Visitor : public Reducer {
explicit Visitor(Typer* typer, LoopVariableOptimizer* induction_vars)
: typer_(typer),
induction_vars_(induction_vars),
- weakened_nodes_(typer->zone()),
- remembered_types_(typer->zone()) {}
+ weakened_nodes_(typer->zone()) {}
const char* reducer_name() const override { return "Typer"; }
@@ -73,8 +72,8 @@ class Typer::Visitor : public Reducer {
Type TypeNode(Node* node) {
switch (node->opcode()) {
-#define DECLARE_UNARY_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_UNARY_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(Operand(node, 0));
JS_SIMPLE_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
@@ -82,8 +81,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
#undef DECLARE_UNARY_CASE
-#define DECLARE_BINARY_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_BINARY_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(Operand(node, 0), Operand(node, 1));
JS_SIMPLE_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
@@ -91,8 +90,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
#undef DECLARE_BINARY_CASE
-#define DECLARE_OTHER_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_OTHER_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(node);
DECLARE_OTHER_CASE(Start)
DECLARE_OTHER_CASE(IfException)
@@ -103,7 +102,7 @@ class Typer::Visitor : public Reducer {
JS_CONTEXT_OP_LIST(DECLARE_OTHER_CASE)
JS_OTHER_OP_LIST(DECLARE_OTHER_CASE)
#undef DECLARE_OTHER_CASE
-#define DECLARE_IMPOSSIBLE_CASE(x) case IrOpcode::k##x:
+#define DECLARE_IMPOSSIBLE_CASE(x, ...) case IrOpcode::k##x:
DECLARE_IMPOSSIBLE_CASE(Loop)
DECLARE_IMPOSSIBLE_CASE(Branch)
DECLARE_IMPOSSIBLE_CASE(IfTrue)
@@ -141,10 +140,8 @@ class Typer::Visitor : public Reducer {
Typer* typer_;
LoopVariableOptimizer* induction_vars_;
ZoneSet<NodeId> weakened_nodes_;
- // TODO(tebbi): remove once chromium:906567 is resolved.
- ZoneUnorderedMap<std::pair<Node*, int>, Type> remembered_types_;
-#define DECLARE_METHOD(x) inline Type Type##x(Node* node);
+#define DECLARE_METHOD(x, ...) inline Type Type##x(Node* node);
DECLARE_METHOD(Start)
DECLARE_METHOD(IfException)
COMMON_OP_LIST(DECLARE_METHOD)
@@ -154,7 +151,7 @@ class Typer::Visitor : public Reducer {
JS_CONTEXT_OP_LIST(DECLARE_METHOD)
JS_OTHER_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(x) inline Type Type##x(Type input);
+#define DECLARE_METHOD(x, ...) inline Type Type##x(Type input);
JS_SIMPLE_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -232,13 +229,13 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type left, Type right) { \
return TypeBinaryOp(left, right, Name##Typer); \
}
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type left, Type right) { \
return TypeBinaryOp(left, right, Name); \
}
@@ -247,7 +244,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type input) { return TypeUnaryOp(input, Name); }
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
@@ -274,7 +271,7 @@ class Typer::Visitor : public Reducer {
static ComparisonOutcome JSCompareTyper(Type, Type, Typer*);
static ComparisonOutcome NumberCompareTyper(Type, Type, Typer*);
-#define DECLARE_METHOD(x) static Type x##Typer(Type, Type, Typer*);
+#define DECLARE_METHOD(x, ...) static Type x##Typer(Type, Type, Typer*);
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -303,49 +300,9 @@ class Typer::Visitor : public Reducer {
AllowHandleDereference allow;
std::ostringstream ostream;
node->Print(ostream);
-
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- ostream << "Previous UpdateType run (inputs first):";
- for (int i = 0; i < 3; ++i) {
- ostream << " ";
- if (remembered_types_[{node, i}].IsInvalid()) {
- ostream << "untyped";
- } else {
- remembered_types_[{node, i}].PrintTo(ostream);
- }
- }
-
- ostream << "\nCurrent (output) type: ";
- previous.PrintTo(ostream);
-
- ostream << "\nThis UpdateType run (inputs first):";
- for (int i = 0; i < 2; ++i) {
- ostream << " ";
- Node* input = NodeProperties::GetValueInput(node, i);
- if (NodeProperties::IsTyped(input)) {
- NodeProperties::GetType(input).PrintTo(ostream);
- } else {
- ostream << "untyped";
- }
- }
- ostream << " ";
- current.PrintTo(ostream);
- ostream << "\n";
- }
-
FATAL("UpdateType error for node %s", ostream.str().c_str());
}
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- for (int i = 0; i < 2; ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
- ? NodeProperties::GetType(input)
- : Type::Invalid();
- }
- remembered_types_[{node, 2}] = current;
- }
-
NodeProperties::SetType(node, current);
if (!current.Is(previous)) {
// If something changed, revisit all uses.
@@ -353,16 +310,6 @@ class Typer::Visitor : public Reducer {
}
return NoChange();
} else {
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- for (int i = 0; i < 2; ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
- ? NodeProperties::GetType(input)
- : Type::Invalid();
- }
- remembered_types_[{node, 2}] = current;
- }
-
// No previous type, simply update the type.
NodeProperties::SetType(node, current);
return Changed(node);
diff --git a/chromium/v8/src/compiler/types.cc b/chromium/v8/src/compiler/types.cc
index 47280becbd9..c32ae4cd923 100644
--- a/chromium/v8/src/compiler/types.cc
+++ b/chromium/v8/src/compiler/types.cc
@@ -224,7 +224,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_AGGREGATE_ERROR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_REG_EXP_TYPE:
@@ -358,7 +357,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ENUM_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
- case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
case WASM_JS_FUNCTION_DATA_TYPE:
@@ -372,7 +370,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
#define MAKE_TORQUE_CLASS_TYPE(INSTANCE_TYPE, Name, name) case INSTANCE_TYPE:
- TORQUE_INTERNAL_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
+ TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc
index ac7a681336a..12ce3d32558 100644
--- a/chromium/v8/src/compiler/wasm-compiler.cc
+++ b/chromium/v8/src/compiler/wasm-compiler.cc
@@ -55,7 +55,7 @@
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -2116,16 +2116,14 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -2214,7 +2212,7 @@ Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj,
wasm::WasmCodePosition position) {
- TrapIfTrue(wasm::kTrapBrOnExnNullRef, gasm_->WordEqual(RefNull(), except_obj),
+ TrapIfTrue(wasm::kTrapBrOnExnNull, gasm_->WordEqual(RefNull(), except_obj),
position);
return CALL_BUILTIN(
WasmGetOwnProperty, except_obj,
@@ -2269,16 +2267,14 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
mcgraph()->machine()->I32x4ReplaceLane(3), value,
BuildDecodeException32BitValue(values_array, &index));
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -3257,7 +3253,7 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
}
}
-void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset) {
// Load the base from the ImportedMutableGlobalsBuffer of the instance.
Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
@@ -3358,11 +3354,11 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return gasm_->Load(MachineType::AnyTagged(), base, offset);
}
Node* globals_buffer =
@@ -3387,11 +3383,11 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return STORE_RAW_NODE_OFFSET(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
@@ -3497,7 +3493,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}
- if (!base::IsInBounds(offset, access_size, env_->max_memory_size)) {
+ if (!base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return mcgraph()->IntPtrConstant(0);
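
base::IsInBounds is already written so its internal subtraction cannot overflow; what the explicit <uint64_t> instantiation adds is a fixed comparison width, so no argument is silently narrowed to a 32-bit platform word before the check. A standalone sketch of that narrowing hazard; the helper below only mirrors the shape of V8's version:

#include <cstdint>
#include <cstdio>

// Mirrors only the shape of V8's base::IsInBounds; the subtraction cannot
// overflow because size <= max is checked first.
template <typename T>
bool IsInBounds(T offset, T size, T max) {
  return size <= max && offset <= max - size;
}

int main() {
  uint64_t offset = 0x100000010ull;  // a 64-bit offset above 4 GiB
  // Narrowed to 32 bits first, the offset becomes 0x10 and wrongly passes:
  std::printf("%d\n", IsInBounds<uint32_t>(static_cast<uint32_t>(offset),
                                           8u, 0x10000u));
  // Instantiated at uint64_t, as in the patched call, it correctly fails:
  std::printf("%d\n", IsInBounds<uint64_t>(offset, 8u, 0x10000u));
}
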
@@ -3613,20 +3609,43 @@ const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
return mcgraph()->machine()->UnalignedStore(store_rep);
}
+Node* WasmGraphBuilder::TraceFunctionEntry(wasm::WasmCodePosition position) {
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceEnter, nullptr, 0);
+ SetSourcePosition(call, position);
+ return call;
+}
+
+Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
+ wasm::WasmCodePosition position) {
+ Node* info = gasm_->IntPtrConstant(0);
+ size_t num_returns = vals.size();
+ if (num_returns == 1) {
+ wasm::ValueType return_type = sig_->GetReturn(0);
+ MachineRepresentation rep = return_type.machine_representation();
+ int size = ElementSizeInBytes(rep);
+ info = gasm_->StackSlot(size, size);
+
+ gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
+ gasm_->Int32Constant(0), vals[0]);
+ }
+
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceExit, &info, 1);
+ SetSourcePosition(call, position);
+ return call;
+}
+
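
Note how TraceFunctionExit hands the runtime a pointer rather than a value: a single return value is spilled to a stack slot sized for its machine representation, and only the slot's address is passed. A standalone sketch of that marshalling; RuntimeTraceExit and the local buffer are stand-ins for the runtime function and the real stack slot:

#include <cstdio>
#include <cstring>

// Stand-ins: RuntimeTraceExit models Runtime::kWasmTraceExit, the buffer
// models the stack slot allocated by gasm_->StackSlot(size, size).
void RuntimeTraceExit(const void* info) {
  double value;
  std::memcpy(&value, info, sizeof value);
  std::printf("wasm return value: %g\n", value);
}

int main() {
  double ret = 42.5;                     // the function's single return value
  alignas(8) unsigned char slot[sizeof(double)];
  std::memcpy(slot, &ret, sizeof ret);   // the Store into the slot
  RuntimeTraceExit(slot);                // only the address is passed
}
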
Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
- Node* info = graph()->NewNode(
- mcgraph()->machine()->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign));
+ TNode<RawPtrT> info =
+ gasm_->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign);
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Int32Constant(offset), index);
+ Node* address = gasm_->Int32Add(Int32Constant(offset), index);
auto store = [&](int offset, MachineRepresentation rep, Node* data) {
- SetEffect(graph()->NewNode(
- mcgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
- info, mcgraph()->Int32Constant(offset), data, effect(), control()));
+ gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
+ gasm_->Int32Constant(offset), data);
};
// Store address, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, address),
@@ -3638,7 +3657,9 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation::kWord8,
mcgraph()->Int32Constant(static_cast<int>(rep)));
- Node* call = BuildCallToRuntime(Runtime::kWasmTraceMemory, &info, 1);
+ Node* args[] = {info};
+ Node* call =
+ BuildCallToRuntime(Runtime::kWasmTraceMemory, args, arraysize(args));
SetSourcePosition(call, position);
return call;
}
@@ -3699,31 +3720,66 @@ LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
// TODO(miladfar): Remove SIM once V8_TARGET_BIG_ENDIAN includes the Sim.
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* WasmGraphBuilder::LoadTransformBigEndian(
- MachineType memtype, wasm::LoadTransformationKind transform, Node* value) {
+ wasm::ValueType type, MachineType memtype,
+ wasm::LoadTransformationKind transform, Node* index, uint32_t offset,
+ uint32_t alignment, wasm::WasmCodePosition position) {
+#define LOAD_EXTEND(num_lanes, bytes_per_load, replace_lane) \
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero()); \
+ Node* values[num_lanes]; \
+ for (int i = 0; i < num_lanes; i++) { \
+ values[i] = LoadMem(type, memtype, index, offset + i * bytes_per_load, \
+ alignment, position); \
+ if (memtype.IsSigned()) { \
+ /* sign extend */ \
+ values[i] = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), \
+ values[i]); \
+ } else { \
+ /* zero extend */ \
+ values[i] = graph()->NewNode( \
+ mcgraph()->machine()->ChangeUint32ToUint64(), values[i]); \
+ } \
+ } \
+ for (int lane = 0; lane < num_lanes; lane++) { \
+ result = graph()->NewNode(mcgraph()->machine()->replace_lane(lane), \
+ result, values[lane]); \
+ }
Node* result;
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
switch (transformation) {
case LoadTransformation::kS8x16LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), result);
break;
}
case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U:
+ case LoadTransformation::kI16x8Load8x8U: {
+ LOAD_EXTEND(8, 1, I16x8ReplaceLane)
+ break;
+ }
case LoadTransformation::kS16x8LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), result);
break;
}
case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U:
+ case LoadTransformation::kI32x4Load16x4U: {
+ LOAD_EXTEND(4, 2, I32x4ReplaceLane)
+ break;
+ }
case LoadTransformation::kS32x4LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), result);
break;
}
case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kI64x2Load32x2U: {
+ LOAD_EXTEND(2, 4, I64x2ReplaceLane)
+ break;
+ }
case LoadTransformation::kS64x2LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), result);
break;
}
default:
@@ -3731,6 +3787,7 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
}
return result;
+#undef LOAD_EXTEND
}
#endif
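
The memtype.IsSigned() branch inside LOAD_EXTEND is the entire difference between the signed and unsigned load-extend variants: each narrow lane is widened by sign or zero extension before being inserted into the result vector. A standalone scalar sketch of the two widenings:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t lane = 0xF0;  // one byte loaded for a load8x8 transform
  int64_t s = static_cast<int8_t>(lane);  // signed variant: -16
  uint64_t u = lane;                      // unsigned variant: 240
  std::printf("%lld %llu\n", static_cast<long long>(s),
              static_cast<unsigned long long>(u));
}
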
@@ -3749,8 +3806,8 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
// LoadTransform cannot efficiently be executed on BE machines as a
// single operation since loaded bytes need to be reversed first,
// therefore we divide them into separate "load" and "operation" nodes.
- load = LoadMem(type, memtype, index, offset, alignment, position);
- load = LoadTransformBigEndian(memtype, transform, load);
+ load = LoadTransformBigEndian(type, memtype, transform, index, offset,
+ alignment, position);
USE(GetLoadKind);
#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
@@ -3983,6 +4040,24 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
return val;
}
+Node* WasmGraphBuilder::BuildF32x4Ceil(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_ceil();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF32x4Floor(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_floor();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF32x4Trunc(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_trunc();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
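
Each BuildF32x4* helper routes one SIMD rounding operation to a C function through an ExternalReference; SimdOp below only takes this path when the machine has no native rounding instruction. A standalone sketch of the per-lane scalar work such a fallback performs; plain std::ceil stands in for the wasm_f32x4_ceil C function:

#include <cmath>
#include <cstdio>

int main() {
  float lanes[4] = {1.2f, -1.2f, 2.5f, -2.5f};
  for (float& lane : lanes) lane = std::ceil(lane);  // one lane at a time
  std::printf("%g %g %g %g\n", lanes[0], lanes[1], lanes[2], lanes[3]);
}
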
void WasmGraphBuilder::PrintDebugName(Node* node) {
PrintF("#%d:%s", node->id(), node->op()->mnemonic());
}
@@ -4143,6 +4218,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Pmax:
return graph()->NewNode(mcgraph()->machine()->F64x2Pmax(), inputs[0],
inputs[1]);
+ case wasm::kExprF64x2Ceil:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Ceil(), inputs[0]);
+ case wasm::kExprF64x2Floor:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Floor(), inputs[0]);
+ case wasm::kExprF64x2Trunc:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Trunc(), inputs[0]);
+ case wasm::kExprF64x2NearestInt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2NearestInt(),
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4214,6 +4298,25 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Pmax:
return graph()->NewNode(mcgraph()->machine()->F32x4Pmax(), inputs[0],
inputs[1]);
+ case wasm::kExprF32x4Ceil:
+ // Architecture support for F32x4Ceil and Float32RoundUp is the same.
+ if (!mcgraph()->machine()->Float32RoundUp().IsSupported())
+ return BuildF32x4Ceil(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Ceil(), inputs[0]);
+ case wasm::kExprF32x4Floor:
+ // Architecture support for F32x4Floor and Float32RoundDown is the same.
+ if (!mcgraph()->machine()->Float32RoundDown().IsSupported())
+ return BuildF32x4Floor(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Floor(), inputs[0]);
+ case wasm::kExprF32x4Trunc:
+ // Architecture support for F32x4Trunc and Float32RoundTruncate is the
+ // same.
+ if (!mcgraph()->machine()->Float32RoundTruncate().IsSupported())
+ return BuildF32x4Trunc(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Trunc(), inputs[0]);
+ case wasm::kExprF32x4NearestInt:
+ return graph()->NewNode(mcgraph()->machine()->F32x4NearestInt(),
+ inputs[0]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
@@ -4367,6 +4470,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I32x4Abs(), inputs[0]);
case wasm::kExprI32x4BitMask:
return graph()->NewNode(mcgraph()->machine()->I32x4BitMask(), inputs[0]);
+ case wasm::kExprI32x4DotI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4DotI16x8S(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -4577,22 +4683,22 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprS1x2AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]);
- case wasm::kExprS1x2AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]);
- case wasm::kExprS1x4AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
- case wasm::kExprS1x4AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x4AllTrue(), inputs[0]);
- case wasm::kExprS1x8AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x8AnyTrue(), inputs[0]);
- case wasm::kExprS1x8AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x8AllTrue(), inputs[0]);
- case wasm::kExprS1x16AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x16AnyTrue(), inputs[0]);
- case wasm::kExprS1x16AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x16AllTrue(), inputs[0]);
+ case wasm::kExprV64x2AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AnyTrue(), inputs[0]);
+ case wasm::kExprV64x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
+ case wasm::kExprV32x4AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
+ case wasm::kExprV32x4AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V32x4AllTrue(), inputs[0]);
+ case wasm::kExprV16x8AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V16x8AnyTrue(), inputs[0]);
+ case wasm::kExprV16x8AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V16x8AllTrue(), inputs[0]);
+ case wasm::kExprV8x16AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V8x16AnyTrue(), inputs[0]);
+ case wasm::kExprV8x16AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
case wasm::kExprS8x16Swizzle:
return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0],
inputs[1]);
@@ -5042,9 +5148,10 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
namespace {
-MachineType FieldType(const wasm::StructType* type, uint32_t field_index) {
+MachineType FieldType(const wasm::StructType* type, uint32_t field_index,
+ bool is_signed) {
return MachineType::TypeForRepresentation(
- type->field(field_index).machine_representation());
+ type->field(field_index).machine_representation(), is_signed);
}
Node* FieldOffset(MachineGraph* graph, const wasm::StructType* type,
@@ -5054,19 +5161,43 @@ Node* FieldOffset(MachineGraph* graph, const wasm::StructType* type,
return graph->IntPtrConstant(offset);
}
+// It's guaranteed that struct/array fields are aligned to min(field_size,
+// kTaggedSize), with the latter being 4 or 8 depending on platform and
+// pointer compression. So on our most common configurations, 8-byte types
+// must use unaligned loads/stores.
+Node* LoadWithTaggedAlignment(WasmGraphAssembler* gasm, MachineType type,
+ Node* base, Node* offset) {
+ if (ElementSizeInBytes(type.representation()) > kTaggedSize) {
+ return gasm->LoadUnaligned(type, base, offset);
+ } else {
+ return gasm->Load(type, base, offset);
+ }
+}
+
+// Same alignment considerations as above.
+Node* StoreWithTaggedAlignment(WasmGraphAssembler* gasm, Node* base,
+ Node* offset, Node* value,
+ wasm::ValueType type) {
+ MachineRepresentation rep = type.machine_representation();
+ if (ElementSizeInBytes(rep) > kTaggedSize) {
+ return gasm->StoreUnaligned(rep, base, offset, value);
+ } else {
+ WriteBarrierKind write_barrier =
+ type.is_reference_type() ? kPointerWriteBarrier : kNoWriteBarrier;
+ StoreRepresentation store_rep(rep, write_barrier);
+ return gasm->Store(store_rep, base, offset, value);
+ }
+}
+
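
A standalone illustration of the min(field_size, kTaggedSize) rule stated above LoadWithTaggedAlignment: with 4-byte tagged values (32-bit or compressed-pointer builds), only 8-byte fields end up under-aligned and need the unaligned path:

#include <algorithm>
#include <cstdio>
#include <initializer_list>

int main() {
  const int kTaggedSize = 4;  // 32-bit or compressed-pointer build
  for (int field_size : {1, 2, 4, 8}) {
    int alignment = std::min(field_size, kTaggedSize);
    std::printf("%d-byte field: aligned to %d -> %s\n", field_size, alignment,
                field_size > alignment ? "unaligned access" : "aligned access");
  }
}
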
// Set a field of a struct, without checking if the struct is null.
// Helper method for StructNew and StructSet.
Node* StoreStructFieldUnchecked(MachineGraph* graph, WasmGraphAssembler* gasm,
Node* struct_object,
const wasm::StructType* type,
uint32_t field_index, Node* value) {
- WriteBarrierKind write_barrier = type->field(field_index).IsReferenceType()
- ? kPointerWriteBarrier
- : kNoWriteBarrier;
- StoreRepresentation rep(type->field(field_index).machine_representation(),
- write_barrier);
- Node* offset = FieldOffset(graph, type, field_index);
- return gasm->Store(rep, struct_object, offset, value);
+ return StoreWithTaggedAlignment(gasm, struct_object,
+ FieldOffset(graph, type, field_index), value,
+ type->field(field_index));
}
Node* ArrayElementOffset(GraphAssembler* gasm, Node* index,
@@ -5130,10 +5261,6 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
graph()->NewNode(mcgraph()->common()->NumberConstant(
element_type.element_size_bytes())),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
- WriteBarrierKind write_barrier =
- element_type.IsReferenceType() ? kPointerWriteBarrier : kNoWriteBarrier;
- StoreRepresentation rep(element_type.machine_representation(), write_barrier);
-
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
@@ -5153,7 +5280,8 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done);
- gasm_->Store(rep, a, offset, initial_value);
+ StoreWithTaggedAlignment(gasm_.get(), a, offset, initial_value,
+ type->element_type());
offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset);
}
@@ -5161,17 +5289,35 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
return a;
}
+Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
+ // This logic is duplicated from module-instantiate.cc.
+ // TODO(jkummerow): Find a nicer solution.
+ int map_index = 0;
+ const std::vector<uint8_t>& type_kinds = env_->module->type_kinds;
+ for (uint32_t i = 0; i < type_index; i++) {
+ if (type_kinds[i] == wasm::kWasmStructTypeCode ||
+ type_kinds[i] == wasm::kWasmArrayTypeCode) {
+ map_index++;
+ }
+ }
+ Node* maps_list =
+ LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
+ return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, map_index);
+}
+
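
A standalone model of the map_index computation above: the managed-maps list has entries only for struct and array types, so entries of other kinds before type_index are skipped when translating a module type index into a map index. The type-code constants here are illustrative, not wasm's real encoding:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uint8_t kStruct = 0, kArray = 1, kSignature = 2;  // illustrative codes
  std::vector<uint8_t> type_kinds = {kSignature, kStruct, kSignature, kArray};
  uint32_t type_index = 3;  // ask for the array type's map
  int map_index = 0;
  for (uint32_t i = 0; i < type_index; i++) {
    if (type_kinds[i] == kStruct || type_kinds[i] == kArray) map_index++;
  }
  std::printf("map_index = %d\n", map_index);  // 1: only the struct counted
}
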
Node* WasmGraphBuilder::StructGet(Node* struct_object,
const wasm::StructType* struct_type,
uint32_t field_index, CheckForNull null_check,
+ bool is_signed,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(struct_object, RefNull()), position);
}
- MachineType machine_type = FieldType(struct_type, field_index);
+ MachineType machine_type = FieldType(struct_type, field_index, is_signed);
Node* offset = FieldOffset(mcgraph(), struct_type, field_index);
- return gasm_->Load(machine_type, struct_object, offset);
+ return LoadWithTaggedAlignment(gasm_.get(), machine_type, struct_object,
+ offset);
}
Node* WasmGraphBuilder::StructSet(Node* struct_object,
@@ -5196,14 +5342,16 @@ void WasmGraphBuilder::BoundsCheck(Node* array, Node* index,
Node* WasmGraphBuilder::ArrayGet(Node* array_object,
const wasm::ArrayType* type, Node* index,
+ bool is_signed,
wasm::WasmCodePosition position) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(array_object, RefNull()), position);
BoundsCheck(array_object, index, position);
MachineType machine_type = MachineType::TypeForRepresentation(
- type->element_type().machine_representation());
+ type->element_type().machine_representation(), is_signed);
Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
- return gasm_->Load(machine_type, array_object, offset);
+ return LoadWithTaggedAlignment(gasm_.get(), machine_type, array_object,
+ offset);
}
Node* WasmGraphBuilder::ArraySet(Node* array_object,
@@ -5212,13 +5360,9 @@ Node* WasmGraphBuilder::ArraySet(Node* array_object,
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(array_object, RefNull()), position);
BoundsCheck(array_object, index, position);
- WriteBarrierKind write_barrier = type->element_type().IsReferenceType()
- ? kPointerWriteBarrier
- : kNoWriteBarrier;
- StoreRepresentation rep(type->element_type().machine_representation(),
- write_barrier);
Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
- return gasm_->Store(rep, array_object, offset, value);
+ return StoreWithTaggedAlignment(gasm_.get(), array_object, offset, value,
+ type->element_type());
}
Node* WasmGraphBuilder::ArrayLen(Node* array_object,
@@ -5460,17 +5604,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildChangeFloat32ToNumber(node);
case wasm::ValueType::kF64:
return BuildChangeFloat64ToNumber(node);
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
- return node;
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
- // TODO(7748): Implement properly. For now, we just expose the raw
- // object for testing.
+ case wasm::ValueType::kRtt:
+ // TODO(7748): Implement properly for arrays and structs, figure
+ // out what to do for RTTs.
+ // For now, we just expose the raw object for testing.
return node;
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ UNIMPLEMENTED();
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -5521,49 +5664,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kExnRef:
- return input;
-
- case wasm::ValueType::kNullRef: {
- Node* check = graph()->NewNode(mcgraph()->machine()->WordEqual(), input,
- RefNull());
-
- Diamond null_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- null_check.Chain(control());
- SetControl(null_check.if_false);
-
- Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
-
- SetEffectControl(null_check.EffectPhi(old_effect, effect()),
- null_check.merge);
-
- return input;
- }
-
- case wasm::ValueType::kFuncRef: {
- Node* check =
- BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
-
- Diamond type_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- type_check.Chain(control());
- SetControl(type_check.if_false);
-
- Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
-
- SetEffectControl(type_check.EffectPhi(old_effect, effect()),
- type_check.merge);
-
- return input;
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef: {
+ switch (type.heap_type()) {
+ case wasm::kHeapExtern:
+ case wasm::kHeapExn:
+ return input;
+ case wasm::kHeapFunc: {
+ Node* check =
+ BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
+ Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
+
+ Diamond type_check(graph(), mcgraph()->common(), check,
+ BranchHint::kTrue);
+ type_check.Chain(control());
+ SetControl(type_check.if_false);
+
+ Node* old_effect = effect();
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+ js_context, nullptr, 0);
+
+ SetEffectControl(type_check.EffectPhi(old_effect, effect()),
+ type_check.merge);
+
+ return input;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-
case wasm::ValueType::kF32:
return graph()->NewNode(
mcgraph()->machine()->TruncateFloat64ToFloat32(),
@@ -5580,7 +5709,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK(enabled_features_.has_bigint());
return BuildChangeBigIntToInt64(input, js_context);
- default:
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
UNREACHABLE();
break;
}
@@ -6471,8 +6605,8 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
const wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileWasmMathIntrinsic");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmMathIntrinsic");
Zone zone(wasm_engine->allocator(), ZONE_NAME);
@@ -6545,8 +6679,8 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
}
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileWasmImportCallWrapper");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmImportCallWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -6588,7 +6722,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::NativeModule* native_module,
const wasm::FunctionSig* sig,
Address address) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileWasmCapiFunction");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmCapiFunction");
Zone zone(wasm_engine->allocator(), ZONE_NAME);
@@ -6683,8 +6818,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
return code;
}
-MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate,
- const wasm::FunctionSig* sig) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
@@ -6729,14 +6863,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate,
Code::C_WASM_ENTRY, std::move(name_buffer),
AssemblerOptions::Default(isolate)));
- if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
- CompilationJob::FAILED ||
- job->FinalizeJob(isolate) == CompilationJob::FAILED) {
- return {};
- }
- Handle<Code> code = job->compilation_info()->code();
+ CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats()),
+ CompilationJob::FAILED);
+ CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
- return code;
+ return job->compilation_info()->code();
}
namespace {
@@ -6799,9 +6930,9 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
- TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteTurbofanCompilation", "func_index", func_index,
- "body_size", func_body.end - func_body.start);
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileTopTier", "func_index", func_index, "body_size",
+ func_body.end - func_body.start);
Zone zone(wasm_engine->allocator(), ZONE_NAME);
MachineGraph* mcgraph = new (&zone) MachineGraph(
new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
@@ -6813,18 +6944,17 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
OptimizedCompilationInfo info(GetDebugName(&zone, func_index), &zone,
Code::WASM_FUNCTION);
if (env->runtime_exception_support) {
- info.SetWasmRuntimeExceptionSupport();
+ info.set_wasm_runtime_exception_support();
}
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboCfgFile tcf;
tcf << AsC1VCompilation(&info);
}
- NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
- ? new (&zone)
- NodeOriginTable(mcgraph->graph())
- : nullptr;
+ NodeOriginTable* node_origins =
+ info.trace_turbo_json() ? new (&zone) NodeOriginTable(mcgraph->graph())
+ : nullptr;
SourcePositionTable* source_positions =
new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
if (!BuildGraphForWasmFunction(wasm_engine->allocator(), env, func_body,
@@ -6976,18 +7106,19 @@ CallDescriptor* GetWasmCallDescriptor(
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
- return new (zone) CallDescriptor( // --
- descriptor_kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- parameter_slots, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call", // debug name
- 0, // allocatable registers
+ return new (zone) CallDescriptor( // --
+ descriptor_kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ parameter_slots, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ flags, // flags
+ "wasm-call", // debug name
+ StackArgumentOrder::kDefault, // order of the arguments in the stack
+ 0, // allocatable registers
rets.NumStackSlots() - parameter_slots); // stack_return_count
}
@@ -7065,6 +7196,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
call_descriptor->flags(), // flags
call_descriptor->debug_name(), // debug name
+ call_descriptor->GetStackArgumentOrder(), // stack order
call_descriptor->AllocatableRegisters(), // allocatable registers
rets.NumStackSlots() - params.NumStackSlots()); // stack_return_count
}
diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h
index 6d662e674d8..d72c2bcab5f 100644
--- a/chromium/v8/src/compiler/wasm-compiler.h
+++ b/chromium/v8/src/compiler/wasm-compiler.h
@@ -138,7 +138,8 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
-MaybeHandle<Code> CompileCWasmEntry(Isolate*, const wasm::FunctionSig*);
+V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(Isolate*,
+ const wasm::FunctionSig*);
// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -250,6 +251,10 @@ class WasmGraphBuilder {
Node* arr[] = {fst, more...};
return Return(ArrayVector(arr));
}
+
+ Node* TraceFunctionEntry(wasm::WasmCodePosition position);
+ Node* TraceFunctionExit(Vector<Node*> vals, wasm::WasmCodePosition position);
+
Node* Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
@@ -284,9 +289,10 @@ class WasmGraphBuilder {
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
- Node* LoadTransformBigEndian(MachineType memtype,
+ Node* LoadTransformBigEndian(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* value);
+ Node* index, uint32_t offset, uint32_t alignment,
+ wasm::WasmCodePosition position);
#endif
Node* LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform, Node* index,
@@ -316,7 +322,7 @@ class WasmGraphBuilder {
void GetGlobalBaseAndOffset(MachineType mem_type, const wasm::WasmGlobal&,
Node** base_node, Node** offset_node);
- void GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+ void GetBaseAndOffsetForImportedMutableExternRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset);
// Utilities to manipulate sets of instance cache nodes.
@@ -378,7 +384,7 @@ class WasmGraphBuilder {
Node* StructNew(uint32_t struct_index, const wasm::StructType* type,
Vector<Node*> fields);
Node* StructGet(Node* struct_object, const wasm::StructType* struct_type,
- uint32_t field_index, CheckForNull null_check,
+ uint32_t field_index, CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position);
Node* StructSet(Node* struct_object, const wasm::StructType* struct_type,
uint32_t field_index, Node* value, CheckForNull null_check,
@@ -387,10 +393,11 @@ class WasmGraphBuilder {
Node* length, Node* initial_value);
void BoundsCheck(Node* array, Node* index, wasm::WasmCodePosition position);
Node* ArrayGet(Node* array_object, const wasm::ArrayType* type, Node* index,
- wasm::WasmCodePosition position);
+ bool is_signed, wasm::WasmCodePosition position);
Node* ArraySet(Node* array_object, const wasm::ArrayType* type, Node* index,
Node* value, wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, wasm::WasmCodePosition position);
+ Node* RttCanon(uint32_t type_index);
bool has_simd() const { return has_simd_; }
@@ -547,6 +554,11 @@ class WasmGraphBuilder {
Node* BuildAsmjsLoadMem(MachineType type, Node* index);
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
+ // Wasm SIMD.
+ Node* BuildF32x4Ceil(Node* input);
+ Node* BuildF32x4Floor(Node* input);
+ Node* BuildF32x4Trunc(Node* input);
+
void BuildEncodeException32BitValue(Node* values_array, uint32_t* index,
Node* value);
Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);