Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS  1
-rw-r--r--  deps/v8/src/compiler/access-builder.cc  122
-rw-r--r--  deps/v8/src/compiler/access-builder.h  10
-rw-r--r--  deps/v8/src/compiler/access-info.cc  14
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc  197
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc  207
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc  641
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h  748
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc  196
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc  802
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc  104
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h  37
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc  936
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h  144
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc  168
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc  574
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h  138
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc  93
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc  149
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h  40
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc  4
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h  3
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.cc  156
-rw-r--r--  deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc  2636
-rw-r--r--  deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h  397
-rw-r--r--  deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc  26
-rw-r--r--  deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc  3124
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc  191
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc  44
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc  67
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc  392
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h  800
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc  66
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc  288
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc  103
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h  11
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc  11
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc  103
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc  1008
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h  33
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc  71
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc  390
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc  440
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h  44
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc  44
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc  227
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc  1062
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h  790
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc  42
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc  363
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc  17
-rw-r--r--  deps/v8/src/compiler/branch-elimination.h  1
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc  17
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc  12
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc  82
-rw-r--r--  deps/v8/src/compiler/code-assembler.h  63
-rw-r--r--  deps/v8/src/compiler/common-operator.cc  212
-rw-r--r--  deps/v8/src/compiler/common-operator.h  62
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc  183
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h  5
-rw-r--r--  deps/v8/src/compiler/compilation-dependency.h  37
-rw-r--r--  deps/v8/src/compiler/decompression-optimizer.cc  7
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc  67
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h  4
-rw-r--r--  deps/v8/src/compiler/frame-states.cc  7
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc  41
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h  42
-rw-r--r--  deps/v8/src/compiler/heap-refs.cc  893
-rw-r--r--  deps/v8/src/compiler/heap-refs.h  125
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc  18
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc  58
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc  22
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc  59
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc  16
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc  35
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h  28
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc  77
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc  7
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc  68
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc  33
-rw-r--r--  deps/v8/src/compiler/linkage.cc  7
-rw-r--r--  deps/v8/src/compiler/linkage.h  18
-rw-r--r--  deps/v8/src/compiler/loop-analysis.cc  19
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc  38
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc  15
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc  358
-rw-r--r--  deps/v8/src/compiler/machine-operator.h  86
-rw-r--r--  deps/v8/src/compiler/memory-lowering.cc  33
-rw-r--r--  deps/v8/src/compiler/memory-lowering.h  3
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc  11
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.h  1
-rw-r--r--  deps/v8/src/compiler/node-matchers.h  1
-rw-r--r--  deps/v8/src/compiler/opcodes.h  5
-rw-r--r--  deps/v8/src/compiler/pipeline-statistics.cc  27
-rw-r--r--  deps/v8/src/compiler/pipeline-statistics.h  9
-rw-r--r--  deps/v8/src/compiler/pipeline.cc  103
-rw-r--r--  deps/v8/src/compiler/pipeline.h  8
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc  8
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc  23
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h  74
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc  28
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h  3
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc  267
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h  19
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc  6
-rw-r--r--  deps/v8/src/compiler/typer.cc  45
-rw-r--r--  deps/v8/src/compiler/verifier.cc  8
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc  274
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h  23
-rw-r--r--  deps/v8/src/compiler/wasm-inlining.cc  195
-rw-r--r--  deps/v8/src/compiler/wasm-inlining.h  77
111 files changed, 13656 insertions, 8691 deletions
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 1626bc5487..a415cbfa66 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -4,7 +4,6 @@ mvstanton@chromium.org
neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
-solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 675371df57..fda0727dd1 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -410,26 +410,22 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
- FieldAccess access = {kTaggedBase,
- JSTypedArray::kExternalPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier,
- LoadSensitivity::kCritical,
- ConstFieldInfo::None(),
- false,
-#ifdef V8_HEAP_SANDBOX
- kTypedArrayExternalPointerTag
-#endif
+ FieldAccess access = {
+ kTaggedBase,
+ JSTypedArray::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ ConstFieldInfo::None(),
+ false,
};
return access;
}
@@ -441,16 +437,11 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
-#ifdef V8_HEAP_SANDBOX
- kDataViewDataPointerTag,
-#endif
};
return access;
}
@@ -756,7 +747,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@@ -902,10 +892,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {kTaggedBase, Cell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -966,11 +956,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
}
// statics
-ElementAccess AccessBuilder::ForFixedArrayElement(
- ElementsKind kind, LoadSensitivity load_sensitivity) {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, load_sensitivity};
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
@@ -1038,59 +1026,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() {
}
// static
-ElementAccess AccessBuilder::ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity) {
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+ bool is_external) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
case kExternalBigInt64Array:
@@ -1239,15 +1218,6 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackCellValue() {
- FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
FieldAccess access = {kTaggedBase,
FeedbackCell::kInterruptBudgetOffset,
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index fa68628cf8..99ffde19c4 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
- static ElementAccess ForFixedArrayElement(
- ElementsKind kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to SloppyArgumentsElements elements.
static ElementAccess ForSloppyArgumentsElementsMappedEntry();
@@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
- static ElementAccess ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+ bool is_external);
// Provides access to HashTable fields.
static FieldAccess ForHashTableBaseNumberOfElements();
@@ -342,7 +339,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryObjectHashIndex();
// Provides access to FeedbackCell fields.
- static FieldAccess ForFeedbackCellValue();
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector fields.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 21f453f4d8..e68ced7460 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,7 +8,6 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
@@ -57,7 +56,8 @@ bool HasFieldRepresentationDependenciesOnMap(
ZoneVector<CompilationDependency const*>& dependencies,
Handle<Map> const& field_owner_map) {
for (auto dep : dependencies) {
- if (dep->IsFieldRepresentationDependencyOnMap(field_owner_map)) {
+ if (CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ dep, field_owner_map)) {
return true;
}
}
@@ -109,6 +109,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
DCHECK_IMPLIES(
field_representation.IsDouble(),
HasFieldRepresentationDependenciesOnMap(
@@ -129,6 +130,7 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
field_owner_map, field_map, {{receiver_map}, zone},
@@ -384,7 +386,7 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const {
- if (!CanInlineElementAccess(map)) return {};
+ if (!map.CanInlineElementAccess()) return {};
return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
}
@@ -542,7 +544,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
Handle<Cell> cell = broker->CanonicalPersistentHandle(
Cell::cast(module_namespace->module().exports().Lookup(
isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
- if (cell->value().IsTheHole(isolate)) {
+ if (cell->value(kRelaxedLoad).IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
@@ -1050,7 +1052,7 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
if (!map.has_value()) return {};
if (map->instance_type() != instance_type ||
- !CanInlineElementAccess(*map)) {
+ !map->CanInlineElementAccess()) {
return {};
}
if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
@@ -1132,6 +1134,8 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
int const index = details.field_index();
Representation details_representation = details.representation();
+ if (details_representation.IsNone()) return Invalid();
+
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 29c7897ec9..7bc90fd822 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
- }
-}
-
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i,
- Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
- switch (AddressingModeField::decode(opcode)) {
- case kMode_Offset_RI:
- codegen->tasm()->mov(address, i.InputImmediate(1));
- codegen->tasm()->add(address, address, i.InputRegister(0));
- break;
- case kMode_Offset_RR:
- codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
- break;
- default:
- UNREACHABLE();
- }
- codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -360,12 +329,11 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ dmb(ISH); \
- __ asm_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(0), i.InputOffset(1)); \
+ if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
@@ -691,25 +659,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
- __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
- __ csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -977,15 +926,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode;
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ } else {
+ mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
+ }
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
+
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
+ __ dmb(ISH);
+ }
if (addressing_mode == kMode_Offset_RI) {
int32_t immediate = i.InputInt32(1);
offset = Operand(immediate);
@@ -996,6 +954,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(reg);
__ str(value, MemOperand(object, reg));
}
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
+ AtomicMemoryOrderField::decode(instr->opcode()) ==
+ AtomicMemoryOrder::kSeqCst) {
+ __ dmb(ISH);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -1619,12 +1583,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1632,11 +1594,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1644,22 +1604,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputFloatRegister(), address, 0);
- } else {
- __ vldr(i.OutputFloatRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1688,15 +1639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputDoubleRegister(), address, 0);
- } else {
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1832,10 +1775,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
case kArmVmullLow: {
auto dt = static_cast<NeonDataType>(MiscField::decode(instr->opcode()));
__ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(),
@@ -3373,94 +3312,97 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src_list, i.InputUint8(1), i.NeonInputOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
@@ -3597,20 +3539,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
- FlagsConditionToCondition(condition));
- __ csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3805,7 +3733,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3955,12 +3882,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// DropArguments().
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
- __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC,
+ lt);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 2698d45ae7..3de9b2aab6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -430,17 +430,18 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
if (g.CanBeImmediate(index, opcode)) {
inputs[input_count++] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_Offset_RI);
- } else if ((opcode == kArmStr) &&
+ } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
&inputs[3])) {
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- if (opcode == kArmVst1S128) {
+ if (arch_opcode == kArmVst1S128) {
// Inputs are value, base, index, only care about base and index.
EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
} else {
@@ -630,29 +631,69 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- ArmOperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return kArmVstrF32;
+ case MachineRepresentation::kFloat64:
+ return kArmVstrF64;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kArmStrb;
+ case MachineRepresentation::kWord16:
+ return kArmStrh;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kArmStr;
+ case MachineRepresentation::kSimd128:
+ return kArmVst1S128;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+}
+
+ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
@@ -678,58 +719,44 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code;
+ if (!atomic_order) {
+ code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= AtomicMemoryOrderField::encode(*atomic_order);
+ code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
+ }
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs);
+ selector->Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kArmVstrF32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kArmVstrF64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kArmStrb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArmStrh;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kArmStr;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kArmVst1S128;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
+ if (!atomic_order) {
+ opcode = GetStoreOpcode(rep);
+ } else {
+ // Release stores emit DMB ISH; STR while sequentially consistent stores
+ // emit DMB ISH; STR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ opcode = GetAtomicStoreOpcode(rep);
+ opcode |= AtomicMemoryOrderField::encode(*atomic_order);
}
ExternalReferenceMatcher m(base);
if (m.HasResolvedValue() &&
- CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.ResolvedValue());
+ selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
- Emit(opcode, 0, nullptr, input_count, inputs);
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
return;
}
}
@@ -738,10 +765,17 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = g.UseRegister(base);
- EmitStore(this, opcode, input_count, inputs, index);
+ EmitStore(selector, opcode, input_count, inputs, index);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2236,22 +2270,27 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2261,34 +2300,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -2299,15 +2313,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2334,15 +2348,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2399,12 +2413,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
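(Editor's note: the comments added in instruction-selector-arm.cc above cite the C/C++11-to-ARM mapping at https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html: acquire and sequentially consistent loads both lower to LDR; DMB ISH, release stores to DMB ISH; STR, and sequentially consistent stores to DMB ISH; STR; DMB ISH. Below is a minimal, illustrative C++ sketch of that mapping; it is not part of the patch, and the names in it are hypothetical.)

// Illustrative sketch of the ARMv7 lowerings referenced above; not V8 code.
#include <atomic>

std::atomic<int> cell{0};

int acquire_or_seq_cst_load() {
  // Both orders lower to: LDR; DMB ISH (the memory order field is ignored).
  return cell.load(std::memory_order_acquire);
}

void release_store(int v) {
  // Lowers to: DMB ISH; STR.
  cell.store(v, std::memory_order_release);
}

void seq_cst_store(int v) {
  // Lowers to: DMB ISH; STR; DMB ISH.
  cell.store(v, std::memory_order_seq_cst);
}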
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c121383426..fcab0a739b 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -235,7 +235,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
- break;
}
UNREACHABLE();
}
@@ -460,47 +459,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
- : kSpeculationPoisonRegister.W();
- codegen->tasm()->And(value, value, Operand(poison));
- }
-}
-
-void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
- Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- AddressingMode address_mode = AddressingModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
- UseScratchRegisterScope temps(codegen->tasm());
- Register address = temps.AcquireX();
- switch (address_mode) {
- case kMode_MRI: // Fall through.
- case kMode_MRR:
- codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
- break;
- case kMode_Operand2_R_LSL_I:
- codegen->tasm()->Add(address, i->InputRegister(0),
- i->InputOperand2_64(1));
- break;
- default:
- // Note: we don't need poisoning for kMode_Root loads as those loads
- // target a fixed offset from root register which is set once when
- // initializing the vm.
- UNREACHABLE();
- }
- codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
- codegen->tasm()->Ldr(output_reg, MemOperand(address));
- } else {
- codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
- }
-}
-
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
@@ -714,29 +672,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ Csetm(kSpeculationPoisonRegister, eq);
- __ Csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, sp);
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(scratch, scratch, kSpeculationPoisonRegister);
- __ Mov(sp, scratch);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1034,6 +969,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bind(ool->exit());
break;
}
+ case kArchAtomicStoreWithWriteBarrier: {
+ DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1232,6 +1186,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).Format(src_f));
break;
}
+ case kArm64ISplat: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register src = LaneSizeField::decode(opcode) == 64 ? i.InputRegister64(0)
+ : i.InputRegister32(0);
+ __ Dup(i.OutputSimd128Register().Format(f), src);
+ break;
+ }
+ case kArm64FSplat: {
+ VectorFormat src_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(src_f);
+ __ Dup(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), 0);
+ break;
+ }
+ case kArm64Smlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Smlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Smull: {
if (instr->InputAt(0)->IsRegister()) {
__ Smull(i.OutputRegister(), i.InputRegister32(0),
@@ -1254,6 +1241,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(src_f));
break;
}
+ case kArm64Umlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Umlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Umull: {
if (instr->InputAt(0)->IsRegister()) {
__ Umull(i.OutputRegister(), i.InputRegister32(0),
@@ -1551,6 +1556,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt32: {
+ __ PopcntHelper(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64Cnt64: {
+ __ PopcntHelper(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ }
case kArm64Cnt: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
@@ -1814,12 +1827,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrsbW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1832,12 +1843,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrshW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1850,12 +1859,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrsw:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1864,19 +1871,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldr:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdarDecompressTaggedSigned:
+ __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressTaggedPointer:
+ __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressAnyTagged:
+ __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1885,9 +1900,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StlrCompressTagged:
+      // To be consistent with other STLR instructions, the value to store is
+      // taken from the 3rd input register instead of the 1st.
+ __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
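      // A note on the operand layout assumed by this atomic tagged store:
      // inputs 0 and 1 presumably carry the base and index of the address,
      // and the temp register receives the computed absolute address, since
      // STLR only accepts a plain base register with no offset; input 2
      // holds the tagged value being released.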
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
+ __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1895,7 +1916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1916,117 +1937,100 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
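      // From here on, the former kWord32Atomic* / kArm64Word64Atomic* pairs
      // for the 8/16/32-bit widths are merged into width-agnostic kAtomic*
      // opcodes; the access width now travels in an AtomicWidthField of the
      // instruction code (hence the DCHECK above), and only the genuinely
      // 64-bit operations keep their Arm64Word64Atomic*Uint64 names.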
- case kWord32AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
break;
- case kWord32AtomicLoadWord32:
- case kArm64Word64AtomicLoadUint32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
break;
case kArm64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
break;
- case kWord32AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
break;
- case kWord32AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
break;
- case kWord32AtomicStoreWord32:
- case kArm64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
break;
case kArm64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
break;
- case kWord32AtomicExchangeWord32:
- case kArm64Word64AtomicExchangeUint32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
break;
- case kWord32AtomicCompareExchangeWord32:
- case kArm64Word64AtomicCompareExchangeUint32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
- case kArm64Word64Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
- case kArm64Word64Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
break; \
- case kWord32Atomic##op##Word32: \
- case kArm64Word64Atomic##op##Uint32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
break; \
case kArm64Word64Atomic##op##Uint64: \
@@ -2052,12 +2056,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
+#define SIMD_UNOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ break; \
+ }
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT(), \
i.InputSimd128Register(1).V##FORMAT()); \
break;
+#define SIMD_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), \
+ i.InputSimd128Register(1).Format(f)); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2066,7 +2085,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).V##FORMAT()); \
break; \
}
-
+#define SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ VRegister dst = i.OutputSimd128Register().Format(f); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f)); \
+ __ Instr(dst, i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(2).Format(f)); \
+ break; \
+ }
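    // The *_LANE_SIZE_CASE macros carry the central refactoring in this
    // patch: per-shape SIMD opcodes such as kArm64F64x2Add / kArm64F32x4Add
    // collapse into a single lane-size-parameterised opcode (kArm64FAdd,
    // kArm64IAdd, ...), with the lane width encoded in a LaneSizeField of
    // the InstructionCode. The selector presumably emits something along
    // the lines of
    //   Emit(kArm64FAdd | LaneSizeField::encode(64), ...);
    // and VectorFormatFillQ() maps that width back to the 2D/4S/8H/16B
    // format used by the macro bodies above.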
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FSqrt, Fsqrt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FAdd, Fadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FSub, Fsub);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMul, Fmul);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FDiv, Fdiv);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FNeg, Fneg);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64IAbs, Abs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64INeg, Neg);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64RoundingAverageU, Urhadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinS, Smin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxS, Smax);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinU, Umin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxU, Umax);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mla, Mla);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mls, Mls);
case kArm64Sxtl: {
VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
@@ -2129,49 +2174,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2S());
break;
}
- case kArm64F64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+ case kArm64FExtractLane: {
+ VectorFormat dst_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatFillQ(dst_f);
+ __ Mov(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), i.InputInt8(1));
break;
}
- case kArm64F64x2ExtractLane: {
- __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
- break;
- }
- case kArm64F64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64FReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
- break;
- }
- SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
- case kArm64F64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
+ break;
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FEq, Fcmeq);
+ case kArm64FNe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- case kArm64F64x2Lt: {
- __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLt: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmgt(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
- case kArm64F64x2Le: {
- __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmge(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
@@ -2197,63 +2238,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
- break;
- }
- case kArm64F32x4ExtractLane: {
- __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64F32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
- break;
- }
SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
- case kArm64F32x4MulElement: {
- __ Fmul(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).S(), i.InputInt8(2));
- break;
- }
- case kArm64F64x2MulElement: {
- __ Fmul(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).D(), i.InputInt8(2));
- break;
- }
- case kArm64F32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- case kArm64F32x4Lt: {
- __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- }
- case kArm64F32x4Le: {
- __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
+ case kArm64FMulElement: {
+ VectorFormat s_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat v_f = VectorFormatFillQ(s_f);
+ __ Fmul(i.OutputSimd128Register().Format(v_f),
+ i.InputSimd128Register(0).Format(v_f),
+ i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
@@ -2279,26 +2274,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64I64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
- break;
- }
- case kArm64I64x2ExtractLane: {
- __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
+ case kArm64IExtractLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register dst =
+ f == kFormat2D ? i.OutputRegister64() : i.OutputRegister32();
+ __ Mov(dst, i.InputSimd128Register(0).Format(f), i.InputInt8(1));
break;
}
- case kArm64I64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64IReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
+ Register src2 =
+ f == kFormat2D ? i.InputRegister64(2) : i.InputRegister32(2);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+ __ Mov(dst, i.InputInt8(1), src2);
break;
}
- SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
- SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
break;
@@ -2307,8 +2301,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 6, V2D, Sshl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
- SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
UseScratchRegisterScope scope(tasm());
VRegister dst = i.OutputSimd128Register();
@@ -2368,16 +2362,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IEq, Cmeq);
+ case kArm64INe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Cmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtS, Cmgt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeS, Cmge);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
@@ -2386,26 +2381,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
- case kArm64I32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
- break;
- }
- case kArm64I32x4ExtractLane: {
- __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64I32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
break;
@@ -2414,33 +2390,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 5, V4S, Sshl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mls, Mls, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
- case kArm64I32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Abs, Abs, 4S);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2468,30 +2425,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
break;
}
- case kArm64I16x8Splat: {
- __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
- break;
- }
- case kArm64I16x8ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneU: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneS: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V8H(),
- src1 = i.InputSimd128Register(0).V8H();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
break;
@@ -2514,25 +2459,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatS, Sqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatS, Sqsub);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
- case kArm64I16x8Ne: {
- VRegister dst = i.OutputSimd128Register().V8H();
- __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
- i.InputSimd128Register(1).V8H());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2551,15 +2480,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
- SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatU, Uqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
- SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2576,30 +2499,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V8H(), 0);
break;
}
- case kArm64I8x16Splat: {
- __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
- break;
- }
- case kArm64I8x16ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V16B(),
- src1 = i.InputSimd128Register(0).V16B();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 3, V16B, Sshl, W);
break;
@@ -2622,24 +2521,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
- case kArm64I8x16Ne: {
- VRegister dst = i.OutputSimd128Register().V16B();
- __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
- i.InputSimd128Register(1).V16B());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 3, V16B, Ushl, W);
break;
@@ -2658,14 +2539,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
- SIMD_BINOP_CASE(kArm64I8x16RoundingAverageU, Urhadd, 16B);
- SIMD_UNOP_CASE(kArm64I8x16Abs, Abs, 16B);
case kArm64I8x16BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2716,12 +2589,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default:
UNREACHABLE();
- break;
}
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
SIMD_BINOP_CASE(kArm64S128AndNot, Bic, 16B);
+ case kArm64Ssra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Ssra(dst, i.InputSimd128Register(1).Format(f), i.InputInt8(2) & mask);
+ break;
+ }
+ case kArm64Usra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Usra(dst, i.InputSimd128Register(1).Format(f), i.InputUint8(2) & mask);
+ break;
+ }
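      // In both the SSRA and USRA cases the immediate shift count is masked
      // with (laneSize - 1), i.e. taken modulo the lane width, matching the
      // wasm SIMD shift semantics; the DCHECKs reflect that these are
      // accumulating (destructive) instructions, so the destination must
      // alias input 0.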
case kArm64S32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V4S(),
src0 = i.InputSimd128Register(0).V4S(),
@@ -2892,8 +2782,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef SIMD_UNOP_CASE
+#undef SIMD_UNOP_LANE_SIZE_CASE
#undef SIMD_BINOP_CASE
+#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
+#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -2907,7 +2800,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2919,7 +2811,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2931,7 +2822,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2943,7 +2833,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2961,19 +2850,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ CmovX(kSpeculationPoisonRegister, xzr,
- FlagsConditionToCondition(condition));
- __ Csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3143,7 +3019,6 @@ void CodeGenerator::AssembleConstructFrame() {
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
- ResetSpeculationPoison();
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3343,7 +3218,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ }
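      // When kJSArgcIncludesReceiver is set, the argument count register
      // already counts the receiver, so the explicit +1 adjustment is only
      // needed in the legacy configuration.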
if (parameter_slots > 1) {
__ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 3f2e6151b6..d57203639e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,423 +11,337 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64F64x2Splat) \
- V(Arm64F64x2ExtractLane) \
- V(Arm64F64x2ReplaceLane) \
- V(Arm64F64x2Abs) \
- V(Arm64F64x2Neg) \
- V(Arm64F64x2Sqrt) \
- V(Arm64F64x2Add) \
- V(Arm64F64x2Sub) \
- V(Arm64F64x2Mul) \
- V(Arm64F64x2MulElement) \
- V(Arm64F64x2Div) \
- V(Arm64F64x2Min) \
- V(Arm64F64x2Max) \
- V(Arm64F64x2Eq) \
- V(Arm64F64x2Ne) \
- V(Arm64F64x2Lt) \
- V(Arm64F64x2Le) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4Splat) \
- V(Arm64F32x4ExtractLane) \
- V(Arm64F32x4ReplaceLane) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4Abs) \
- V(Arm64F32x4Neg) \
- V(Arm64F32x4Sqrt) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Add) \
- V(Arm64F32x4Sub) \
- V(Arm64F32x4Mul) \
- V(Arm64F32x4MulElement) \
- V(Arm64F32x4Div) \
- V(Arm64F32x4Min) \
- V(Arm64F32x4Max) \
- V(Arm64F32x4Eq) \
- V(Arm64F32x4Ne) \
- V(Arm64F32x4Lt) \
- V(Arm64F32x4Le) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64I64x2Splat) \
- V(Arm64I64x2ExtractLane) \
- V(Arm64I64x2ReplaceLane) \
- V(Arm64I64x2Abs) \
- V(Arm64I64x2Neg) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64I64x2Add) \
- V(Arm64I64x2Sub) \
- V(Arm64I64x2Mul) \
- V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4Splat) \
- V(Arm64I32x4ExtractLane) \
- V(Arm64I32x4ReplaceLane) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Neg) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Add) \
- V(Arm64I32x4Sub) \
- V(Arm64I32x4Mul) \
- V(Arm64I32x4Mla) \
- V(Arm64I32x4Mls) \
- V(Arm64I32x4MinS) \
- V(Arm64I32x4MaxS) \
- V(Arm64I32x4Eq) \
- V(Arm64I32x4Ne) \
- V(Arm64I32x4GtS) \
- V(Arm64I32x4GeS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64I32x4MinU) \
- V(Arm64I32x4MaxU) \
- V(Arm64I32x4GtU) \
- V(Arm64I32x4GeU) \
- V(Arm64I32x4Abs) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64I16x8Splat) \
- V(Arm64I16x8ExtractLaneU) \
- V(Arm64I16x8ExtractLaneS) \
- V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8Neg) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64I16x8Add) \
- V(Arm64I16x8AddSatS) \
- V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8Mla) \
- V(Arm64I16x8Mls) \
- V(Arm64I16x8MinS) \
- V(Arm64I16x8MaxS) \
- V(Arm64I16x8Eq) \
- V(Arm64I16x8Ne) \
- V(Arm64I16x8GtS) \
- V(Arm64I16x8GeS) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSatU) \
- V(Arm64I16x8SubSatU) \
- V(Arm64I16x8MinU) \
- V(Arm64I16x8MaxU) \
- V(Arm64I16x8GtU) \
- V(Arm64I16x8GeU) \
- V(Arm64I16x8RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8Abs) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Splat) \
- V(Arm64I8x16ExtractLaneU) \
- V(Arm64I8x16ExtractLaneS) \
- V(Arm64I8x16ReplaceLane) \
- V(Arm64I8x16Neg) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16Add) \
- V(Arm64I8x16AddSatS) \
- V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSatS) \
- V(Arm64I8x16Mla) \
- V(Arm64I8x16Mls) \
- V(Arm64I8x16MinS) \
- V(Arm64I8x16MaxS) \
- V(Arm64I8x16Eq) \
- V(Arm64I8x16Ne) \
- V(Arm64I8x16GtS) \
- V(Arm64I8x16GeS) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSatU) \
- V(Arm64I8x16SubSatU) \
- V(Arm64I8x16MinU) \
- V(Arm64I8x16MaxU) \
- V(Arm64I8x16GtU) \
- V(Arm64I8x16GeU) \
- V(Arm64I8x16RoundingAverageU) \
- V(Arm64I8x16Abs) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint8) \
- V(Arm64Word64AtomicLoadUint16) \
- V(Arm64Word64AtomicLoadUint32) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord8) \
- V(Arm64Word64AtomicStoreWord16) \
- V(Arm64Word64AtomicStoreWord32) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint8) \
- V(Arm64Word64AtomicAddUint16) \
- V(Arm64Word64AtomicAddUint32) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint8) \
- V(Arm64Word64AtomicSubUint16) \
- V(Arm64Word64AtomicSubUint32) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint8) \
- V(Arm64Word64AtomicAndUint16) \
- V(Arm64Word64AtomicAndUint32) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint8) \
- V(Arm64Word64AtomicOrUint16) \
- V(Arm64Word64AtomicOrUint32) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint8) \
- V(Arm64Word64AtomicXorUint16) \
- V(Arm64Word64AtomicXorUint32) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint8) \
- V(Arm64Word64AtomicExchangeUint16) \
- V(Arm64Word64AtomicExchangeUint32) \
- V(Arm64Word64AtomicExchangeUint64) \
- V(Arm64Word64AtomicCompareExchangeUint8) \
- V(Arm64Word64AtomicCompareExchangeUint16) \
- V(Arm64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrS) \
+ V(Arm64StrS) \
+ V(Arm64LdrD) \
+ V(Arm64StrD) \
+ V(Arm64LdrQ) \
+ V(Arm64StrQ) \
+ V(Arm64Ldrb) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Strb) \
+ V(Arm64Ldrh) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Strh) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64StrW) \
+ V(Arm64Ldr) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64Str) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64LoadSplat) \
+ V(Arm64LoadLane) \
+ V(Arm64StoreLane) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index f4446cdbf8..bb16b76aaf 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -26,6 +26,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmn:
case kArm64Cmn32:
case kArm64Cnt:
+ case kArm64Cnt32:
+ case kArm64Cnt64:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -42,10 +44,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
+ case kArm64Smlal:
+ case kArm64Smlal2:
case kArm64Smull:
case kArm64Smull2:
case kArm64Uadalp:
case kArm64Uaddlp:
+ case kArm64Umlal:
+ case kArm64Umlal2:
case kArm64Umull:
case kArm64Umull2:
case kArm64Madd:
@@ -147,23 +153,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
- case kArm64F64x2Splat:
- case kArm64F64x2ExtractLane:
- case kArm64F64x2ReplaceLane:
- case kArm64F64x2Abs:
- case kArm64F64x2Neg:
- case kArm64F64x2Sqrt:
- case kArm64F64x2Add:
- case kArm64F64x2Sub:
- case kArm64F64x2Mul:
- case kArm64F64x2MulElement:
- case kArm64F64x2Div:
- case kArm64F64x2Min:
- case kArm64F64x2Max:
- case kArm64F64x2Eq:
- case kArm64F64x2Ne:
- case kArm64F64x2Lt:
- case kArm64F64x2Le:
+ case kArm64FExtractLane:
+ case kArm64FReplaceLane:
+ case kArm64FSplat:
+ case kArm64FAbs:
+ case kArm64FSqrt:
+ case kArm64FNeg:
+ case kArm64FAdd:
+ case kArm64FSub:
+ case kArm64FMul:
+ case kArm64FMulElement:
+ case kArm64FDiv:
+ case kArm64FMin:
+ case kArm64FMax:
+ case kArm64FEq:
+ case kArm64FNe:
+ case kArm64FLt:
+ case kArm64FLe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
@@ -171,144 +177,73 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ConvertLowI32x4S:
case kArm64F64x2ConvertLowI32x4U:
case kArm64F64x2PromoteLowF32x4:
- case kArm64F32x4Splat:
- case kArm64F32x4ExtractLane:
- case kArm64F32x4ReplaceLane:
case kArm64F32x4SConvertI32x4:
case kArm64F32x4UConvertI32x4:
- case kArm64F32x4Abs:
- case kArm64F32x4Neg:
- case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
- case kArm64F32x4Add:
- case kArm64F32x4Sub:
- case kArm64F32x4Mul:
- case kArm64F32x4MulElement:
- case kArm64F32x4Div:
- case kArm64F32x4Min:
- case kArm64F32x4Max:
- case kArm64F32x4Eq:
- case kArm64F32x4Ne:
- case kArm64F32x4Lt:
- case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
case kArm64F32x4DemoteF64x2Zero:
- case kArm64I64x2Splat:
- case kArm64I64x2ExtractLane:
- case kArm64I64x2ReplaceLane:
- case kArm64I64x2Abs:
- case kArm64I64x2Neg:
+ case kArm64IExtractLane:
+ case kArm64IReplaceLane:
+ case kArm64ISplat:
+ case kArm64IAbs:
+ case kArm64INeg:
+ case kArm64Mla:
+ case kArm64Mls:
+ case kArm64RoundingAverageU:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
- case kArm64I64x2Add:
- case kArm64I64x2Sub:
+ case kArm64IAdd:
+ case kArm64ISub:
case kArm64I64x2Mul:
- case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
+ case kArm64IEq:
+ case kArm64INe:
+ case kArm64IGtS:
+ case kArm64IGeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
- case kArm64I32x4Splat:
- case kArm64I32x4ExtractLane:
- case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
case kArm64Sxtl:
case kArm64Sxtl2:
case kArm64Uxtl:
case kArm64Uxtl2:
- case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
- case kArm64I32x4Add:
- case kArm64I32x4Sub:
case kArm64I32x4Mul:
- case kArm64I32x4Mla:
- case kArm64I32x4Mls:
- case kArm64I32x4MinS:
- case kArm64I32x4MaxS:
- case kArm64I32x4Eq:
- case kArm64I32x4Ne:
- case kArm64I32x4GtS:
- case kArm64I32x4GeS:
+ case kArm64IMinS:
+ case kArm64IMaxS:
case kArm64I32x4UConvertF32x4:
case kArm64I32x4ShrU:
- case kArm64I32x4MinU:
- case kArm64I32x4MaxU:
- case kArm64I32x4GtU:
- case kArm64I32x4GeU:
- case kArm64I32x4Abs:
+ case kArm64IMinU:
+ case kArm64IMaxU:
+ case kArm64IGtU:
+ case kArm64IGeU:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
case kArm64I32x4TruncSatF64x2SZero:
case kArm64I32x4TruncSatF64x2UZero:
- case kArm64I16x8Splat:
- case kArm64I16x8ExtractLaneU:
- case kArm64I16x8ExtractLaneS:
- case kArm64I16x8ReplaceLane:
- case kArm64I16x8Neg:
+ case kArm64IExtractLaneU:
+ case kArm64IExtractLaneS:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
- case kArm64I16x8Add:
- case kArm64I16x8AddSatS:
- case kArm64I16x8Sub:
- case kArm64I16x8SubSatS:
+ case kArm64IAddSatS:
+ case kArm64ISubSatS:
case kArm64I16x8Mul:
- case kArm64I16x8Mla:
- case kArm64I16x8Mls:
- case kArm64I16x8MinS:
- case kArm64I16x8MaxS:
- case kArm64I16x8Eq:
- case kArm64I16x8Ne:
- case kArm64I16x8GtS:
- case kArm64I16x8GeS:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSatU:
- case kArm64I16x8SubSatU:
- case kArm64I16x8MinU:
- case kArm64I16x8MaxU:
- case kArm64I16x8GtU:
- case kArm64I16x8GeU:
- case kArm64I16x8RoundingAverageU:
+ case kArm64IAddSatU:
+ case kArm64ISubSatU:
case kArm64I16x8Q15MulRSatS:
- case kArm64I16x8Abs:
case kArm64I16x8BitMask:
- case kArm64I8x16Splat:
- case kArm64I8x16ExtractLaneU:
- case kArm64I8x16ExtractLaneS:
- case kArm64I8x16ReplaceLane:
- case kArm64I8x16Neg:
case kArm64I8x16Shl:
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
- case kArm64I8x16Add:
- case kArm64I8x16AddSatS:
- case kArm64I8x16Sub:
- case kArm64I8x16SubSatS:
- case kArm64I8x16Mla:
- case kArm64I8x16Mls:
- case kArm64I8x16MinS:
- case kArm64I8x16MaxS:
- case kArm64I8x16Eq:
- case kArm64I8x16Ne:
- case kArm64I8x16GtS:
- case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSatU:
- case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
- case kArm64I8x16MinU:
- case kArm64I8x16MaxU:
- case kArm64I8x16GtU:
- case kArm64I8x16GeU:
- case kArm64I8x16RoundingAverageU:
- case kArm64I8x16Abs:
case kArm64I8x16BitMask:
case kArm64S128Const:
case kArm64S128Zero:
@@ -319,6 +254,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Not:
case kArm64S128Select:
case kArm64S128AndNot:
+ case kArm64Ssra:
+ case kArm64Usra:
case kArm64S32x4ZipLeft:
case kArm64S32x4ZipRight:
case kArm64S32x4UnzipLeft:
@@ -373,6 +310,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
+ case kArm64LdarDecompressTaggedSigned:
+ case kArm64LdarDecompressTaggedPointer:
+ case kArm64LdarDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -395,48 +335,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64StlrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:
return kHasSideEffect;
- case kArm64Word64AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint32:
case kArm64Word64AtomicLoadUint64:
return kIsLoadOperation;
- case kArm64Word64AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord32:
case kArm64Word64AtomicStoreWord64:
- case kArm64Word64AtomicAddUint8:
- case kArm64Word64AtomicAddUint16:
- case kArm64Word64AtomicAddUint32:
case kArm64Word64AtomicAddUint64:
- case kArm64Word64AtomicSubUint8:
- case kArm64Word64AtomicSubUint16:
- case kArm64Word64AtomicSubUint32:
case kArm64Word64AtomicSubUint64:
- case kArm64Word64AtomicAndUint8:
- case kArm64Word64AtomicAndUint16:
- case kArm64Word64AtomicAndUint32:
case kArm64Word64AtomicAndUint64:
- case kArm64Word64AtomicOrUint8:
- case kArm64Word64AtomicOrUint16:
- case kArm64Word64AtomicOrUint32:
case kArm64Word64AtomicOrUint64:
- case kArm64Word64AtomicXorUint8:
- case kArm64Word64AtomicXorUint16:
- case kArm64Word64AtomicXorUint32:
case kArm64Word64AtomicXorUint64:
- case kArm64Word64AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint32:
case kArm64Word64AtomicExchangeUint64:
- case kArm64Word64AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint32:
case kArm64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 6a1a101e35..d102ecabb2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -190,7 +190,8 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
}
}
-void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRI(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -205,7 +206,8 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
-void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRIR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -845,10 +847,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (node->opcode() == IrOpcode::kProtectedLoad) {
opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
@@ -856,8 +854,6 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -1441,6 +1437,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
#define RR_OP_LIST(V) \
V(Word64Clz, kArm64Clz) \
V(Word32Clz, kArm64Clz32) \
+ V(Word32Popcnt, kArm64Cnt32) \
+ V(Word64Popcnt, kArm64Cnt64) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
V(Word32ReverseBytes, kArm64Rev32) \
@@ -1531,10 +1529,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-
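Editorial aside: removing these stubs pairs with the kArm64Cnt32/kArm64Cnt64 entries added to RR_OP_LIST above and with the kWord32Popcnt/kWord64Popcnt flags advertised near the end of this file. For readers checking the semantics only: the operation is a plain population count, as in the reference sketch below (illustrative library code, not what the backend emits — the arm64 code generator presumably lowers it through the NEON CNT instruction instead).

#include <bitset>
#include <cstdint>

// Reference semantics of Word32Popcnt / Word64Popcnt: number of set bits.
uint32_t Word32Popcnt(uint32_t x) {
  return static_cast<uint32_t>(std::bitset<32>(x).count());
}
uint64_t Word64Popcnt(uint64_t x) {
  return static_cast<uint64_t>(std::bitset<64>(x).count());
}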
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1938,7 +1932,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
@@ -2324,9 +2320,6 @@ template <int N>
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
- // Branch poisoning requires flags to be set, so when it's enabled for
- // a particular branch, we shouldn't be applying the cbz/tbz optimization.
- DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
@@ -2414,7 +2407,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ if (opcode == kArm64Cmp) {
Int64Matcher m(right);
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
@@ -2432,19 +2425,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (!cont->IsPoisoned()) {
- if (m.right().HasResolvedValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
- m.right().ResolvedValue(), node, cond, cont)) {
- return;
- }
- } else if (m.left().HasResolvedValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
- m.left().ResolvedValue(), node, commuted_cond,
- cont)) {
- return;
- }
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasResolvedValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
+ return;
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2533,8 +2524,7 @@ struct TestAndBranchMatcher {
Matcher matcher_;
void Initialize() {
- if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasResolvedValue() &&
+ if (cont_->IsBranch() && matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
@@ -2583,7 +2573,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2592,13 +2582,14 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2609,40 +2600,149 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(new_value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDAR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kArm64Word64AtomicLoadUint64;
+ break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ code = kArm64LdarDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ code = kArm64LdarDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ code = kArm64LdarDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ code = kArm64Word64AtomicLoadUint64;
+ } else {
+ code = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ code = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |=
+ AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
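Editorial aside on the `code |= AddressingModeField::encode(...) | AtomicWidthField::encode(width)` pattern above: the selected opcode and its modifiers travel to the code generator packed into a single InstructionCode word. The sketch below shows only the general bit-field idea; the field names, positions and widths here are invented for the example and are not V8's actual layout.

#include <cstdint>
#include <cstdio>

// Minimal stand-in for the Field::encode()/decode() helpers used above.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class AddressingMode : uint32_t { kMode_MRI, kMode_MRR };
enum class AtomicWidth : uint32_t { kWord32, kWord64 };

// Hypothetical layout: the arch opcode sits in the low 9 bits.
using AddressingModeField = BitField<AddressingMode, 9, 5>;
using AtomicWidthField = BitField<AtomicWidth, 14, 2>;

int main() {
  const uint32_t kAtomicLoadWord32 = 42;  // placeholder opcode value
  uint32_t code = kAtomicLoadWord32 |
                  AddressingModeField::encode(AddressingMode::kMode_MRR) |
                  AtomicWidthField::encode(AtomicWidth::kWord64);
  std::printf("opcode=%u mode=%u width=%u\n", code & 0x1ffu,
              static_cast<uint32_t>(AddressingModeField::decode(code)),
              static_cast<uint32_t>(AtomicWidthField::decode(code)));
}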
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+
+ // The memory order is ignored as both release and sequentially consistent
+ // stores can emit STLR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kArm64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kArm64StlrCompressTagged;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK_EQ(width, AtomicWidth::kWord32);
+ code = kArm64StlrCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+ }
+
+ code |= AddressingModeField::encode(kMode_MRR);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
temps);
}
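Editorial aside: the two comments in these functions lean on the standard C++11-to-ARMv8 mapping (the cpp0xmappings page linked in the code): acquire and sequentially consistent loads both lower to LDAR, and release and sequentially consistent stores both lower to STLR, which is why the selector needs only one opcode per representation rather than one per memory order. A source-level illustration of that mapping follows; the exact assembly naturally depends on the compiler.

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> cell{0};

// On AArch64, both loads below are expected to become LDAR and both stores
// STLR under the usual C++11 mapping; only the language-level ordering
// guarantees differ.
uint64_t load_acquire() { return cell.load(std::memory_order_acquire); }
uint64_t load_seq_cst() { return cell.load(std::memory_order_seq_cst); }
void store_release(uint64_t v) { cell.store(v, std::memory_order_release); }
void store_seq_cst(uint64_t v) { cell.store(v, std::memory_order_seq_cst); }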
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2653,7 +2753,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
@@ -2842,7 +2943,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- if (!cont->IsPoisoned() && cont->IsBranch()) {
+ if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
@@ -3196,159 +3297,91 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -3369,15 +3402,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3402,14 +3434,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
- kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kArm64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3426,44 +3458,22 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-#define SIMD_TYPE_LIST(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
-
#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
- V(I64x2Abs, kArm64I64x2Abs) \
- V(I64x2Neg, kArm64I64x2Neg) \
V(I64x2BitMask, kArm64I64x2BitMask) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
V(I32x4BitMask, kArm64I32x4BitMask) \
V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
V(I8x16BitMask, kArm64I8x16BitMask) \
V(S128Not, kArm64S128Not) \
V(V128AnyTrue, kArm64V128AnyTrue) \
@@ -3472,6 +3482,28 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8AllTrue, kArm64I16x8AllTrue) \
V(I8x16AllTrue, kArm64I8x16AllTrue)
+#define SIMD_UNOP_LANE_SIZE_LIST(V) \
+ V(F64x2Splat, kArm64FSplat, 64) \
+ V(F64x2Abs, kArm64FAbs, 64) \
+ V(F64x2Sqrt, kArm64FSqrt, 64) \
+ V(F64x2Neg, kArm64FNeg, 64) \
+ V(F32x4Splat, kArm64FSplat, 32) \
+ V(F32x4Abs, kArm64FAbs, 32) \
+ V(F32x4Sqrt, kArm64FSqrt, 32) \
+ V(F32x4Neg, kArm64FNeg, 32) \
+ V(I64x2Splat, kArm64ISplat, 64) \
+ V(I64x2Abs, kArm64IAbs, 64) \
+ V(I64x2Neg, kArm64INeg, 64) \
+ V(I32x4Splat, kArm64ISplat, 32) \
+ V(I32x4Abs, kArm64IAbs, 32) \
+ V(I32x4Neg, kArm64INeg, 32) \
+ V(I16x8Splat, kArm64ISplat, 16) \
+ V(I16x8Abs, kArm64IAbs, 16) \
+ V(I16x8Neg, kArm64INeg, 16) \
+ V(I8x16Splat, kArm64ISplat, 8) \
+ V(I8x16Abs, kArm64IAbs, 8) \
+ V(I8x16Neg, kArm64INeg, 8)
+
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
V(I64x2ShrS, 64) \
@@ -3487,85 +3519,85 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, kArm64F64x2Add) \
- V(F64x2Sub, kArm64F64x2Sub) \
- V(F64x2Div, kArm64F64x2Div) \
- V(F64x2Min, kArm64F64x2Min) \
- V(F64x2Max, kArm64F64x2Max) \
- V(F64x2Eq, kArm64F64x2Eq) \
- V(F64x2Ne, kArm64F64x2Ne) \
- V(F64x2Lt, kArm64F64x2Lt) \
- V(F64x2Le, kArm64F64x2Le) \
- V(F32x4Add, kArm64F32x4Add) \
- V(F32x4Sub, kArm64F32x4Sub) \
- V(F32x4Div, kArm64F32x4Div) \
- V(F32x4Min, kArm64F32x4Min) \
- V(F32x4Max, kArm64F32x4Max) \
- V(F32x4Eq, kArm64F32x4Eq) \
- V(F32x4Ne, kArm64F32x4Ne) \
- V(F32x4Lt, kArm64F32x4Lt) \
- V(F32x4Le, kArm64F32x4Le) \
- V(I64x2Add, kArm64I64x2Add) \
- V(I64x2Sub, kArm64I64x2Sub) \
- V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4Mul, kArm64I32x4Mul) \
- V(I32x4MinS, kArm64I32x4MinS) \
- V(I32x4MaxS, kArm64I32x4MaxS) \
- V(I32x4Eq, kArm64I32x4Eq) \
- V(I32x4Ne, kArm64I32x4Ne) \
- V(I32x4GtS, kArm64I32x4GtS) \
- V(I32x4GeS, kArm64I32x4GeS) \
- V(I32x4MinU, kArm64I32x4MinU) \
- V(I32x4MaxU, kArm64I32x4MaxU) \
- V(I32x4GtU, kArm64I32x4GtU) \
- V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSatS, kArm64I16x8AddSatS) \
- V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
- V(I16x8MinS, kArm64I16x8MinS) \
- V(I16x8MaxS, kArm64I16x8MaxS) \
- V(I16x8Eq, kArm64I16x8Eq) \
- V(I16x8Ne, kArm64I16x8Ne) \
- V(I16x8GtS, kArm64I16x8GtS) \
- V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSatU, kArm64I16x8AddSatU) \
- V(I16x8SubSatU, kArm64I16x8SubSatU) \
- V(I16x8MinU, kArm64I16x8MinU) \
- V(I16x8MaxU, kArm64I16x8MaxU) \
- V(I16x8GtU, kArm64I16x8GtU) \
- V(I16x8GeU, kArm64I16x8GeU) \
- V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
- V(I8x16Add, kArm64I8x16Add) \
- V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSatS, kArm64I8x16AddSatS) \
- V(I8x16SubSatS, kArm64I8x16SubSatS) \
- V(I8x16MinS, kArm64I8x16MinS) \
- V(I8x16MaxS, kArm64I8x16MaxS) \
- V(I8x16Eq, kArm64I8x16Eq) \
- V(I8x16Ne, kArm64I8x16Ne) \
- V(I8x16GtS, kArm64I8x16GtS) \
- V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSatU, kArm64I8x16AddSatU) \
- V(I8x16SubSatU, kArm64I8x16SubSatU) \
- V(I8x16MinU, kArm64I8x16MinU) \
- V(I8x16MaxU, kArm64I8x16MaxU) \
- V(I8x16GtU, kArm64I8x16GtU) \
- V(I8x16GeU, kArm64I8x16GeU) \
- V(I8x16RoundingAverageU, kArm64I8x16RoundingAverageU) \
V(S128And, kArm64S128And) \
V(S128Or, kArm64S128Or) \
V(S128Xor, kArm64S128Xor) \
V(S128AndNot, kArm64S128AndNot)
+#define SIMD_BINOP_LANE_SIZE_LIST(V) \
+ V(F64x2Min, kArm64FMin, 64) \
+ V(F64x2Max, kArm64FMax, 64) \
+ V(F64x2Add, kArm64FAdd, 64) \
+ V(F64x2Sub, kArm64FSub, 64) \
+ V(F64x2Div, kArm64FDiv, 64) \
+ V(F64x2Eq, kArm64FEq, 64) \
+ V(F64x2Ne, kArm64FNe, 64) \
+ V(F64x2Lt, kArm64FLt, 64) \
+ V(F64x2Le, kArm64FLe, 64) \
+ V(F32x4Min, kArm64FMin, 32) \
+ V(F32x4Max, kArm64FMax, 32) \
+ V(F32x4Add, kArm64FAdd, 32) \
+ V(F32x4Sub, kArm64FSub, 32) \
+ V(F32x4Div, kArm64FDiv, 32) \
+ V(F32x4Eq, kArm64FEq, 32) \
+ V(F32x4Ne, kArm64FNe, 32) \
+ V(F32x4Lt, kArm64FLt, 32) \
+ V(F32x4Le, kArm64FLe, 32) \
+ V(I64x2Sub, kArm64ISub, 64) \
+ V(I64x2Eq, kArm64IEq, 64) \
+ V(I64x2Ne, kArm64INe, 64) \
+ V(I64x2GtS, kArm64IGtS, 64) \
+ V(I64x2GeS, kArm64IGeS, 64) \
+ V(I32x4Eq, kArm64IEq, 32) \
+ V(I32x4Ne, kArm64INe, 32) \
+ V(I32x4GtS, kArm64IGtS, 32) \
+ V(I32x4GeS, kArm64IGeS, 32) \
+ V(I32x4GtU, kArm64IGtU, 32) \
+ V(I32x4GeU, kArm64IGeU, 32) \
+ V(I32x4MinS, kArm64IMinS, 32) \
+ V(I32x4MaxS, kArm64IMaxS, 32) \
+ V(I32x4MinU, kArm64IMinU, 32) \
+ V(I32x4MaxU, kArm64IMaxU, 32) \
+ V(I16x8AddSatS, kArm64IAddSatS, 16) \
+ V(I16x8SubSatS, kArm64ISubSatS, 16) \
+ V(I16x8AddSatU, kArm64IAddSatU, 16) \
+ V(I16x8SubSatU, kArm64ISubSatU, 16) \
+ V(I16x8Eq, kArm64IEq, 16) \
+ V(I16x8Ne, kArm64INe, 16) \
+ V(I16x8GtS, kArm64IGtS, 16) \
+ V(I16x8GeS, kArm64IGeS, 16) \
+ V(I16x8GtU, kArm64IGtU, 16) \
+ V(I16x8GeU, kArm64IGeU, 16) \
+ V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
+ V(I8x16RoundingAverageU, kArm64RoundingAverageU, 8) \
+ V(I16x8MinS, kArm64IMinS, 16) \
+ V(I16x8MaxS, kArm64IMaxS, 16) \
+ V(I16x8MinU, kArm64IMinU, 16) \
+ V(I16x8MaxU, kArm64IMaxU, 16) \
+ V(I8x16Sub, kArm64ISub, 8) \
+ V(I8x16AddSatS, kArm64IAddSatS, 8) \
+ V(I8x16SubSatS, kArm64ISubSatS, 8) \
+ V(I8x16AddSatU, kArm64IAddSatU, 8) \
+ V(I8x16SubSatU, kArm64ISubSatU, 8) \
+ V(I8x16Eq, kArm64IEq, 8) \
+ V(I8x16Ne, kArm64INe, 8) \
+ V(I8x16GtS, kArm64IGtS, 8) \
+ V(I8x16GeS, kArm64IGeS, 8) \
+ V(I8x16GtU, kArm64IGtU, 8) \
+ V(I8x16GeU, kArm64IGeU, 8) \
+ V(I8x16MinS, kArm64IMinS, 8) \
+ V(I8x16MaxS, kArm64IMaxS, 8) \
+ V(I8x16MinU, kArm64IMinU, 8) \
+ V(I8x16MaxU, kArm64IMaxU, 8)
+
void InstructionSelector::VisitS128Const(Node* node) {
Arm64OperandGenerator g(this);
static const int kUint32Immediates = 4;
@@ -3589,34 +3621,34 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kArm64S128Zero, g.DefineAsRegister(node));
}
-#define SIMD_VISIT_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRR(this, kArm64##Type##Splat, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
-#undef SIMD_VISIT_SPLAT
-
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- VisitRRI(this, kArm64##Type##ExtractLane##Sign, node); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, \
+ kArm64##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64)
+SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32)
+SIMD_VISIT_EXTRACT_LANE(I64x2, I, , 64)
+SIMD_VISIT_EXTRACT_LANE(I32x4, I, , 32)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, U, 16)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, S, 16)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, U, 8)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_REPLACE_LANE(F64x2, F, 64)
+SIMD_VISIT_REPLACE_LANE(F32x4, F, 32)
+SIMD_VISIT_REPLACE_LANE(I64x2, I, 64)
+SIMD_VISIT_REPLACE_LANE(I32x4, I, 32)
+SIMD_VISIT_REPLACE_LANE(I16x8, I, 16)
+SIMD_VISIT_REPLACE_LANE(I8x16, I, 8)
#undef SIMD_VISIT_REPLACE_LANE
-#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -3642,6 +3674,22 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+#define SIMD_VISIT_BINOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_BINOP_LANE_SIZE_LIST(SIMD_VISIT_BINOP_LANE_SIZE)
+#undef SIMD_VISIT_BINOP_LANE_SIZE
+#undef SIMD_BINOP_LANE_SIZE_LIST
+
+#define SIMD_VISIT_UNOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
+#undef SIMD_VISIT_UNOP_LANE_SIZE
+#undef SIMD_UNOP_LANE_SIZE_LIST
+
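Editorial aside: the lane-size lists above replace per-type opcodes (kArm64F64x2Add, kArm64F32x4Add, ...) with one shared opcode plus a LaneSizeField. For readers unfamiliar with the X-macro pattern, the stand-alone sketch below mimics how each V(Name, instruction, LaneSize) row expands into a visitor; the EncodeLaneSize helper and the opcode value are invented for the example and are not the real encoding.

#include <cstdint>
#include <cstdio>

// Invented stand-in for LaneSizeField::encode().
constexpr uint32_t EncodeLaneSize(int lane_size) {
  return static_cast<uint32_t>(lane_size) << 24;
}

constexpr uint32_t kFAdd = 7;  // placeholder for a shared FP-add opcode

#define BINOP_LANE_SIZE_LIST(V) \
  V(F64x2Add, kFAdd, 64)        \
  V(F32x4Add, kFAdd, 32)

#define DEFINE_VISIT(Name, opcode, LaneSize)              \
  void Visit##Name() {                                    \
    uint32_t code = (opcode) | EncodeLaneSize(LaneSize);  \
    std::printf(#Name " -> 0x%08x\n", code);              \
  }
BINOP_LANE_SIZE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
#undef BINOP_LANE_SIZE_LIST

int main() {
  VisitF64x2Add();  // same base opcode, lane size 64
  VisitF32x4Add();  // same base opcode, lane size 32
}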
using ShuffleMatcher =
ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
@@ -3702,22 +3750,22 @@ MulWithDupResult TryMatchMulWithDup(Node* node) {
void InstructionSelector::VisitF32x4Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<4>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F32x4MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(32),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F32x4Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(32), node);
}
}
void InstructionSelector::VisitF64x2Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<2>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F64x2MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(64),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F64x2Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(64), node);
}
}
@@ -3729,84 +3777,178 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
arraysize(temps), temps);
}
-#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
- void InstructionSelector::Visit##Type##Add(Node* node) { \
- Arm64OperandGenerator g(this); \
- Node* left = node->InputAt(0); \
- Node* right = node->InputAt(1); \
- /* Select Mla(z, x, y) for Add(Mul(x, y), z). */ \
- if (left->opcode() == IrOpcode::k##Type##Mul && CanCover(node, left)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0)), g.UseRegister(left->InputAt(1))); \
- return; \
- } \
- /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */ \
- if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0)), \
- g.UseRegister(right->InputAt(1))); \
- return; \
- } \
- /* Select Sadalp(x, y) for Add(x, ExtAddPairwiseS(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, right)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Sadalp(y, x) for Add(ExtAddPairwiseS(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, left)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(x, y) for Add(x, ExtAddPairwiseU(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, right)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(y, x) for Add(ExtAddPairwiseU(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, left)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- VisitRRR(this, kArm64##Type##Add, node); \
+namespace {
+
+// Used for pattern matching SIMD Add operations where one of the inputs
+// matches |opcode|, and ensures that the matched input is on the LHS (input 0).
+struct SimdAddOpMatcher : public NodeMatcher {
+ explicit SimdAddOpMatcher(Node* node, IrOpcode::Value opcode)
+ : NodeMatcher(node),
+ opcode_(opcode),
+ left_(InputAt(0)),
+ right_(InputAt(1)) {
+ DCHECK(HasProperty(Operator::kCommutative));
+ PutOpOnLeft();
+ }
+
+ bool Matches() { return left_->opcode() == opcode_; }
+ Node* left() const { return left_; }
+ Node* right() const { return right_; }
+
+ private:
+ void PutOpOnLeft() {
+ if (right_->opcode() == opcode_) {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left_);
+ node()->ReplaceInput(1, right_);
+ }
+ }
+ IrOpcode::Value opcode_;
+ Node* left_;
+ Node* right_;
+};
+
+bool ShraHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode shra_code, InstructionCode add_code,
+ IrOpcode::Value shift_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, shift_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ if (!g.IsIntegerConstant(m.left()->InputAt(1))) return false;
+
+  // A shift by a multiple of the lane size is effectively a zero shift, so
+  // just emit the addition.
+ if (g.GetIntegerConstantValue(m.left()->InputAt(1)) % lane_size == 0) {
+ selector->Emit(add_code, g.DefineAsRegister(node),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.right()));
+ } else {
+ selector->Emit(shra_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseImmediate(m.left()->InputAt(1)));
+ }
+ return true;
+}
+
+bool AdalpHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode adalp_code, IrOpcode::Value ext_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(adalp_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)));
+ return true;
+}
+
+bool MlaHelper(InstructionSelector* selector, Node* node,
+ InstructionCode mla_code, IrOpcode::Value mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(mla_code, g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+bool SmlalHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode smlal_code, IrOpcode::Value ext_mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+
+ selector->Emit(smlal_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+} // namespace
+
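Editorial aside: the helpers in this namespace all follow one recipe — canonicalise the commutative Add so the interesting operand (a Mul, a pairwise extend, or a shift) sits at input 0, check that the selector can cover it, then emit one fused instruction (MLA, SADALP/UADALP, SSRA/USRA, SMLAL/UMLAL). Below is a stripped-down, hypothetical version of just the canonicalisation step, using plain structs instead of V8's Node/NodeMatcher types.

#include <algorithm>
#include <string>
#include <vector>

// Toy IR node: an opcode name plus input edges.
struct ToyNode {
  std::string op;
  std::vector<ToyNode*> inputs;
};

// Mirrors SimdAddOpMatcher: for a commutative Add, swap the inputs so that
// a match for `wanted` (e.g. "I32x4Mul") always ends up at input 0, then
// report whether the pattern matched at all.
bool MatchAndCanonicalize(ToyNode* add, const std::string& wanted) {
  ToyNode*& left = add->inputs[0];
  ToyNode*& right = add->inputs[1];
  if (right->op == wanted) std::swap(left, right);
  return left->op == wanted;
}

int main() {
  ToyNode x{"Param", {}}, y{"Param", {}}, z{"Param", {}};
  ToyNode mul{"I32x4Mul", {&y, &z}};
  ToyNode add{"I32x4Add", {&x, &mul}};
  // Add(x, Mul(y, z)) becomes Add(Mul(y, z), x); a caller could then emit
  // Mla(x, y, z) instead of a separate multiply and add.
  return MatchAndCanonicalize(&add, "I32x4Mul") ? 0 : 1;
}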
+void InstructionSelector::VisitI64x2Add(Node* node) {
+ if (!ShraHelper(this, node, 64, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrS) &&
+ !ShraHelper(this, node, 64, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(64), node);
+ }
+}
+
+void InstructionSelector::VisitI8x16Add(Node* node) {
+ if (!ShraHelper(this, node, 8, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrS) &&
+ !ShraHelper(this, node, 8, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(8), node);
+ }
+}
+
+#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
+ void InstructionSelector::Visit##Type##Add(Node* node) { \
+    /* Select Mla(x, y, z) for Add(x, Mul(y, z)). */                        \
+ if (MlaHelper(this, node, kArm64Mla | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##Mul)) { \
+ return; \
+ } \
+ /* Select S/Uadalp(x, y) for Add(x, ExtAddPairwise(y)). */ \
+ if (AdalpHelper(this, node, LaneSize, kArm64Sadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S) || \
+ AdalpHelper(this, node, LaneSize, kArm64Uadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U)) { \
+ return; \
+ } \
+ /* Select S/Usra(x, y) for Add(x, ShiftRight(y, imm)). */ \
+ if (ShraHelper(this, node, LaneSize, kArm64Ssra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrS) || \
+ ShraHelper(this, node, LaneSize, kArm64Usra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrU)) { \
+ return; \
+ } \
+ /* Select Smlal/Umlal(x, y, z) for Add(x, ExtMulLow(y, z)) and \
+ * Smlal2/Umlal2(x, y, z) for Add(x, ExtMulHigh(y, z)). */ \
+ if (SmlalHelper(this, node, LaneSize, kArm64Smlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Smlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##U) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##U)) { \
+ return; \
+ } \
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(LaneSize), node); \
}
VISIT_SIMD_ADD(I32x4, I16x8, 32)
VISIT_SIMD_ADD(I16x8, I8x16, 16)
#undef VISIT_SIMD_ADD
-#define VISIT_SIMD_SUB(Type) \
+#define VISIT_SIMD_SUB(Type, LaneSize) \
void InstructionSelector::Visit##Type##Sub(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
/* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */ \
if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mls, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ Emit(kArm64Mls | LaneSizeField::encode(LaneSize), \
+ g.DefineSameAsFirst(node), g.UseRegister(left), \
g.UseRegister(right->InputAt(0)), \
g.UseRegister(right->InputAt(1))); \
return; \
} \
- VisitRRR(this, kArm64##Type##Sub, node); \
+ VisitRRR(this, kArm64ISub | LaneSizeField::encode(LaneSize), node); \
}
-VISIT_SIMD_SUB(I32x4)
-VISIT_SIMD_SUB(I16x8)
+VISIT_SIMD_SUB(I32x4, 32)
+VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
void InstructionSelector::VisitS128Select(Node* node) {
@@ -4110,6 +4252,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 9e378b8458..ad5e18d002 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name)
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator(
codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
protected_instructions_(codegen_zone),
result_(kSuccess),
- poisoning_level_(poisoning_level),
block_starts_(codegen_zone),
instr_starts_(codegen_zone),
debug_name_(debug_name) {
@@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
- offsets_info_.init_poison = tasm()->pc_offset();
- InitializeSpeculationPoison();
-
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
@@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() {
tasm()->bind(GetLabel(current_block_));
- TryInsertBranchPoisoning(block);
-
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
-void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
- // See if our predecessor was a basic block terminated by a branch_and_poison
- // instruction. If yes, then perform the masking based on the flags.
- if (block->PredecessorCount() != 1) return;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return;
- Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- switch (mode) {
- case kFlags_branch_and_poison: {
- BranchInfo branch;
- RpoNumber target = ComputeBranchInfo(&branch, instr);
- if (!target.IsValid()) {
- // Non-trivial branch, add the masking code.
- FlagsCondition condition = branch.condition;
- if (branch.false_label == GetLabel(block->rpo_number())) {
- condition = NegateFlagsCondition(condition);
- }
- AssembleBranchPoisoning(condition, instr);
- }
- break;
- }
- case kFlags_deoptimize_and_poison: {
- UNREACHABLE();
- }
- default:
- break;
- }
-}
-
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
@@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch:
- case kFlags_branch_and_poison: {
+ case kFlags_branch: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (target.IsValid()) {
@@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison: {
+ case kFlags_deoptimize: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset =
@@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
- Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = &continue_label;
+ branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(&continue_label);
- if (mode == kFlags_deoptimize_and_poison) {
- AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
- }
tasm()->bind(exit->continue_label());
break;
}
@@ -890,21 +848,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
case kFlags_trap: {
#if V8_ENABLE_WEBASSEMBLY
AssembleArchTrap(instr, condition);
+ break;
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
- break;
}
case kFlags_none: {
break;
}
}
- // TODO(jarin) We should thread the flag through rather than set it.
- if (instr->IsCall()) {
- ResetSpeculationPoison();
- }
-
return kSuccess;
}
@@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 2 - after
- // the code address and the poison-alias index.
- size_t frame_state_offset = 2;
+ // If the frame state is present, it starts at argument 1 - after
+ // the code address.
+ size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
@@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
OutputFrameStateCombine::Ignore());
}
-void CodeGenerator::InitializeSpeculationPoison() {
- if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
-
- // Initialize {kSpeculationPoisonRegister} either by comparing the expected
- // with the actual call target, or by unconditionally using {-1} initially.
- // Masking register arguments with it only makes sense in the first case.
- if (info()->called_with_code_start_register()) {
- tasm()->RecordComment("-- Prologue: generate speculation poison --");
- GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->poison_register_arguments()) {
- AssembleRegisterArgumentPoisoning();
- }
- } else {
- ResetSpeculationPoison();
- }
-}
-
-void CodeGenerator::ResetSpeculationPoison() {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- tasm()->ResetSpeculationPoisonRegister();
- }
-}
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 7ccb09d5ac..18de20f92c 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -103,7 +103,6 @@ class DeoptimizationLiteral {
struct TurbolizerCodeOffsetsInfo {
int code_start_register_check = -1;
int deopt_check = -1;
- int init_poison = -1;
int blocks_start = -1;
int out_of_line_code = -1;
int deoptimization_exits = -1;
@@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo {
// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name = nullptr);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name = nullptr);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
- // Inserts mask update at the beginning of an instruction block if the
- // predecessor blocks ends with a masking branch.
- void TryInsertBranchPoisoning(const InstructionBlock* block);
-
- // Initializes the masking register in the prologue of a function.
- void InitializeSpeculationPoison();
- // Reset the masking register during execution of a function.
- void ResetSpeculationPoison();
- // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
- void GenerateSpeculationPoisonFromCodeStartRegister();
-
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(int instruction_index,
const InstructionBlock* block);
@@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// contains the expected pointer to the start of the instruction stream.
void AssembleCodeStartRegisterCheck();
- void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
-
// When entering a code that is marked for deoptimization, rather continuing
// with its execution, we jump to a lazy compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring it.
void BailoutIfDeoptimized();
- // Generates code to poison the stack pointer and implicit register arguments
- // like the context register and the function register.
- void AssembleRegisterArgumentPoisoning();
-
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
- PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
TurbolizerCodeOffsetsInfo offsets_info_;
ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5db3f20fa4..e03f934ba5 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -5,6 +5,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/macro-assembler.h"
@@ -684,16 +685,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ bind(&skip);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -712,11 +703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -738,19 +725,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ call(wasm_code, constant.rmode());
- }
+ __ call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -762,12 +740,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt32());
__ jmp(wasm_code, constant.rmode());
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,11 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -800,11 +769,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -993,7 +958,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1005,7 +971,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ mov(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ mov(operand, value);
+ } else {
+ __ mov(scratch0, value);
+ __ xchg(scratch0, operand);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1278,9 +1249,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Bswap:
__ bswap(i.OutputRegister());
break;
- case kArchWordPoisonOnSpeculation:
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
case kIA32MFence:
__ mfence();
break;
@@ -1290,40 +1258,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Add:
- __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sub:
- __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Mul:
- __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Div:
- __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
@@ -1334,21 +1271,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
@@ -1488,22 +1410,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat64Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
@@ -1571,94 +1477,106 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Add: {
+ __ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Sub: {
+ __ Subss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Mul: {
+ __ Mulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Div: {
+ __ Divss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Add: {
+ __ Addsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Sub: {
+ __ Subsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Mul: {
+ __ Mulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Div: {
+ __ Divsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat32Abs: {
+ case kFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{33});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat32Neg: {
+ case kFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{31});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Abs: {
+ case kFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{1});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Neg: {
+ case kFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{63});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
case kSSEFloat64SilenceNaN:
@@ -2374,48 +2292,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vandnps(dst, dst, kScratchDoubleReg);
break;
}
- case kSSEF32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Eq: {
+ __ Cmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32F32x4Ne: {
+ __ Cmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEF32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Lt: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Lt: {
+ __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Le: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32F32x4Le: {
+ __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32F32x4Pmin: {
@@ -2445,20 +2339,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertF32x4: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // NAN->0
- __ Cmpeqps(kScratchDoubleReg, src, src);
- __ Pand(dst, src, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
case kIA32I32x4SConvertI16x8Low: {
@@ -2490,117 +2373,63 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
- case kSSEI32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Add: {
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4Sub: {
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Mul: {
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4MinS: {
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MaxS: {
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Eq: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Ne: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
break;
}
- case kSSEI32x4MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4GtS: {
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
- break;
- }
- case kAVXI32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg);
- break;
- }
- case kSSEI32x4GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32I32x4GeS: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
- break;
- }
- case kAVXI32x4GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = i.InputOperand(1);
- __ vpminsd(kScratchDoubleReg, src1, src2);
- __ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ XMMRegister src2 = i.InputSimd128Register(1);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsd(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqd(dst, kScratchDoubleReg, src2);
+ } else {
+ DCHECK_EQ(dst, src1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminsd(dst, src2);
+ __ pcmpeqd(dst, src2);
+ }
break;
}
case kSSEI32x4UConvertF32x4: {
@@ -2671,28 +2500,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
- case kSSEI32x4MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MinU: {
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4MaxU: {
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI32x4GtU: {
@@ -2748,10 +2563,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));
+ } else {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputOperand(0));
+ }
break;
}
case kIA32I16x8ExtractLaneS: {
@@ -2789,105 +2605,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
- case kSSEI16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8SConvertI32x4: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpackssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8SConvertI32x4: {
+ __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Add: {
+ __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8AddSatS: {
+ __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Sub: {
+ __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatS: {
+ __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Mul: {
+ __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MinS: {
+ __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MaxS: {
+ __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8Eq: {
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI16x8Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2904,15 +2666,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI16x8GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8GtS: {
+ __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GeS: {
@@ -2944,63 +2700,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
- case kSSEI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputSimd128Register(1));
- break;
- }
- case kSSEI16x8AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8UConvertI32x4: {
+ __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kSSEI16x8SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8AddSatU: {
+ __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatU: {
+ __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MinU: {
+ __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MaxU: {
+ __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GtU: {
@@ -3060,10 +2782,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I8x16Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0),
+ kScratchDoubleReg);
+ } else {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputOperand(0),
+ kScratchDoubleReg);
+ }
break;
}
case kIA32I8x16ExtractLaneS: {
@@ -3137,15 +2862,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
- case kSSEI8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packsswb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SConvertI16x8: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpacksswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SConvertI16x8: {
+ __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16Neg: {
@@ -3162,64 +2881,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Take shift value modulo 8.
- __ mov(tmp, i.InputRegister(1));
- __ and_(tmp, 7);
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): sub here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ sub(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psllw(dst, dst, tmp_simd);
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ __ I8x16Shl(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ tmp_simd);
}
break;
}
case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), i.TempRegister(0),
+ kScratchDoubleReg, i.TempSimd128Register(1));
}
break;
}
@@ -3296,18 +2980,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kSSEI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- __ packuswb(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackuswb(dst, dst, i.InputOperand(1));
+ case kIA32I8x16UConvertI16x8: {
+ __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32I8x16AddSatU: {
@@ -3322,34 +2997,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Unpack the bytes into words, do logical shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ i.TempSimd128Register(1));
}
+
break;
}
case kIA32I8x16MinU: {
@@ -3444,37 +3102,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSES128And: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ andps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128And: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSES128Or: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ orps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128Or: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32S128And: {
+ __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSES128Xor: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ xorps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32S128Or: {
+ __ Por(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXS128Xor: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32S128Xor: {
+ __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32S128Select: {
@@ -3541,20 +3181,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128Load8Splat: {
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load16Splat: {
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load32Splat: {
- __ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kIA32S128Load64Splat: {
@@ -3640,10 +3277,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3671,10 +3308,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3937,17 +3574,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairLoad: {
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ movq(tmp, i.MemoryOperand());
- __ Pextrd(i.OutputRegister(0), tmp, 0);
- __ Pextrd(i.OutputRegister(1), tmp, 1);
+ __ movq(kScratchDoubleReg, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
+ __ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
break;
}
- case kIA32Word32AtomicPairStore: {
+ case kIA32Word32ReleasePairStore: {
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(3);
+ __ movq(kScratchDoubleReg, MemOperand(esp, 0));
+ __ pop(ebx);
+ __ pop(ebx);
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-3);
+ __ movq(i.MemoryOperand(2), kScratchDoubleReg);
+ break;
+ }
+ case kIA32Word32SeqCstPairStore: {
Label store;
__ bind(&store);
- __ mov(i.TempRegister(0), i.MemoryOperand(2));
- __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
@@ -3958,27 +3609,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &store);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
@@ -3998,31 +3649,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &exchange);
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
@@ -4038,27 +3689,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
+ case kAtomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint8: { \
+ case kAtomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Int16: { \
+ case kAtomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint16: { \
+ case kAtomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Word32: { \
+ case kAtomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
@@ -4107,16 +3758,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &binop);
break;
}
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
- break;
}
return kSuccess;
}
@@ -4126,41 +3776,29 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnorderedEqual:
case kEqual:
return equal;
- break;
case kUnorderedNotEqual:
case kNotEqual:
return not_equal;
- break;
case kSignedLessThan:
return less;
- break;
case kSignedGreaterThanOrEqual:
return greater_equal;
- break;
case kSignedLessThanOrEqual:
return less_equal;
- break;
case kSignedGreaterThan:
return greater;
- break;
case kUnsignedLessThan:
return below;
- break;
case kUnsignedGreaterThanOrEqual:
return above_equal;
- break;
case kUnsignedLessThanOrEqual:
return below_equal;
- break;
case kUnsignedGreaterThan:
return above;
- break;
case kOverflow:
return overflow;
- break;
case kNotOverflow:
return no_overflow;
- break;
default:
UNREACHABLE();
}
@@ -4183,12 +3821,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}

-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4648,18 +4280,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 42af3326f3..bb54c726aa 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -48,26 +48,14 @@ namespace compiler {
V(IA32MFence) \
V(IA32LFence) \
V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat32Max) \
V(SSEFloat64Max) \
V(SSEFloat32Min) \
V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat32ToFloat64) \
@@ -86,18 +74,18 @@ namespace compiler {
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
@@ -177,14 +165,10 @@ namespace compiler {
V(AVXF32x4Min) \
V(SSEF32x4Max) \
V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
@@ -197,33 +181,22 @@ namespace compiler {
V(IA32I32x4Neg) \
V(IA32I32x4Shl) \
V(IA32I32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
V(SSEI32x4UConvertF32x4) \
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
V(IA32I32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
V(SSEI32x4GtU) \
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
@@ -246,43 +219,28 @@ namespace compiler {
V(IA32I16x8Neg) \
V(IA32I16x8Shl) \
V(IA32I16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSatS) \
- V(AVXI16x8AddSatS) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSatS) \
- V(AVXI16x8SubSatS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
V(SSEI16x8Ne) \
V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
+ V(IA32I16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
V(IA32I16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSatU) \
- V(AVXI16x8AddSatU) \
- V(SSEI16x8SubSatU) \
- V(AVXI16x8SubSatU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
V(SSEI16x8GtU) \
V(AVXI16x8GtU) \
V(SSEI16x8GeU) \
@@ -305,8 +263,7 @@ namespace compiler {
V(IA32Pextrb) \
V(IA32Pextrw) \
V(IA32S128Store32Lane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16SConvertI16x8) \
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
@@ -322,8 +279,7 @@ namespace compiler {
V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
+ V(IA32I8x16UConvertI16x8) \
V(IA32I8x16AddSatU) \
V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
@@ -341,12 +297,9 @@ namespace compiler {
V(IA32S128Zero) \
V(IA32S128AllOnes) \
V(IA32S128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
V(IA32S128Select) \
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
@@ -402,7 +355,8 @@ namespace compiler {
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
- V(IA32Word32AtomicPairStore) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 278e7ea99b..3910d45195 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -49,26 +49,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Bswap:
case kIA32Lea:
case kSSEFloat32Cmp:
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Mul:
- case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat64Cmp:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
- case kSSEFloat64Mul:
- case kSSEFloat64Div:
case kSSEFloat64Mod:
case kSSEFloat32Max:
case kSSEFloat64Max:
case kSSEFloat32Min:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32ToFloat64:
@@ -87,18 +75,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
case kSSEFloat64SilenceNaN:
- case kAVXFloat32Add:
- case kAVXFloat32Sub:
- case kAVXFloat32Mul:
- case kAVXFloat32Div:
- case kAVXFloat64Add:
- case kAVXFloat64Sub:
- case kAVXFloat64Mul:
- case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Mul:
+ case kFloat32Div:
+ case kFloat64Mul:
+ case kFloat64Div:
+ case kFloat64Abs:
+ case kFloat64Neg:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32F64x2Splat:
@@ -162,14 +150,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Min:
case kSSEF32x4Max:
case kAVXF32x4Max:
- case kSSEF32x4Eq:
- case kAVXF32x4Eq:
- case kSSEF32x4Ne:
- case kAVXF32x4Ne:
- case kSSEF32x4Lt:
- case kAVXF32x4Lt:
- case kSSEF32x4Le:
- case kAVXF32x4Le:
+ case kIA32F32x4Eq:
+ case kIA32F32x4Ne:
+ case kIA32F32x4Lt:
+ case kIA32F32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
@@ -182,33 +166,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Neg:
case kIA32I32x4Shl:
case kIA32I32x4ShrS:
- case kSSEI32x4Add:
- case kAVXI32x4Add:
- case kSSEI32x4Sub:
- case kAVXI32x4Sub:
- case kSSEI32x4Mul:
- case kAVXI32x4Mul:
- case kSSEI32x4MinS:
- case kAVXI32x4MinS:
- case kSSEI32x4MaxS:
- case kAVXI32x4MaxS:
- case kSSEI32x4Eq:
- case kAVXI32x4Eq:
- case kSSEI32x4Ne:
- case kAVXI32x4Ne:
- case kSSEI32x4GtS:
- case kAVXI32x4GtS:
- case kSSEI32x4GeS:
- case kAVXI32x4GeS:
+ case kIA32I32x4Add:
+ case kIA32I32x4Sub:
+ case kIA32I32x4Mul:
+ case kIA32I32x4MinS:
+ case kIA32I32x4MaxS:
+ case kIA32I32x4Eq:
+ case kIA32I32x4Ne:
+ case kIA32I32x4GtS:
+ case kIA32I32x4GeS:
case kSSEI32x4UConvertF32x4:
case kAVXI32x4UConvertF32x4:
case kIA32I32x4UConvertI16x8Low:
case kIA32I32x4UConvertI16x8High:
case kIA32I32x4ShrU:
- case kSSEI32x4MinU:
- case kAVXI32x4MinU:
- case kSSEI32x4MaxU:
- case kAVXI32x4MaxU:
+ case kIA32I32x4MinU:
+ case kIA32I32x4MaxU:
case kSSEI32x4GtU:
case kAVXI32x4GtU:
case kSSEI32x4GeU:
@@ -231,43 +204,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8Neg:
case kIA32I16x8Shl:
case kIA32I16x8ShrS:
- case kSSEI16x8SConvertI32x4:
- case kAVXI16x8SConvertI32x4:
- case kSSEI16x8Add:
- case kAVXI16x8Add:
- case kSSEI16x8AddSatS:
- case kAVXI16x8AddSatS:
- case kSSEI16x8Sub:
- case kAVXI16x8Sub:
- case kSSEI16x8SubSatS:
- case kAVXI16x8SubSatS:
- case kSSEI16x8Mul:
- case kAVXI16x8Mul:
- case kSSEI16x8MinS:
- case kAVXI16x8MinS:
- case kSSEI16x8MaxS:
- case kAVXI16x8MaxS:
- case kSSEI16x8Eq:
- case kAVXI16x8Eq:
+ case kIA32I16x8SConvertI32x4:
+ case kIA32I16x8Add:
+ case kIA32I16x8AddSatS:
+ case kIA32I16x8Sub:
+ case kIA32I16x8SubSatS:
+ case kIA32I16x8Mul:
+ case kIA32I16x8MinS:
+ case kIA32I16x8MaxS:
+ case kIA32I16x8Eq:
case kSSEI16x8Ne:
case kAVXI16x8Ne:
- case kSSEI16x8GtS:
- case kAVXI16x8GtS:
+ case kIA32I16x8GtS:
case kSSEI16x8GeS:
case kAVXI16x8GeS:
case kIA32I16x8UConvertI8x16Low:
case kIA32I16x8UConvertI8x16High:
case kIA32I16x8ShrU:
- case kSSEI16x8UConvertI32x4:
- case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSatU:
- case kAVXI16x8AddSatU:
- case kSSEI16x8SubSatU:
- case kAVXI16x8SubSatU:
- case kSSEI16x8MinU:
- case kAVXI16x8MinU:
- case kSSEI16x8MaxU:
- case kAVXI16x8MaxU:
+ case kIA32I16x8UConvertI32x4:
+ case kIA32I16x8AddSatU:
+ case kIA32I16x8SubSatU:
+ case kIA32I16x8MinU:
+ case kIA32I16x8MaxU:
case kSSEI16x8GtU:
case kAVXI16x8GtU:
case kSSEI16x8GeU:
@@ -290,8 +248,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Pextrb:
case kIA32Pextrw:
case kIA32S128Store32Lane:
- case kSSEI8x16SConvertI16x8:
- case kAVXI8x16SConvertI16x8:
+ case kIA32I8x16SConvertI16x8:
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
@@ -307,8 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
- case kSSEI8x16UConvertI16x8:
- case kAVXI8x16UConvertI16x8:
+ case kIA32I8x16UConvertI16x8:
case kIA32I8x16AddSatU:
case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
@@ -326,12 +282,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32S128Zero:
case kIA32S128AllOnes:
case kIA32S128Not:
- case kSSES128And:
- case kAVXS128And:
- case kSSES128Or:
- case kAVXS128Or:
- case kSSES128Xor:
- case kAVXS128Xor:
+ case kIA32S128And:
+ case kIA32S128Or:
+ case kIA32S128Xor:
case kIA32S128Select:
case kIA32S128AndNot:
case kIA32I8x16Swizzle:
@@ -423,7 +376,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairLoad:
return kIsLoadOperation;
- case kIA32Word32AtomicPairStore:
+ case kIA32Word32ReleasePairStore:
+ case kIA32Word32SeqCstPairStore:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
@@ -447,7 +401,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kSSEFloat64Mul:
+ case kFloat64Mul:
return 5;
case kIA32Imul:
case kIA32ImulHigh:
@@ -455,18 +409,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kSSEFloat32Cmp:
case kSSEFloat64Cmp:
return 9;
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
+ case kFloat64Abs:
+ case kFloat64Neg:
return 5;
- case kSSEFloat32Mul:
+ case kFloat32Mul:
return 4;
case kSSEFloat32ToFloat64:
case kSSEFloat64ToFloat32:
@@ -484,9 +438,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 33;
case kIA32Udiv:
return 26;
- case kSSEFloat32Div:
+ case kFloat32Div:
return 35;
- case kSSEFloat64Div:
+ case kFloat64Div:
return 63;
case kSSEFloat32Sqrt:
case kSSEFloat64Sqrt:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f36fdb2935..ce792692f0 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -246,6 +246,41 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ return opcode;
+}
+
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* input = node->InputAt(0);
@@ -280,27 +315,24 @@ void VisitRR(InstructionSelector* selector, Node* node,
}
void VisitRROFloat(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 = g.Use(node->InputAt(1));
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1);
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(input), arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
@@ -329,7 +361,7 @@ void VisitRROSimd(InstructionSelector* selector, Node* node,
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
- g.Use(node->InputAt(1)));
+ g.UseRegister(node->InputAt(1)));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
g.UseRegister(node->InputAt(1)));
@@ -389,14 +421,28 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
}
}
-void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitI8x16Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ InstructionOperand output = CpuFeatures::IsSupported(AVX)
+ ? g.UseRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ if (opcode == kIA32I8x16ShrS) {
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
+ }
+ } else {
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ selector->Emit(opcode, output, operand0, operand1, arraysize(temps), temps);
+ }
}
} // namespace
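
Note: the temps requested in VisitI8x16Shift exist because x86 has no packed 8-bit shift; the byte shift is typically synthesized from a 16-bit shift plus a mask that clears the bits spilling across byte lanes (the immediate kIA32I8x16ShrS case is presumably handled by unpacking to 16-bit lanes instead, which is why it alone needs no mask temp). A scalar sketch of the shift-plus-mask trick on a 64-bit word (illustrative C++; the real code does this on 128 bits with psllw/pand):

    #include <cstdint>
    #include <cstdio>

    // Shifts every byte lane of `lanes` left by `n` using only a full-width shift
    // and a mask -- the trick behind the GP temp requested for kIA32I8x16Shl.
    uint64_t ShiftBytesLeft(uint64_t lanes, unsigned n) {
      // After the wide shift, the low bits of each byte hold garbage carried in
      // from the byte below; the replicated mask clears exactly those bits.
      uint64_t keep = 0x0101010101010101ULL * ((0xFFu << n) & 0xFFu);
      return (lanes << n) & keep;
    }

    int main() {
      std::printf("%016llx\n", static_cast<unsigned long long>(
                                   ShiftBytesLeft(0x80FF017F80FF017FULL, 1)));
      // Prints 00fe02fe00fe02fe: 0x80 -> 0x00, 0xFF -> 0xFE, 0x01 -> 0x02, 0x7F -> 0xFE.
    }
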
@@ -521,72 +567,110 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(!load_rep.IsMapWord());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
- ArchOpcode opcode;
- switch (load_rep.representation()) {
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
+ return kIA32Movss;
case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
+ return kIA32Movsd;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
- break;
+ return kIA32Movb;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
- break;
+ return kIA32Movw;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
+ return kIA32Movl;
case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
+ return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
- case MachineRepresentation::kMapWord:
UNREACHABLE();
}
+}
- IA32OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
+ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicExchangeInt8;
+ case MachineRepresentation::kWord16:
+ return kAtomicExchangeInt16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicExchangeWord32;
+ default:
+ UNREACHABLE();
}
- Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
-void InstructionSelector::VisitStore(Node* node) {
- IA32OperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
@@ -603,48 +687,23 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
+ temps);
+ } else if (is_seqcst) {
+ VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
} else {
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kIA32Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kIA32Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- }
+ // Release and non-atomic stores emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
+ } else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -655,13 +714,20 @@ void InstructionSelector::VisitStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
+ GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
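
Note: VisitStoreCommon follows the standard C++11-to-x86 lowering the comments link to: plain, relaxed and release stores are an ordinary MOV, while a sequentially consistent store goes through GetSeqCstStoreOpcode and is emitted as an atomic exchange (a full barrier on x86); acquire and seq_cst loads are plain MOVs either way. The same mapping in portable C++, for reference (nothing here is V8-specific):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> cell{0};

    void StoreRelease(uint32_t v) {
      // Plain MOV on x86: ordinary stores are already release-ordered.
      cell.store(v, std::memory_order_release);
    }

    void StoreSeqCst(uint32_t v) {
      // Needs a full barrier on x86; compilers emit XCHG (or MOV + MFENCE),
      // matching the kAtomicExchange* opcodes chosen by GetSeqCstStoreOpcode.
      cell.store(v, std::memory_order_seq_cst);
    }

    uint32_t LoadAcquire() {
      // Acquire and seq_cst loads are both a plain MOV on x86, which is why
      // VisitWord32AtomicLoad can ignore the memory order.
      return cell.load(std::memory_order_acquire);
    }
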
@@ -1106,31 +1172,31 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
-#define RRO_FLOAT_OP_LIST(V) \
- V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
- V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
- V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
- V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
- V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
- V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
- V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
- V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
- V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
- V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
- V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
- V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
- V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
- V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
-
-#define FLOAT_UNOP_LIST(V) \
- V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
- V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
- V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+#define RRO_FLOAT_OP_LIST(V) \
+ V(Float32Add, kFloat32Add) \
+ V(Float64Add, kFloat64Add) \
+ V(Float32Sub, kFloat32Sub) \
+ V(Float64Sub, kFloat64Sub) \
+ V(Float32Mul, kFloat32Mul) \
+ V(Float64Mul, kFloat64Mul) \
+ V(Float32Div, kFloat32Div) \
+ V(Float64Div, kFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le)
+
+#define FLOAT_UNOP_LIST(V) \
+ V(Float32Abs, kFloat32Abs) \
+ V(Float64Abs, kFloat64Abs) \
+ V(Float32Neg, kFloat32Neg) \
+ V(Float64Neg, kFloat64Neg) \
+ V(F64x2Abs, kFloat64Abs) \
+ V(F64x2Neg, kFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1164,17 +1230,17 @@ RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
-#define RRO_FLOAT_VISITOR(Name, avx, sse) \
+#define RRO_FLOAT_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRROFloat(this, node, avx, sse); \
+ VisitRROFloat(this, node, opcode); \
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
#undef RRO_FLOAT_OP_LIST
-#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+#define FLOAT_UNOP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitFloatUnop(this, node, node->InputAt(0), opcode); \
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
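
Note: the RRO_FLOAT_OP_LIST / FLOAT_UNOP_LIST rewrites above are the usual X-macro idiom: each list entry now carries a single unified opcode, and the *_VISITOR macro stamps out one Visit##Name method per row. A minimal self-contained example of the pattern (list and names invented for illustration):

    #include <cstdio>

    // Each row is V(Name, value); adding a row adds a generated function below.
    #define COLOR_LIST(V) \
      V(Red, 0xFF0000)    \
      V(Green, 0x00FF00)  \
      V(Blue, 0x0000FF)

    #define DEFINE_GETTER(Name, value) \
      int Get##Name() { return value; }
    COLOR_LIST(DEFINE_GETTER)
    #undef DEFINE_GETTER
    #undef COLOR_LIST

    int main() {
      std::printf("%06x %06x %06x\n", GetRed(), GetGreen(), GetBlue());
    }
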
@@ -1617,29 +1683,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, MachineRepresentation rep) {
- IA32OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- AddressingMode addressing_mode;
- InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
- ? g.UseFixed(value, edx)
- : g.UseUniqueRegister(value);
- InstructionOperand inputs[] = {
- value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- (rep == MachineRepresentation::kWord8)
- // Using DefineSameAsFirst requires the register to be unallocated.
- ? g.DefineAsFixed(node, edx)
- : g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
-}
-
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
AddressingMode addressing_mode;
@@ -1949,32 +1992,25 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
+ load_rep.representation() == MachineRepresentation::kWord32 ||
+ load_rep.representation() == MachineRepresentation::kTaggedSigned ||
+ load_rep.representation() == MachineRepresentation::kTaggedPointer ||
+ load_rep.representation() == MachineRepresentation::kTagged);
USE(load_rep);
- VisitLoad(node);
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- IA32OperandGenerator g(this);
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -1982,15 +2018,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2007,15 +2043,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2053,12 +2089,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
VisitAtomicBinOp(this, node, opcode, type.representation());
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2068,6 +2103,8 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ // Both acquire and sequentially consistent loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
AddressingMode mode;
Node* base = node->InputAt(0);
@@ -2079,10 +2116,9 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
g.GetEffectiveIndexOperand(index, &mode)};
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
g.DefineAsRegister(projection1)};
- Emit(code, 2, outputs, 2, inputs, 1, temps);
+ Emit(code, 2, outputs, 2, inputs);
} else if (projection0 || projection1) {
// Only one word is needed, so it's enough to load just that.
ArchOpcode opcode = kIA32Movl;
@@ -2103,25 +2139,45 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ // Release pair stores emit a MOVQ via a double register, and sequentially
+ // consistent stores emit CMPXCHG8B.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
- g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- // Allocating temp registers here as stores are performed using an atomic
- // exchange, the output of which is stored in edx:eax, which should be saved
- // and restored at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- InstructionCode code =
- kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
+ if (order == AtomicMemoryOrder::kAcqRel) {
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value),
+ g.UseUniqueRegisterOrSlotOrConstant(value_high),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ };
+ InstructionCode code = kIA32Word32ReleasePairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs);
+ } else {
+ DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ // Allocating temp registers here as stores are performed using an atomic
+ // exchange, the output of which is stored in edx:eax, which should be saved
+ // and restored at the end of the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
+ InstructionCode code = kIA32Word32SeqCstPairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ }
}
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
@@ -2193,60 +2249,57 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_LIST(V) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GtU) \
V(I32x4GeU) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
V(I16x8Ne) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
- V(I8x16SConvertI16x8) \
V(I8x16Ne) \
V(I8x16GeS) \
V(I8x16GtU) \
- V(I8x16GeU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
V(I64x2Ne) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
V(I32x4DotI16x8S) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16AddSatS) \
@@ -2260,7 +2313,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
- V(I8x16RoundingAverageU)
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
// These opcodes require all inputs to be registers because the codegen is
// simpler with all registers.
@@ -2462,7 +2520,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4SConvertF32x4);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4SConvertF32x4, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
@@ -2625,26 +2688,6 @@ SIMD_BINOP_RRR(VISIT_SIMD_BINOP_RRR)
#undef VISIT_SIMD_BINOP_RRR
#undef SIMD_BINOP_RRR
-// TODO(v8:9198): SSE requires operand1 to be a register as we don't have memory
-// alignment yet. For AVX, memory operands are fine, but can have performance
-// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
-// chapter 14.9
-void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
- ArchOpcode sse_opcode) {
- IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
- if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- } else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- }
-}
-
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -2652,43 +2695,16 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
-}
-
void InstructionSelector::VisitI8x16Shl(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16Shl, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16Shl);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16Shl);
}
void InstructionSelector::VisitI8x16ShrS(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- this->Emit(kIA32I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)));
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrS);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrS);
}
void InstructionSelector::VisitI8x16ShrU(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16ShrU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrU);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrU);
}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 31d669813e..63cf3ca06f 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -17,6 +17,8 @@
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
@@ -30,6 +32,7 @@
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
#include "src/base/bit-field.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/compiler/write-barrier-kind.h"
namespace v8 {
@@ -99,53 +102,53 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
+ V(ArchAtomicStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32) \
+ V(AtomicExchangeInt8) \
+ V(AtomicExchangeUint8) \
+ V(AtomicExchangeInt16) \
+ V(AtomicExchangeUint16) \
+ V(AtomicExchangeWord32) \
+ V(AtomicCompareExchangeInt8) \
+ V(AtomicCompareExchangeUint8) \
+ V(AtomicCompareExchangeInt16) \
+ V(AtomicCompareExchangeUint16) \
+ V(AtomicCompareExchangeWord32) \
+ V(AtomicAddInt8) \
+ V(AtomicAddUint8) \
+ V(AtomicAddInt16) \
+ V(AtomicAddUint16) \
+ V(AtomicAddWord32) \
+ V(AtomicSubInt8) \
+ V(AtomicSubUint8) \
+ V(AtomicSubInt16) \
+ V(AtomicSubUint16) \
+ V(AtomicSubWord32) \
+ V(AtomicAndInt8) \
+ V(AtomicAndUint8) \
+ V(AtomicAndInt16) \
+ V(AtomicAndUint16) \
+ V(AtomicAndWord32) \
+ V(AtomicOrInt8) \
+ V(AtomicOrUint8) \
+ V(AtomicOrInt16) \
+ V(AtomicOrUint16) \
+ V(AtomicOrWord32) \
+ V(AtomicXorInt8) \
+ V(AtomicXorUint8) \
+ V(AtomicXorInt16) \
+ V(AtomicXorUint16) \
+ V(AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -208,12 +211,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_branch_and_poison = 2,
- kFlags_deoptimize = 3,
- kFlags_deoptimize_and_poison = 4,
- kFlags_set = 5,
- kFlags_trap = 6,
- kFlags_select = 7,
+ kFlags_deoptimize = 2,
+ kFlags_set = 3,
+ kFlags_trap = 4,
+ kFlags_select = 5,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -262,9 +263,20 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
kMemoryAccessProtected = 1,
- kMemoryAccessPoisoned = 2
};
+enum class AtomicWidth { kWord32, kWord64 };
+
+inline size_t AtomicWidthSize(AtomicWidth width) {
+ switch (width) {
+ case AtomicWidth::kWord32:
+ return 4;
+ case AtomicWidth::kWord64:
+ return 8;
+ }
+ UNREACHABLE();
+}
+
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
@@ -279,6 +291,9 @@ using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
"All opcodes must fit in the 9-bit ArchOpcodeField.");
using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
+static_assert(
+ AddressingModeField::is_valid(kLastAddressingMode),
+ "All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
@@ -287,8 +302,29 @@ using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
+// AtomicWidthField overlaps with MiscField and is used for the various Atomic
+// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
+// architectures are assumed to be 32bit wide.
+using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+// AtomicMemoryOrderField overlaps with MiscField and is used for the various
+// Atomic opcodes. This field is not used on all architectures. It is used on
+// architectures where the codegen for kSeqCst and kAcqRel differs only in
+// the fences emitted.
+using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
+using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
+// This static assertion serves as an early warning if we are about to exhaust
+// the available opcode space. If we are about to exhaust it, we should start
+// looking into options to compress some opcodes (see
+// https://crbug.com/v8/12093) before we fully run out of available opcodes.
+// Otherwise we risk being unable to land an important security fix or merge
+// back fixes that add new opcodes.
+// It is OK to temporarily reduce the required slack if we have a tracking bug
+// to reduce the number of used opcodes again.
+static_assert(ArchOpcodeField::kMax - kLastArchOpcode >= 16,
+ "We are running close to the number of available opcodes.");
+
} // namespace compiler
} // namespace internal
} // namespace v8
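
Note: the new AtomicWidthField, AtomicMemoryOrderField and AtomicStoreRecordWriteModeField all overlay the 10-bit MiscField at bit 22, alongside the 9-bit ArchOpcodeField and 5-bit AddressingModeField that the added static_asserts police. A minimal re-creation of that base::BitField packing (simplified stand-in; field widths copied from the header above):

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for v8::base::BitField<T, shift, size>.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum class AtomicWidth { kWord32, kWord64 };

    // Same layout as above: opcode in bits 0-8, addressing mode in bits 9-13,
    // atomic width overlapping MiscField at bit 22.
    using ArchOpcodeField = BitField<int, 0, 9>;
    using AddressingModeField = BitField<int, 9, 5>;
    using AtomicWidthField = BitField<AtomicWidth, 22, 2>;

    int main() {
      uint32_t code = ArchOpcodeField::encode(42) |
                      AddressingModeField::encode(3) |
                      AtomicWidthField::encode(AtomicWidth::kWord64);
      std::printf("opcode=%d mode=%d wide=%d\n", ArchOpcodeField::decode(code),
                  AddressingModeField::decode(code),
                  AtomicWidthField::decode(code) == AtomicWidth::kWord64);
    }
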
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index c46d263bae..bdad838f3e 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
- DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
@@ -298,11 +297,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// effects.
return kIsLoadOperation;
- case kArchWordPoisonOnSpeculation:
- // While poisoning operations have no side effect, they must not be
- // reordered relative to branches.
- return kHasSideEffect;
-
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchTailCallCodeObject:
@@ -334,55 +328,56 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsBarrier;
case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier:
return kHasSideEffect;
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return kIsLoadOperation;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return kHasSideEffect;
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16:
- case kWord32AtomicExchangeWord32:
- case kWord32AtomicCompareExchangeInt8:
- case kWord32AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeInt16:
- case kWord32AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeWord32:
- case kWord32AtomicAddInt8:
- case kWord32AtomicAddUint8:
- case kWord32AtomicAddInt16:
- case kWord32AtomicAddUint16:
- case kWord32AtomicAddWord32:
- case kWord32AtomicSubInt8:
- case kWord32AtomicSubUint8:
- case kWord32AtomicSubInt16:
- case kWord32AtomicSubUint16:
- case kWord32AtomicSubWord32:
- case kWord32AtomicAndInt8:
- case kWord32AtomicAndUint8:
- case kWord32AtomicAndInt16:
- case kWord32AtomicAndUint16:
- case kWord32AtomicAndWord32:
- case kWord32AtomicOrInt8:
- case kWord32AtomicOrUint8:
- case kWord32AtomicOrInt16:
- case kWord32AtomicOrUint16:
- case kWord32AtomicOrWord32:
- case kWord32AtomicXorInt8:
- case kWord32AtomicXorUint8:
- case kWord32AtomicXorInt16:
- case kWord32AtomicXorUint16:
- case kWord32AtomicXorWord32:
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
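
Note: in the scheduler these flags drive dependency construction: everything above marked kHasSideEffect (the atomic stores and read-modify-writes) stays ordered relative to other side effects and to loads, while kIsLoadOperation (the atomic loads) only has to stay on its side of the nearest side effect; two loads may still swap. A sketch of that policy, not of V8's scheduler code:

    #include <cstdint>

    enum Flags : uint32_t {
      kNoFlags = 0,
      kIsLoadOperation = 1 << 0,  // e.g. kAtomicLoadWord32
      kHasSideEffect = 1 << 1,    // e.g. kAtomicStoreWord32, kAtomicAddWord32
    };

    // Two adjacent instructions may swap only if no load or side effect would
    // move across a side effect.
    bool CanSwap(uint32_t earlier, uint32_t later) {
      if ((earlier & kHasSideEffect) && (later & kHasSideEffect)) return false;
      if ((earlier & kIsLoadOperation) && (later & kHasSideEffect)) return false;
      if ((earlier & kHasSideEffect) && (later & kIsLoadOperation)) return false;
      return true;  // pure computations and load/load pairs reorder freely
    }

    int main() {
      // Loads may swap with each other, but never with an atomic RMW or store.
      return CanSwap(kIsLoadOperation, kIsLoadOperation) &&
                     !CanSwap(kIsLoadOperation, kHasSideEffect)
                 ? 0
                 : 1;
    }
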
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index f279ea1590..cd2b83ac3d 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector(
size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
- PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
+ EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector(
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
state_values_cache_(zone),
- poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
@@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
- // Argument 1 is used for poison-alias index (encoded in a word-sized
- // immediate. This an index of the operand that aliases with poison register
- // or -1 if there is no aliasing.
- buffer->instruction_args.push_back(g.TempImmediate(-1));
- const size_t poison_alias_index = 1;
- DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
-
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
- // arg 2 : deoptimization id.
- // arg 3 - arg (n + 2) : value inputs to the frame state.
+ // arg 1 : deoptimization id.
+ // arg 2 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
- DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
- // If we do load poisoning and the linkage uses the poisoning register,
- // then we request the input in memory location, and during code
- // generation, we move the input to the register.
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
- unallocated.HasFixedRegisterPolicy()) {
- int reg = unallocated.fixed_register_index();
- if (Register::from_code(reg) == kSpeculationPoisonRegister) {
- buffer->instruction_args[poison_alias_index] = g.TempImmediate(
- static_cast<int32_t>(buffer->instruction_args.size()));
- op = g.UseRegisterOrSlotOrConstant(*iter);
- }
- }
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- frame_state_entries - 1);
+ frame_state_entries);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1509,11 +1489,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
- case IrOpcode::kPoisonedLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
- MarkAsRepresentation(type.representation(), node);
- return VisitPoisonedLoad(node);
- }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1850,12 +1825,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
- case IrOpcode::kTaggedPoisonOnSpeculation:
- return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
- case IrOpcode::kWord32PoisonOnSpeculation:
- return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
- case IrOpcode::kWord64PoisonOnSpeculation:
- return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
@@ -1900,12 +1869,14 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
@@ -2389,30 +2360,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- OperandGenerator g(this);
- Node* input_node = NodeProperties::GetValueInput(node, 0);
- InstructionOperand input = g.UseRegister(input_node);
- InstructionOperand output = g.DefineSameAsFirst(node);
- Emit(kArchWordPoisonOnSpeculation, output, input);
- } else {
- EmitIdentity(node);
- }
-}
-
-void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
@@ -2766,7 +2713,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2792,7 +2740,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
- // !V8_TARGET_ARCH_RISCV64
+ // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2806,11 +2754,12 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
+ // !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -3104,45 +3053,24 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
- if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
- FlagsContinuation cont =
- FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- } else {
- FlagsContinuation cont =
- FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- }
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSelect(Node* node) {
@@ -3186,17 +3114,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
- if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
+ dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
+ VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
@@ -3409,18 +3330,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-// static
-bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return safety_check != IsSafetyCheck::kNoSafetyCheck;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
- }
- UNREACHABLE();
-}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 11a329d1d6..b33de8e856 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -54,13 +54,6 @@ class FlagsContinuation final {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
- static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block) {
- return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
- false_block);
- }
-
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
@@ -71,16 +64,6 @@ class FlagsContinuation final {
extra_args_count);
}
- // Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimizeAndPoison(
- FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
- return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
- reason, node_id, feedback, frame_state, extra_args,
- extra_args_count);
- }
-
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
@@ -98,16 +81,8 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const {
- return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
- }
- bool IsDeoptimize() const {
- return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsPoisoned() const {
- return mode_ == kFlags_branch_and_poison ||
- mode_ == kFlags_deoptimize_and_poison;
- }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
bool IsSelect() const { return mode_ == kFlags_select; }
@@ -226,7 +201,7 @@ class FlagsContinuation final {
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
- DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK(mode == kFlags_branch);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
@@ -245,7 +220,7 @@ class FlagsContinuation final {
frame_state_or_result_(frame_state),
extra_args_(extra_args),
extra_args_count_(extra_args_count) {
- DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
+ DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
@@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
- bool NeedsPoisoning(IsSafetyCheck safety_check) const;
-
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
- void EmitWordPoisonOnSpeculation(Node* node);
-
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
@@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FrameStateInput::Equal>
state_values_cache_;
- PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 63ca78e060..0da8e054ae 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -410,12 +410,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
- case kFlags_branch_and_poison:
- return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
- case kFlags_deoptimize_and_poison:
- return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 204683c973..8698ed8a98 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -935,8 +935,7 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
bool IsTrap() const {
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index e91b7e17d2..258d05955e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -55,17 +55,6 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
-bool IsBlockWithBranchPoisoning(InstructionSequence* code,
- InstructionBlock* block) {
- if (block->PredecessorCount() != 1) return false;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return false;
- Instruction* instr = code->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- return mode == kFlags_branch_and_poison;
-}
-
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
@@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
RpoNumber fw = block->rpo_number();
- if (!IsBlockWithBranchPoisoning(code, block)) {
- bool fallthru = true;
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start || !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
- }
- fallthru = false;
- } else if (instr->IsRet()) {
- TRACE(" ret\n");
- if (fallthru) {
- CHECK_IMPLIES(block->must_construct_frame(),
- block->must_deconstruct_frame());
- // Only handle returns with immediate/constant operands, since
- // they must always be the same for all returns in a function.
- // Dynamic return values might use different registers at
- // different return sites and therefore cannot be shared.
- if (instr->InputAt(0)->IsImmediate()) {
- int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
- ->inline_int32_value();
- // Instructions can be shared only for blocks that share
- // the same |must_deconstruct_frame| attribute.
- if (block->must_deconstruct_frame()) {
- if (empty_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_deconstruct_frame_return_block = block->rpo_number();
- empty_deconstruct_frame_return_size = return_size;
- } else if (empty_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_deconstruct_frame_return_block;
- block->clear_must_deconstruct_frame();
- }
- } else {
- if (empty_no_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_no_deconstruct_frame_return_block =
- block->rpo_number();
- empty_no_deconstruct_frame_return_size = return_size;
- } else if (empty_no_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_no_deconstruct_frame_return_block;
- }
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+          // can't skip instructions with non-redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not in the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else if (instr->IsRet()) {
+ TRACE(" ret\n");
+ if (fallthru) {
+ CHECK_IMPLIES(block->must_construct_frame(),
+ block->must_deconstruct_frame());
+ // Only handle returns with immediate/constant operands, since
+ // they must always be the same for all returns in a function.
+ // Dynamic return values might use different registers at
+ // different return sites and therefore cannot be shared.
+ if (instr->InputAt(0)->IsImmediate()) {
+ int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
+ ->inline_int32_value();
+ // Instructions can be shared only for blocks that share
+ // the same |must_deconstruct_frame| attribute.
+ if (block->must_deconstruct_frame()) {
+ if (empty_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_deconstruct_frame_return_block = block->rpo_number();
+ empty_deconstruct_frame_return_size = return_size;
+ } else if (empty_deconstruct_frame_return_size == return_size) {
+ fw = empty_deconstruct_frame_return_block;
+ block->clear_must_deconstruct_frame();
+ }
+ } else {
+ if (empty_no_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_no_deconstruct_frame_return_block = block->rpo_number();
+ empty_no_deconstruct_frame_return_size = return_size;
+ } else if (empty_no_deconstruct_frame_return_size ==
+ return_size) {
+ fw = empty_no_deconstruct_frame_return_block;
}
}
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount())
- fw = RpoNumber::FromInt(next);
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
+ if (mode == kFlags_branch) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
new file mode 100644
index 0000000000..0397a36145
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -0,0 +1,2636 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(LOONG_dev): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds Loong64-specific methods to convert InstructionOperands.
+class Loong64OperandConverter final : public InstructionOperandConverter {
+ public:
+ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The Single (Float) and Double register namespaces are the same on
+    // LOONG64; both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64?
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
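+  // Decodes the addressing mode encoded in the instruction: kMode_Root is an
+  // immediate offset from kRootRegister, kMode_MRI is base register +
+  // immediate, and kMode_MRR is base register + index register. *first_index
+  // is advanced past the inputs that were consumed.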
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
+ stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {
+ }
+
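+  // Slow path of the write barrier. It exits immediately when the stored
+  // value is not on a page flagged as kPointersToHereAreInteresting;
+  // otherwise it calls the record-write stub (or its wasm variant), saving
+  // and restoring ra when no frame has been built.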
+ void Generate() final {
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode,
+ StubCallMode::kCallWasmRuntimeStub);
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Operand const offset_;
+ Register const value_;
+ RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
+ StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
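+// Out-of-line slow paths for the inline Float{32,64} Min/Max sequences below;
+// the fast path branches here for inputs it does not handle inline (e.g. NaN).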
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kNotEqual:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
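+// dbar(0) is a full memory barrier on LOONG64. The atomic load/store macros
+// below implement atomic accesses as plain loads/stores combined with dbar
+// barriers.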
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dbar(0); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
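+// LL/SC retry loop: load-linked the current value, apply the operation, then
+// attempt a store-conditional; the store-conditional writes zero on failure,
+// in which case the sequence branches back and retries.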
+// Only used for sub_w and sub_d.
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
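+// Sub-word variant: the LL/SC loop operates on the containing aligned word or
+// doubleword; ExtractBits pulls out the addressed lane, the operation is
+// applied to it, and InsertBits merges the result back before the
+// store-conditional.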
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
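+// Compare-exchange loops: when the loaded value does not match the expected
+// value, the sequence exits without attempting the store-conditional.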
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
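+// Helpers for the kIeee754Float64* opcodes: set up a C call frame and call the
+// corresponding ieee754 runtime function with zero integer and one or two
+// double arguments.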
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 2, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 1, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ } while (0)
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
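+// Grows (or, when allowed, shrinks) the stack so that the first unused slot
+// above SP matches new_slot_above_sp, keeping the frame access state in sync.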
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(scratch));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Ld_w(scratch, FieldMemOperand(
+ scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, scratch, Operand(zero_reg));
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Check the function's context matches the context argument.
+ __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
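+    // For naturally aligned 32/64-bit exchanges a single amswap_db instruction
+    // performs the swap atomically (the _db form includes barrier semantics),
+    // so no LL/SC loop is needed.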
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, scratch);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+    // Distance in bytes from start_call to the return address.
+ int offset = __ root_array_available() ? 36 : 80; // 9 or 20 instrs
+#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ offset += 12; // see CallCFunction
+ }
+#endif
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ __ bind(&start_call);
+ __ pcaddi(t7, -4);
+ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ RecordSafepoint(instr->reference_map());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ frame_access_state()->SetFrameAccessToDefault();
+    // Ideally, we should decrement the SP delta to match the change of the
+    // stack pointer in CallCFunction. However, on certain architectures (e.g.
+    // ARM) there may be a stricter alignment requirement, causing the old SP
+    // to be saved on the stack. In those cases we cannot calculate the SP
+    // delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSAAssert:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NONE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan: {
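+      // Leaves the result of (sp [- static offset] > limit) in
+      // TempRegister(0) for the flags continuation to test.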
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Sub_d(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
+ break;
+ }
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
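+      // The store itself is emitted inline; the out-of-line write-barrier
+      // slow path is only taken when the value might be a heap pointer
+      // (Smis are skipped) and the object's page is flagged as
+      // kPointersFromHereAreInteresting.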
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ Register object = i.InputRegister(0);
+ Operand offset(zero_reg);
+ if (addressing_mode == kMode_MRI) {
+ offset = Operand(i.InputInt64(1));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ offset = Operand(i.InputRegister(1));
+ }
+ Register value = i.InputRegister(2);
+
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode());
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ if (addressing_mode == kMode_MRI) {
+ __ St_d(value, MemOperand(object, i.InputInt64(1)));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ __ St_d(value, MemOperand(object, i.InputRegister(1)));
+ }
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ DCHECK_EQ(addressing_mode, kMode_MRI);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Add_d(scratch, object, Operand(i.InputInt64(1)));
+ __ amswap_db_d(zero_reg, value, scratch);
+ }
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ if (FLAG_debug_code) {
+        // Verify that the output register is properly aligned.
+ __ And(scratch, i.OutputRegister(), Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, scratch,
+ Operand(zero_reg));
+ }
+ break;
+ }
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kLoong64Add_w:
+ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Add_d:
+ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64AddOvf_d:
+ __ AddOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Sub_w:
+ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Sub_d:
+ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64SubOvf_d:
+ __ SubOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mul_w:
+ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64MulOvf_w:
+ __ MulOverflow_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mulh_w:
+ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_wu:
+ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_d:
+ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_w:
+ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_wu:
+ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_w:
+ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_wu:
+ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mul_d:
+ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_d:
+ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_du:
+ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_d:
+ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_du:
+ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Alsl_d:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64Alsl_w:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64And:
+ case kLoong64And32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Or:
+ case kLoong64Or32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Nor:
+ case kLoong64Nor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Clz_w:
+ __ clz_w(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Clz_d:
+ __ clz_d(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Sll_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Bstrpick_w:
+ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ case kLoong64Bstrins_w:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_w(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Bstrpick_d: {
+ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ }
+ case kLoong64Bstrins_d:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_d(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Sll_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kLoong64Rotr_w:
+ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Rotr_d:
+ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Tst:
+ __ And(t8, i.InputRegister(0), i.InputOperand(1));
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Cmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Mov:
+ // TODO(LOONG_dev): Should we combine mov/li, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kLoong64Float32Cmp: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(left, right, cc);
+ } break;
+ case kLoong64Float32Add:
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Sub:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Mul:
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Div:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Abs:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Neg:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Sqrt: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32Min: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float32Max: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Cmp: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF64(left, right, cc);
+ } break;
+ case kLoong64Float64Add:
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Sub:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mul:
+ // TODO(LOONG_dev): LOONG64 add special case: right op is -1.0, see arm
+ // port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Div:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mod: {
+ // TODO(turbofan): implement directly.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ break;
+ }
+ case kLoong64Float64Abs:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Neg:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Sqrt: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float64Min: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Max: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64RoundDown: {
+ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundDown: {
+ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTruncate: {
+ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTruncate: {
+ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundUp: {
+ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundUp: {
+ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTiesEven: {
+ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTiesEven: {
+ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ToFloat32:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float32ToFloat64:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Int32ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int32ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat32: {
+ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Int64ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int64ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat64: {
+ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat64: {
+ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat32: {
+ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Float64ToInt32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch);
+ break;
+ }
+ case kLoong64Float32ToInt32: {
+ FPURegister scratch_d = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch_d);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
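+        // out + 1 wraps only when out == INT32_MAX; slt detects the wrap, and
+        // the final add_w turns INT32_MAX into INT32_MIN while leaving every
+        // other result unchanged.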
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_w(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToInt64: {
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ break;
+ }
+ case kLoong64Float64ToInt64: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_d(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(0), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ if (set_overflow_to_min_i64) {
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ __ addi_d(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_d(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float64ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ break;
+ }
+ case kLoong64Float32ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64Float64ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64BitcastDL:
+ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64BitcastLD:
+ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ExtractHighWord32:
+ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kLoong64Float64InsertHighWord32:
+ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kLoong64Ext_w_b:
+ __ ext_w_b(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ext_w_h:
+ __ ext_w_h(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ld_bu:
+ __ Ld_bu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_b:
+ __ Ld_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_b:
+ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_hu:
+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_h:
+ __ Ld_h(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_h:
+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_w:
+ __ Ld_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_wu:
+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_d:
+ __ Ld_d(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_w:
+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64St_d:
+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Fld_s: {
+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kLoong64Fst_s: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_s(ft, operand);
+ break;
+ }
+ case kLoong64Fld_d:
+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Fst_d: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_d(ft, i.MemoryOperand());
+ break;
+ }
+ case kLoong64Dbar: {
+ __ dbar(0);
+ break;
+ }
+ case kLoong64Push:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub_d(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kLoong64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ __ Fld_s(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ abort();
+ }
+ } else {
+ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kLoong64StackClaim: {
+ __ Sub_d(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kLoong64Poke: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kLoong64ByteSwap64: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kLoong64ByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu);
+ break;
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w);
+ break;
+ case kLoong64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu);
+ break;
+ case kLoong64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
+ break;
+ case kLoong64StoreCompressTagged:
+ case kLoong64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
+ break;
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicExchangeUint64:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32,
+ 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
+ break;
+ case kAtomicAddWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Add_d, 64);
+ break;
+ }
+ break;
+ case kAtomicSubWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Sub_d, 64);
+ break;
+ }
+ break;
+ case kAtomicAndWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, And, 64);
+ break;
+ }
+ break;
+ case kAtomicOrWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Or, 64);
+ break;
+ }
+ break;
+ case kAtomicXorWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Xor, 64);
+ break;
+ }
+ break;
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst64, 64); \
+ break; \
+ } \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add_w, Add_d)
+ ATOMIC_BINOP_CASE(Sub, Sub_w, Sub_d)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+
+ case kLoong64Word64AtomicAddUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
+ break;
+ case kLoong64Word64AtomicAndUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicOrUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicXorUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+#undef ATOMIC_BINOP_CASE
+ case kLoong64S128Const:
+ case kLoong64S128Zero:
+ case kLoong64I32x4Splat:
+ case kLoong64I32x4ExtractLane:
+ case kLoong64I32x4Add:
+ case kLoong64I32x4ReplaceLane:
+ case kLoong64I32x4Sub:
+ case kLoong64F64x2Abs:
+ default:
+ break;
+ }
+ return kSuccess;
+}
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ tasm->
+ Loong64OperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+ // LOONG64 does not have condition code flags, so compare and branch are
+  // implemented differently from other architectures. The compare operations
+  // emit loong64 pseudo-instructions, which are handled here by branch
+  // instructions that do the actual comparison. It is essential that the
+  // input registers of a compare pseudo-op are not modified before this
+  // branch op, since they are tested here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, t8, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
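+    // Detect 32-bit overflow: after a 64-bit add/sub of sign-extended inputs,
+    // the upper word of the result must equal the sign-extension of bit 31 of
+    // the lower word. srai_d/srai_w extract those two values for comparison.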
+ __ srai_d(scratch, i.OutputRegister(), 32);
+ __ srai_w(scratch2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, scratch2, Operand(scratch));
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ switch (condition) {
+      // Overflow occurs if the overflow register is negative.
+ case kOverflow:
+ __ Branch(tlabel, lt, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+    // Overflow occurs if the overflow register is not zero.
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kLoong64MulOvf_w, condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
+ }
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Float32Cmp ||
+ instr->arch_opcode() == kLoong64Float64Cmp) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ Loong64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Loong64OperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+ // Loong64 does not have condition code flags, so compare and branch are
+  // implemented differently from other architectures. The compare operations
+ // emit loong64 pseudo-instructions, which are checked and handled here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, t8, 1);
+ } else {
+ __ Sltu(result, zero_reg, t8);
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
+    // The overflow check produces 1 or 0 in the result register.
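+    // Bit 63 of the 64-bit result differs from bit 31 of its low word exactly
+    // when the 32-bit operation overflowed; xoring those two bits yields the
+    // 0/1 result.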
+ __ srli_d(scratch, i.OutputRegister(), 63);
+ __ srli_w(result, i.OutputRegister(), 31);
+ __ xor_(result, scratch, result);
+ if (cc == eq) // Toggle result for not overflow.
+ __ xori(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+    // Overflow occurs if the overflow register is negative.
+ __ slt(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+    // Overflow occurs if the overflow register is not zero.
+ __ Sgtu(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ if (instr->InputAt(1)->IsImmediate()) {
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add_d(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, Operand(right));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Float64Cmp ||
+ instr->arch_opcode() == kLoong64Float32Cmp) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ {
+ __ movcf2gr(result, FCC0);
+ if (!predicate) {
+ __ xori(result, result, 1);
+ }
+ }
+ return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld_d(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld_d(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Add_d(scratch, scratch,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(scratch));
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ Loong64OperandConverter g(this, nullptr);
+
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_slots != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_slots} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_slots != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now, unless they have a
+    // variable number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Add_d(t0, t0, Operand(1)); // Also pop the receiver.
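+    // Movn below implements t0 = max(t0, parameter_slots): it overwrites t0
+    // with parameter_slots whenever the slt result t2 is non-zero.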
+ if (parameter_slots > 1) {
+ __ li(t1, parameter_slots);
+ __ slt(t2, t0, t1);
+ __ Movn(t0, t1, t2);
+ }
+ __ slli_d(t0, t0, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_slots + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_slots);
+ __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ St_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld_d(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, src);
+ __ St_d(scratch, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : scratch;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
+ __ li(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers on LOONG64.
+ }
+ if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ St_d(zero_reg, dst);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ li(scratch, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ St_d(scratch, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ Move(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ Fst_d(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Fst_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsFPRegister()) {
+ __ Fld_d(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Fld_d(temp, src);
+ __ Fst_d(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Register-register.
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(scratch, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(scratch, src);
+ __ Ld_d(src, dst);
+ __ St_d(scratch, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+    // TODO(LOONG_dev): LOONG64 Optimize scratch register usage.
+    // Since the load instruction may itself need a scratch register, we
+    // should not use both scratch registers of UseScratchRegisterScope here.
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Ld_d(scratch, src);
+ __ Fld_d(scratch_d, dst);
+ __ St_d(scratch, dst);
+ __ Fst_d(scratch_d, src);
+ } else if (source->IsFPRegister()) {
+ FPURegister scratch_d = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(scratch_d, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch_d);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(scratch_d, src);
+ __ Fld_d(src, dst);
+ __ Fst_d(scratch_d, dst);
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.base(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize);
+ FPURegister scratch_d = kScratchDoubleReg;
+    __ Fld_d(scratch_d, dst0);  // Save destination in scratch_d.
+ __ Ld_w(scratch, src0); // Then use scratch to copy source to destination.
+ __ St_w(scratch, dst0);
+ __ Ld_w(scratch, src1);
+ __ St_w(scratch, dst1);
+ __ Fst_d(scratch_d, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit LOONG64 we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
new file mode 100644
index 0000000000..f31818cac2
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -0,0 +1,397 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// LOONG64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
+ V(Loong64Word64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
new file mode 100644
index 0000000000..3cfec9c403
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(LOONG_dev): LOONG64 Support instruction scheduler.
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNREACHABLE();
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNREACHABLE();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
new file mode 100644
index 0000000000..454bfa9986
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -0,0 +1,3124 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds loong64-specific methods for generating InstructionOperands.
+class Loong64OperandGenerator final : public OperandGenerator {
+ public:
+ explicit Loong64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kLoong64Sll_w:
+ case kLoong64Srl_w:
+ case kLoong64Sra_w:
+ return is_uint5(value);
+ case kLoong64Sll_d:
+ case kLoong64Srl_d:
+ case kLoong64Sra_d:
+ return is_uint6(value);
+ case kLoong64And:
+ case kLoong64And32:
+ case kLoong64Or:
+ case kLoong64Or32:
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ case kLoong64Tst:
+ return is_uint12(value);
+ case kLoong64Ld_b:
+ case kLoong64Ld_bu:
+ case kLoong64St_b:
+ case kLoong64Ld_h:
+ case kLoong64Ld_hu:
+ case kLoong64St_h:
+ case kLoong64Ld_w:
+ case kLoong64Ld_wu:
+ case kLoong64St_w:
+ case kLoong64Ld_d:
+ case kLoong64St_d:
+ case kLoong64Fld_s:
+ case kLoong64Fst_s:
+ case kLoong64Fld_d:
+ case kLoong64Fst_d:
+ return is_int16(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ Loong64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kLoong64Ld_w;
+ if (g.CanBeImmediate(offset, opcode_)) {
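+      // LOONG64 is little-endian, so the upper (sign-carrying) word of the
+      // 64-bit value lives at offset + 4.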
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+ matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ Loong64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ Loong64OperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode,
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ // TODO(LOONG_dev): LOONG64 S128 LoadSplat
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kLoong64S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kLoong64S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kLoong64S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kLoong64S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kLoong64S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kLoong64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kLoong64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kLoong64S128Load64Zero;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fld_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fld_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64Ld_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ // TODO(loong64): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so the
+    // index immediate must be valid for kLoong64Add_d rather than for the
+    // store opcode.
+ if (g.CanBeImmediate(index, kLoong64Add_d)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MRR;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fst_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fst_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kLoong64St_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kLoong64St_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64St_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64St_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index),
+ g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Bstrpick_w for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
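+      // e.g. And(Shr(x, 8), 0xFF) extracts the 8-bit field at bit 8 of x
+      // with a single Bstrpick_w.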
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
+
+        // Bstrpick_w cannot extract bits past the register size. However,
+        // since shifting the original value would have introduced some zeros,
+        // we can still use Bstrpick_w with a smaller mask and the remaining
+        // bits will be zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove constant loading of inverted mask.
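+      // e.g. x & 0xFFFFFF00 matches with shift == 8 and clears the low
+      // 8 bits of x in place.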
+ Emit(kLoong64Bstrins_w, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Bstrpick_d for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
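+      // e.g. And(Shr(x, 32), 0xFFFFFFFF) becomes a Bstrpick_d extracting the
+      // upper 32 bits of x.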
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+        // Bstrpick_d cannot extract bits past the register size. However,
+        // since shifting the original value would have introduced some zeros,
+        // we can still use Bstrpick_d with a smaller mask and the remaining
+        // bits will be zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ }
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove constant loading of inverted mask, using Bstrins_d. Only
+      // shifts smaller than 32 are matched here.
+ Emit(kLoong64Bstrins_d, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And, true, kLoong64And);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
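+    // Nor(x, 0) computes ~(x | 0) == ~x.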
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Sll_w where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_w, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_w for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
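+      // e.g. Shr(And(x, 0xFF00), 8) becomes a Bstrpick_w extracting bits
+      // [15:8] of x.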
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_w, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sra_w, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Sll_d where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_d, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_d for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_d, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kLoong64Sra_d, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_w, node);
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_d, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_w, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_d, node);
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Alsl_w for (left + (left_of_right << imm)).
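+  // e.g. a + (b << 2) is emitted as one Alsl_w computing (b << 2) + a.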
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_w for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ // Select Alsl_d for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_d for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_w);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_d);
+}
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
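+  // Strength-reduce multiplication by 2^k, 2^k + 1 and 2^k - 1 into a shift,
+  // an Alsl_w, and a shift-plus-subtract sequence respectively.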
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Mulh_d.
+ Emit(kLoong64Mulh_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kLoong64Mul_w, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_w, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_wu, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+      // The Alsl_d macro handles shift amounts that are out of range for the
+      // instruction.
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kLoong64Mul_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Div_d.
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Div_wu, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Mod_d.
+ Emit(kLoong64Mod_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Mod_w, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Mod_wu, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_du, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_du, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Float32ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToInt32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToUint32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // TODO(LOONG_dev): LOONG64 Match ChangeFloat64ToInt32(Float64Round##OP) to
+ // corresponding instruction which does rounding and conversion to
+ // integer format.
+ if (CanCover(node, value)) {
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (!CanCover(value, next)) {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kLoong64Float32ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt64, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float64ToInt64;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+#ifdef USE_SIMULATOR
+ Node* value = node->InputAt(0);
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
+ }
+#else
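+  // On real hardware 32-bit operations keep their results sign-extended in
+  // the full 64-bit register, so the conversion is a no-op.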
+ EmitIdentity(node);
+#endif
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
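+        // A mask that fits in 31 bits has bit 31 clear, so the And also
+        // leaves the upper 32 bits of the result zero.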
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
+ return sa > 0;
+ }
+ return false;
+ }
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8: // Fall through.
+ case MachineRepresentation::kWord16:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+
+ if (value->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, kLoong64Ld_wu, node);
+ return;
+ }
+ }
+ if (ZeroExtendsWord32ToWord64(value)) {
+ EmitIdentity(node);
+ return;
+ }
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0),
+ g.TempImmediate(32));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+            // After Smi untagging there is no need to truncate; combine the
+            // shift sequence.
+ Emit(kLoong64Sra_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kLoong64Int32ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kLoong64Float64ToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat64, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64BitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64BitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kLoong64Float32Add, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kLoong64Float64Add, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kLoong64Float32Sub, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kLoong64Float64Sub, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kLoong64Float32Mul, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kLoong64Float64Mul, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kLoong64Float32Div, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kLoong64Float64Div, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
+ g.UseFixed(node->InputAt(0), f0), g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kLoong64Float32Abs, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kLoong64Float64Abs, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float32Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float64Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kLoong64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kLoong64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0),
+ g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ if (push_count > 0) {
+ // Calculate needed space
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kLoong64StackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ } else if (output.location.GetType() == MachineType::Simd128()) {
+ abort();
+ }
+ int offset = call_descriptor->GetOffsetToReturns();
+ int reverse_slot = -output.location.GetLocation() - offset;
+ Emit(kLoong64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float64Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Loong64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kLoong64Tst) {
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ VisitCompare(selector, opcode, g.UseRegister(left->InputAt(0)),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ }
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kLoong64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ // TODO(LOONG_dev): LOONG64 Add check for debug mode
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+#ifdef USE_SIMULATOR
+// Shared routine for a full Word32 comparison where the upper 32 bits of the
+// inputs cannot be trusted: both operands are shifted left by 32 so that the
+// Word64 comparison only observes the original 32-bit values.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
+ selector->Emit(kLoong64Sll_d, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kLoong64Sll_d, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+#endif
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+  // LOONG64 doesn't have Word32 compare instructions. Instead it relies on
+  // the values in registers being correctly sign-extended and uses a Word64
+  // comparison.
+#ifdef USE_SIMULATOR
+  // When calling a host function from the simulator, an int32 return value is
+  // not sign-extended to int64, because the simulator does not know whether
+  // the function returns an int32 or an int64. So we need to do a full
+  // Word32 comparison in this case.
+ if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
+ return;
+ }
+#endif
+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
+ : kLoong64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
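+ // Tagged stores that need a write barrier use the generic
+ // kArchAtomicStoreWithWriteBarrier opcode; the record-write mode is
+ // encoded in the MiscField.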
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(kTaggedSize, 8);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kLoong64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
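+ // Use unique registers for all operands and reserve three scratch
+ // registers: the exchange expands into a multi-instruction sequence, so
+ // the inputs must not be clobbered before the result is produced.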
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ Loong64OperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // TempRegister(0) is used to store the comparison result.
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation>) is either nullptr, which means there's no use of
+ // the actual value, or was already defined, which means it is
+ // scheduled *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Add_d, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Sub_d, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64MulOvf_w, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64AddOvf_d, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64SubOvf_d, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kLoong64Tst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+ // Continuation could not be combined with a compare; emit a compare against 0.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
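+ // Prefer the jump table only when its weighted space/time cost does not
+ // exceed that of a binary-search sequence of conditional branches.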
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kLoong64Sub_w, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
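+ // Equality with zero is handled by the shared compare-against-zero path,
+ // which may fold the test into a preceding operation.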
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Add_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Add_d, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Sub_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Sub_d, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kLoong64Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Dbar, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kLoong64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F64x2) \
+ V(F32x4) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kLoong64F64x2Abs) \
+ V(F64x2Neg, kLoong64F64x2Neg) \
+ V(F64x2Sqrt, kLoong64F64x2Sqrt) \
+ V(F64x2Ceil, kLoong64F64x2Ceil) \
+ V(F64x2Floor, kLoong64F64x2Floor) \
+ V(F64x2Trunc, kLoong64F64x2Trunc) \
+ V(F64x2NearestInt, kLoong64F64x2NearestInt) \
+ V(I64x2Neg, kLoong64I64x2Neg) \
+ V(I64x2BitMask, kLoong64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \
+ V(F32x4Abs, kLoong64F32x4Abs) \
+ V(F32x4Neg, kLoong64F32x4Neg) \
+ V(F32x4Sqrt, kLoong64F32x4Sqrt) \
+ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kLoong64F32x4Ceil) \
+ V(F32x4Floor, kLoong64F32x4Floor) \
+ V(F32x4Trunc, kLoong64F32x4Trunc) \
+ V(F32x4NearestInt, kLoong64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kLoong64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \
+ V(I32x4Neg, kLoong64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kLoong64I32x4Abs) \
+ V(I32x4BitMask, kLoong64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kLoong64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kLoong64I16x8Abs) \
+ V(I16x8BitMask, kLoong64I16x8BitMask) \
+ V(I8x16Neg, kLoong64I8x16Neg) \
+ V(I8x16Abs, kLoong64I8x16Abs) \
+ V(I8x16Popcnt, kLoong64I8x16Popcnt) \
+ V(I8x16BitMask, kLoong64I8x16BitMask) \
+ V(S128Not, kLoong64S128Not) \
+ V(I64x2AllTrue, kLoong64I64x2AllTrue) \
+ V(I32x4AllTrue, kLoong64I32x4AllTrue) \
+ V(I16x8AllTrue, kLoong64I16x8AllTrue) \
+ V(I8x16AllTrue, kLoong64I8x16AllTrue) \
+ V(V128AnyTrue, kLoong64V128AnyTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kLoong64F64x2Add) \
+ V(F64x2Sub, kLoong64F64x2Sub) \
+ V(F64x2Mul, kLoong64F64x2Mul) \
+ V(F64x2Div, kLoong64F64x2Div) \
+ V(F64x2Min, kLoong64F64x2Min) \
+ V(F64x2Max, kLoong64F64x2Max) \
+ V(F64x2Eq, kLoong64F64x2Eq) \
+ V(F64x2Ne, kLoong64F64x2Ne) \
+ V(F64x2Lt, kLoong64F64x2Lt) \
+ V(F64x2Le, kLoong64F64x2Le) \
+ V(I64x2Eq, kLoong64I64x2Eq) \
+ V(I64x2Ne, kLoong64I64x2Ne) \
+ V(I64x2Add, kLoong64I64x2Add) \
+ V(I64x2Sub, kLoong64I64x2Sub) \
+ V(I64x2Mul, kLoong64I64x2Mul) \
+ V(I64x2GtS, kLoong64I64x2GtS) \
+ V(I64x2GeS, kLoong64I64x2GeS) \
+ V(F32x4Add, kLoong64F32x4Add) \
+ V(F32x4Sub, kLoong64F32x4Sub) \
+ V(F32x4Mul, kLoong64F32x4Mul) \
+ V(F32x4Div, kLoong64F32x4Div) \
+ V(F32x4Max, kLoong64F32x4Max) \
+ V(F32x4Min, kLoong64F32x4Min) \
+ V(F32x4Eq, kLoong64F32x4Eq) \
+ V(F32x4Ne, kLoong64F32x4Ne) \
+ V(F32x4Lt, kLoong64F32x4Lt) \
+ V(F32x4Le, kLoong64F32x4Le) \
+ V(I32x4Add, kLoong64I32x4Add) \
+ V(I32x4Sub, kLoong64I32x4Sub) \
+ V(I32x4Mul, kLoong64I32x4Mul) \
+ V(I32x4MaxS, kLoong64I32x4MaxS) \
+ V(I32x4MinS, kLoong64I32x4MinS) \
+ V(I32x4MaxU, kLoong64I32x4MaxU) \
+ V(I32x4MinU, kLoong64I32x4MinU) \
+ V(I32x4Eq, kLoong64I32x4Eq) \
+ V(I32x4Ne, kLoong64I32x4Ne) \
+ V(I32x4GtS, kLoong64I32x4GtS) \
+ V(I32x4GeS, kLoong64I32x4GeS) \
+ V(I32x4GtU, kLoong64I32x4GtU) \
+ V(I32x4GeU, kLoong64I32x4GeU) \
+ V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S) \
+ V(I16x8Add, kLoong64I16x8Add) \
+ V(I16x8AddSatS, kLoong64I16x8AddSatS) \
+ V(I16x8AddSatU, kLoong64I16x8AddSatU) \
+ V(I16x8Sub, kLoong64I16x8Sub) \
+ V(I16x8SubSatS, kLoong64I16x8SubSatS) \
+ V(I16x8SubSatU, kLoong64I16x8SubSatU) \
+ V(I16x8Mul, kLoong64I16x8Mul) \
+ V(I16x8MaxS, kLoong64I16x8MaxS) \
+ V(I16x8MinS, kLoong64I16x8MinS) \
+ V(I16x8MaxU, kLoong64I16x8MaxU) \
+ V(I16x8MinU, kLoong64I16x8MinU) \
+ V(I16x8Eq, kLoong64I16x8Eq) \
+ V(I16x8Ne, kLoong64I16x8Ne) \
+ V(I16x8GtS, kLoong64I16x8GtS) \
+ V(I16x8GeS, kLoong64I16x8GeS) \
+ V(I16x8GtU, kLoong64I16x8GtU) \
+ V(I16x8GeU, kLoong64I16x8GeU) \
+ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
+ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \
+ V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS) \
+ V(I8x16Add, kLoong64I8x16Add) \
+ V(I8x16AddSatS, kLoong64I8x16AddSatS) \
+ V(I8x16AddSatU, kLoong64I8x16AddSatU) \
+ V(I8x16Sub, kLoong64I8x16Sub) \
+ V(I8x16SubSatS, kLoong64I8x16SubSatS) \
+ V(I8x16SubSatU, kLoong64I8x16SubSatU) \
+ V(I8x16MaxS, kLoong64I8x16MaxS) \
+ V(I8x16MinS, kLoong64I8x16MinS) \
+ V(I8x16MaxU, kLoong64I8x16MaxU) \
+ V(I8x16MinU, kLoong64I8x16MinU) \
+ V(I8x16Eq, kLoong64I8x16Eq) \
+ V(I8x16Ne, kLoong64I8x16Ne) \
+ V(I8x16GtS, kLoong64I8x16GtS) \
+ V(I8x16GeS, kLoong64I8x16GeS) \
+ V(I8x16GtU, kLoong64I8x16GtU) \
+ V(I8x16GeU, kLoong64I8x16GeU) \
+ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \
+ V(S128And, kLoong64S128And) \
+ V(S128Or, kLoong64S128Or) \
+ V(S128Xor, kLoong64S128Xor) \
+ V(S128AndNot, kLoong64S128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ Loong64OperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+ // If all bytes are zeros or all ones, avoid emitting code for generic constants.
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kLoong64S128Zero, dst);
+ } else if (all_ones) {
+ Emit(kLoong64S128AllOnes, dst);
+ } else {
+ Emit(kLoong64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64S128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kLoong64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kLoong64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kLoong64S128Select, node);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kLoong64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kLoong64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kLoong64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kLoong64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kLoong64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kLoong64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kLoong64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kLoong64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kLoong64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kLoong64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kLoong64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kLoong64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kLoong64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kLoong64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kLoong64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kLoong64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kLoong64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kLoong64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kLoong64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
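+ // A swizzle uses only one input, so lane indices are masked to 0..15;
+ // otherwise both inputs may be referenced and indices run 0..31.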
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
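+ // First try the table of native shuffle instructions, then a byte-wise
+ // concatenation or a 32x4 shuffle, before falling back to the generic
+ // 8x16 shuffle.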
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ Loong64OperandGenerator g(this);
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kLoong64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ // We don't want input 0 or input 1 to be the same as the output, since we
+ // will modify the output before doing the calculation.
+ Emit(kLoong64I8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
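+ // A 32-bit shift left by zero sign-extends the low word into the full
+ // 64-bit register, which is exactly the required sign extension.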
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
+}
+
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {} \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {}
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+#define VISIT_EXTADD_PAIRWISE(OPCODE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Loong64OperandGenerator g(this); \
+ Emit(kLoong64ExtAddPairwise, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
+#undef VISIT_EXTADD_PAIRWISE
+
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 2b8197e7e6..736248c824 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -93,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -313,16 +312,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -614,31 +603,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -902,7 +866,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -914,7 +879,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
- __ sw(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ sw(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
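+ // Emit full memory barriers around the plain store for the atomic case.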
+ __ sync();
+ __ sw(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -938,10 +910,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1541,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1574,11 +1536,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1658,7 +1618,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1823,74 +1782,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kWord32AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kWord32AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3675,7 +3634,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3688,7 +3646,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3727,85 +3684,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMipsCmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMipsTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMipsAddOvf:
- case kMipsSubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsMulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsCmpS:
- case kMipsCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4130,7 +4008,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4333,7 +4210,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
UNREACHABLE();
- break;
case Constant::kFloat64:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
@@ -4357,7 +4233,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
- break;
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 48635c9c15..aeb1756227 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
SubuLatency() + AdduLatency();
}
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1657,19 +1655,15 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
switch (op->representation()) {
case MachineRepresentation::kFloat32:
return Latency::SWC1 + SubuLatency(false);
- break;
case MachineRepresentation::kFloat64:
return Sdc1Latency() + SubuLatency(false);
- break;
default: {
UNREACHABLE();
- break;
}
}
} else {
return PushRegisterLatency();
}
- break;
}
case kMipsPeek: {
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1682,7 +1676,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsStackClaim:
return SubuLatency(false);
@@ -1699,41 +1692,40 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1;
case kMipsTst:
return AndLatency(instr->InputAt(1)->IsRegister());
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index c823612246..477c791ca0 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1906,22 +1900,26 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic loads of tagged values with barriers.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1941,7 +1939,10 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic stores of tagged values with barriers.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1949,13 +1950,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
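// Why the tagged representations above can fall through to kAtomicStoreWord32:
// on a 32-bit target without pointer compression a tagged value occupies one
// machine word, so the Word32 atomic store covers it. A minimal illustrative
// check (constants assumed here, mirroring V8's kTaggedSize):
constexpr int kDemoSystemPointerSize = 4;                // mips32
constexpr int kDemoTaggedSize = kDemoSystemPointerSize;  // no pointer compression
static_assert(kDemoTaggedSize == 4, "a tagged value fits a Word32 atomic store");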
@@ -1983,15 +1987,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2021,15 +2025,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2091,12 +2095,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6fce103d24..f6fccd43d2 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -95,7 +95,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -321,16 +320,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -577,31 +566,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
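// For reference, the deleted GenerateSpeculationPoisonFromCodeStartRegister
// implemented the formula given in its comment:
//   difference = (current - expected) | (expected - current)
//   poison     = ~(difference >> (kBitsPerSystemPointer - 1))
// A minimal standalone sketch of that arithmetic in plain C++ (not the V8
// macro assembler; names here are illustrative):
#include <cstdint>

constexpr int kDemoBitsPerSystemPointer = 64;

// All ones when current_pc == expected_pc, all zeros otherwise, with no
// branch: any nonzero difference sets the sign bit of the OR of the two
// subtraction directions, and the arithmetic shift smears it across the word.
uint64_t SpeculationPoisonMask(uint64_t current_pc, uint64_t expected_pc) {
  uint64_t difference =
      (current_pc - expected_pc) | (expected_pc - current_pc);
  int64_t smeared = static_cast<int64_t>(difference) >>
                    (kDemoBitsPerSystemPointer - 1);
  return static_cast<uint64_t>(~smeared);
}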
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -803,7 +767,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -864,7 +827,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -876,7 +840,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
- __ Sd(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ Sd(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ Sd(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
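// Note on the kArchAtomicStoreWithWriteBarrier path added above: bracketing
// the Sd with two sync instructions is the standard MIPS lowering of a
// sequentially consistent store. A rough portable analogue (illustrative
// only, not V8 code):
#include <atomic>
#include <cstdint>

void SeqCstTaggedStore(std::atomic<uint64_t>* slot, uint64_t value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // leading sync
  slot->store(value, std::memory_order_relaxed);        // Sd
  std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing sync
}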
@@ -900,10 +871,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1646,30 +1613,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1679,27 +1640,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1919,149 +1874,172 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
- break;
- case kMips64Word64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kMips64Word64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kMips64Word64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ case kAtomicLoadWord32:
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord32)
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ else
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
break;
case kMips64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kMips64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kMips64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kMips64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ sll(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Addu)
- ATOMIC_BINOP_CASE(Sub, Subu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kMips64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Daddu)
- ATOMIC_BINOP_CASE(Sub, Dsubu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Addu, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
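// The rewritten ATOMIC_BINOP_CASE macro above stamps out every switch case
// for one binop (Int8/Uint8/Int16/Uint16/Word32 plus the Uint64 case) from a
// single invocation. A toy sketch of the same X-macro technique, using
// hypothetical names unrelated to the real opcode list:
#include <cstdio>

#define DEMO_ATOMIC_OP_LIST(V) \
  V(Add)                       \
  V(Sub)                       \
  V(And)

enum DemoOpcode { kDemoAdd, kDemoSub, kDemoAnd };

const char* DemoName(DemoOpcode opcode) {
  switch (opcode) {
#define DEMO_CASE(op) \
  case kDemo##op:     \
    return "Atomic" #op;
    DEMO_ATOMIC_OP_LIST(DEMO_CASE)
#undef DEMO_CASE
  }
  return "unknown";
}

int main() { std::puts(DemoName(kDemoSub)); }  // prints AtomicSub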
@@ -3851,7 +3829,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3864,7 +3841,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3904,104 +3880,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMips64Cmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMips64Tst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMips64Dadd:
- case kMips64Dsub: {
- // Check for overflow creates 1 or 0 for result.
- __ dsrl32(kScratchReg, i.OutputRegister(), 31);
- __ srl(kScratchReg2, i.OutputRegister(), 31);
- __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64DaddOvf:
- case kMips64DsubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64MulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64CmpS:
- case kMips64CmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -4340,7 +4218,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4568,7 +4445,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index e1b40a4be5..30d7f5af75 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,419 +11,393 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64Word64AtomicLoadUint8) \
- V(Mips64Word64AtomicLoadUint16) \
- V(Mips64Word64AtomicLoadUint32) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord8) \
- V(Mips64Word64AtomicStoreWord16) \
- V(Mips64Word64AtomicStoreWord32) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint8) \
- V(Mips64Word64AtomicAddUint16) \
- V(Mips64Word64AtomicAddUint32) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint8) \
- V(Mips64Word64AtomicSubUint16) \
- V(Mips64Word64AtomicSubUint32) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint8) \
- V(Mips64Word64AtomicAndUint16) \
- V(Mips64Word64AtomicAndUint32) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint8) \
- V(Mips64Word64AtomicOrUint16) \
- V(Mips64Word64AtomicOrUint32) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint8) \
- V(Mips64Word64AtomicXorUint16) \
- V(Mips64Word64AtomicXorUint32) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint8) \
- V(Mips64Word64AtomicExchangeUint16) \
- V(Mips64Word64AtomicExchangeUint32) \
- V(Mips64Word64AtomicExchangeUint64) \
- V(Mips64Word64AtomicCompareExchangeUint8) \
- V(Mips64Word64AtomicCompareExchangeUint16) \
- V(Mips64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index c63e0aa3d3..f79e334ed6 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -375,9 +375,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S128Load32Zero:
case kMips64S128Load64Zero:
case kMips64S128LoadLane:
- case kMips64Word64AtomicLoadUint8:
- case kMips64Word64AtomicLoadUint16:
- case kMips64Word64AtomicLoadUint32:
case kMips64Word64AtomicLoadUint64:
return kIsLoadOperation;
@@ -400,37 +397,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Uswc1:
case kMips64Sync:
case kMips64S128StoreLane:
- case kMips64Word64AtomicStoreWord8:
- case kMips64Word64AtomicStoreWord16:
- case kMips64Word64AtomicStoreWord32:
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
- case kMips64Word64AtomicAddUint8:
- case kMips64Word64AtomicAddUint16:
- case kMips64Word64AtomicAddUint32:
case kMips64Word64AtomicAddUint64:
- case kMips64Word64AtomicSubUint8:
- case kMips64Word64AtomicSubUint16:
- case kMips64Word64AtomicSubUint32:
case kMips64Word64AtomicSubUint64:
- case kMips64Word64AtomicAndUint8:
- case kMips64Word64AtomicAndUint16:
- case kMips64Word64AtomicAndUint32:
case kMips64Word64AtomicAndUint64:
- case kMips64Word64AtomicOrUint8:
- case kMips64Word64AtomicOrUint16:
- case kMips64Word64AtomicOrUint32:
case kMips64Word64AtomicOrUint64:
- case kMips64Word64AtomicXorUint8:
- case kMips64Word64AtomicXorUint16:
- case kMips64Word64AtomicXorUint32:
case kMips64Word64AtomicXorUint64:
- case kMips64Word64AtomicExchangeUint8:
- case kMips64Word64AtomicExchangeUint16:
- case kMips64Word64AtomicExchangeUint32:
case kMips64Word64AtomicExchangeUint64:
- case kMips64Word64AtomicCompareExchangeUint8:
- case kMips64Word64AtomicCompareExchangeUint16:
- case kMips64Word64AtomicCompareExchangeUint32:
case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -1352,8 +1326,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return DadduLatency(false) + AndLatency(false) + AssertLatency() +
DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1 + DsubuLatency() + DadduLatency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1740,35 +1712,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kMips64ByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kMips64AssertEqual:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index bec7bbefdc..192f82c9db 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -515,16 +515,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2041,10 +2035,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -2144,12 +2141,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -2157,35 +2185,93 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- addr_reg, g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), addr_reg, g.TempImmediate(0),
- g.UseRegisterOrImmediateZero(value));
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kMips64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kMips64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
}
}
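// AtomicWidthField::encode(width) above packs the 32/64-bit width into the
// InstructionCode word alongside the opcode and addressing mode; the code
// generator later reads it back with AtomicWidthField::decode(opcode). A
// simplified sketch of that bit-field packing (field positions here are
// assumptions for illustration, not the real V8 layout):
#include <cstdint>

template <typename T, int kShift, int kBits>
struct DemoBitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class DemoAtomicWidth : uint32_t { kWord32, kWord64 };
enum DemoAddressingMode : uint32_t { kMode_None, kMode_MRI };

// Assumed layout: opcode in the low bits, then addressing mode, then width.
using DemoAddressingModeField = DemoBitField<DemoAddressingMode, 9, 5>;
using DemoAtomicWidthField = DemoBitField<DemoAtomicWidth, 14, 2>;

constexpr uint32_t kDemoAtomicLoadWord32 = 7;  // arbitrary opcode value
constexpr uint32_t kDemoCode =
    kDemoAtomicLoadWord32 | DemoAddressingModeField::encode(kMode_MRI) |
    DemoAtomicWidthField::encode(DemoAtomicWidth::kWord64);
static_assert(DemoAtomicWidthField::decode(kDemoCode) ==
                  DemoAtomicWidth::kWord64,
              "the width round-trips through the instruction word");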
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2203,12 +2289,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2228,12 +2315,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2252,7 +2340,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2615,163 +2704,93 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2792,15 +2811,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2825,14 +2843,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
- kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kMips64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
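
Editorial sketch (not part of the patch): the MIPS64 hunks above drop the per-width atomic opcodes (kWord32Atomic*, kMips64Word64Atomic* except the Uint64 forms) in favour of the shared kAtomic* codes, and thread an AtomicWidth through the visitors by packing it into the InstructionCode with AtomicWidthField. The round trip of that bitfield pattern, using simplified stand-in types rather than V8's actual base::BitField definitions (the shift value is an assumption for illustration):

#include <cassert>
#include <cstdint>

// Stand-in for the instruction word that carries the arch opcode plus
// auxiliary fields (addressing mode, atomic width, ...).
using InstructionCode = uint32_t;

enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };

// Simplified stand-in for base::BitField<AtomicWidth, shift, size>.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr InstructionCode kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr InstructionCode encode(T value) {
    return static_cast<InstructionCode>(value) << kShift;
  }
  static constexpr T decode(InstructionCode code) {
    return static_cast<T>((code & kMask) >> kShift);
  }
};
using AtomicWidthField = BitField<AtomicWidth, /*kShift=*/22, /*kSize=*/1>;

int main() {
  InstructionCode code = 0x3F;                             // some kAtomic* opcode
  code |= AtomicWidthField::encode(AtomicWidth::kWord64);  // selector side
  // Code-generator side: choose the 32- or 64-bit sequence from the field.
  assert(AtomicWidthField::decode(code) == AtomicWidth::kWord64);
  return 0;
}

The selectors above OR the width into the code at emit time, and the RISC-V code generator further down switches on the decoded width to pick the 32- or 64-bit LL/SC macro for the same kAtomic* opcode.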
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index cf324353f2..0bf29ba686 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@@ -777,25 +766,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, cr0);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = kScratchReg;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ li(scratch, Operand::Zero());
- __ notx(kSpeculationPoisonRegister, scratch);
- __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1164,10 +1134,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()), r0);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kPPC_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -1953,10 +1919,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
break;
case kPPC_BitcastInt32ToFloat32:
- __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0), ip);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_BitcastDoubleToInt64:
@@ -1968,33 +1935,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -2051,25 +2011,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
- case kWord32AtomicLoadInt8:
- case kPPC_AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
+ case kAtomicLoadInt8:
+ case kAtomicLoadInt16:
UNREACHABLE();
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kPPC_AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
@@ -2082,13 +2034,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_AtomicExchangeWord64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
break;
case kPPC_AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
break;
case kPPC_AtomicCompareExchangeUint16:
@@ -2135,6 +2087,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
Register temp1 = r0;
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brw(output, input);
+ break;
+ }
__ rotlwi(temp1, input, 8);
__ rlwimi(temp1, input, 24, 0, 7);
__ rlwimi(temp1, input, 24, 16, 23);
@@ -2143,7 +2099,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev32: {
ASSEMBLE_LOAD_INTEGER_RR(lwbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev32: {
@@ -2156,6 +2111,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp1 = r0;
Register temp2 = kScratchReg;
Register temp3 = i.TempRegister(0);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brd(output, input);
+ break;
+ }
__ rldicl(temp1, input, 32, 32);
__ rotlwi(temp2, input, 8);
__ rlwimi(temp2, input, 24, 0, 7);
@@ -2169,7 +2128,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev64: {
ASSEMBLE_LOAD_INTEGER_RR(ldbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev64: {
@@ -2186,7 +2144,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_F32x4Splat: {
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0),
+ kScratchDoubleReg);
__ mtvsrd(dst, kScratchReg);
__ vspltw(dst, dst, Operand(1));
break;
@@ -2229,7 +2188,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
__ mfvsrd(kScratchReg, kScratchSimd128Reg);
- __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
+ __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg, ip);
break;
}
case kPPC_I64x2ExtractLane: {
@@ -2292,7 +2251,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2), kScratchDoubleReg);
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
__ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
} else {
@@ -3522,7 +3481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ vextractub(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand(15 - i.InputInt8(3)));
+ Operand(15 - i.InputUint8(3)));
__ stxsibx(kScratchSimd128Reg, operand);
break;
}
@@ -3799,21 +3758,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- ArchOpcode op = instr->arch_opcode();
- condition = NegateFlagsCondition(condition);
- __ li(kScratchReg, Operand::Zero());
- __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
- kScratchReg, kSpeculationPoisonRegister, cr0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3940,7 +3884,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
break;
default:
UNREACHABLE();
- break;
}
} else {
if (reg_value != 0) __ li(reg, Operand::Zero());
@@ -4079,7 +4022,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4353,7 +4295,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
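
Editorial sketch (not part of the patch): besides removing the speculation-poison paths, the PPC code generator above gains a Power10 fast path for kPPC_ByteRev32/64 — emit a single brw/brd when CpuFeatures reports support, otherwise fall back to the rotate-and-insert sequence. The same gate-then-fallback shape in plain C++, with the feature probe and builtin as stand-ins for V8's emitter API:

#include <cstdint>
#include <cstdio>

// Stand-in for CpuFeatures::IsSupported(PPC_10_PLUS); kept false here so the
// fallback path is the one exercised when this sketch is run anywhere.
static bool CpuSupportsSingleInstrByteRev() { return false; }

static uint32_t ByteRev32(uint32_t input) {
  if (CpuSupportsSingleInstrByteRev()) {
    // Fast path: one instruction (brw on Power10); modeled with a builtin.
    return __builtin_bswap32(input);
  }
  // Fallback: compose the reversal from shifts and masks, the role played by
  // the rotlwi/rlwimi sequence in the generated code.
  return ((input & 0x000000FFu) << 24) | ((input & 0x0000FF00u) << 8) |
         ((input & 0x00FF0000u) >> 8) | ((input & 0xFF000000u) >> 24);
}

int main() {
  std::printf("%08x\n", ByteRev32(0x11223344u));  // prints 44332211
  return 0;
}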
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 64f532a52b..4182e8b71b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -138,17 +138,6 @@ namespace compiler {
V(PPC_StoreSimd128) \
V(PPC_ByteRev32) \
V(PPC_ByteRev64) \
- V(PPC_CompressSigned) \
- V(PPC_CompressPointer) \
- V(PPC_CompressAny) \
- V(PPC_AtomicStoreUint8) \
- V(PPC_AtomicStoreUint16) \
- V(PPC_AtomicStoreWord32) \
- V(PPC_AtomicStoreWord64) \
- V(PPC_AtomicLoadUint8) \
- V(PPC_AtomicLoadUint16) \
- V(PPC_AtomicLoadWord32) \
- V(PPC_AtomicLoadWord64) \
V(PPC_AtomicExchangeUint8) \
V(PPC_AtomicExchangeUint16) \
V(PPC_AtomicExchangeWord32) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index aeb1377879..0270dc401e 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -112,9 +112,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastDoubleToInt64:
case kPPC_ByteRev32:
case kPPC_ByteRev64:
- case kPPC_CompressSigned:
- case kPPC_CompressPointer:
- case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
case kPPC_F64x2ReplaceLane:
@@ -332,10 +329,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
case kPPC_LoadSimd128:
- case kPPC_AtomicLoadUint8:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
@@ -378,10 +371,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Store64Lane:
return kHasSideEffect;
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
case kPPC_AtomicExchangeUint8:
case kPPC_AtomicExchangeUint16:
case kPPC_AtomicExchangeWord32:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index c74211aa38..bfa7c0a6e0 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -167,9 +167,9 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
+static void VisitLoadCommon(InstructionSelector* selector, Node* node,
+ LoadRepresentation load_rep) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
InstructionCode opcode = kArchNop;
@@ -229,54 +229,51 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad &&
- poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
-
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(offset), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset),
+ g.UseImmediate(base), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseRegister(offset), g.UseImmediate(is_atomic));
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- PPCOperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
+ // TODO(miladfarca): maybe use atomic_order?
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
node->opcode() == IrOpcode::kWord64AtomicStore);
- MachineRepresentation rep;
+ MachineRepresentation rep = store_rep.representation();
WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
- if (is_atomic) {
- rep = AtomicStoreRepresentationOf(node->op());
- } else {
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ if (!is_atomic) {
write_barrier_kind = store_rep.write_barrier_kind();
- rep = store_rep.representation();
}
if (FLAG_enable_unconditional_write_barriers &&
@@ -312,7 +309,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
CHECK_EQ(is_atomic, false);
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
@@ -346,7 +343,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -374,21 +370,26 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
}
}
}
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1956,16 +1957,28 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
Emit(kPPC_Sync, g.NoOutput());
}
-void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
-void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -1991,11 +2004,11 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
@@ -2052,11 +2065,11 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
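
Editorial sketch (not part of the patch): the PPC selector hunks above fold the plain and atomic load/store visitors into VisitLoadCommon/VisitStoreCommon, with the atomic variants passing their representation and, for stores, a memory order that the common path does not yet consume (per the TODO). The shape of that shared path with simplified stand-in types — std::optional instead of base::Optional, and is_atomic keyed off the order's presence purely for illustration, whereas the patched code still derives it from the node opcode:

#include <cstdio>
#include <optional>

enum class MachineRepresentation { kWord8, kWord16, kWord32, kWord64 };
enum class AtomicMemoryOrder { kAcqRel, kSeqCst };

struct StoreRepresentation {
  MachineRepresentation representation;
  bool has_write_barrier;
};

// Shared emit path: plain stores pass std::nullopt, atomic stores pass their
// order; the emitted instruction carries an is_atomic flag either way.
static void VisitStoreCommon(StoreRepresentation rep,
                             std::optional<AtomicMemoryOrder> order) {
  bool is_atomic = order.has_value();
  std::printf("rep=%d is_atomic=%d\n",
              static_cast<int>(rep.representation), is_atomic);
}

int main() {
  StoreRepresentation word32{MachineRepresentation::kWord32, false};
  VisitStoreCommon(word32, std::nullopt);                // VisitStore
  VisitStoreCommon(word32, AtomicMemoryOrder::kSeqCst);  // VisitWord32AtomicStore
  return 0;
}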
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 2d92ae1567..559378b19b 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -106,7 +106,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates
- break;
}
UNREACHABLE();
}
@@ -307,17 +306,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- RiscvOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -336,7 +324,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -351,7 +339,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
size, bin_instr, representation) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
} else { \
@@ -380,7 +368,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
Label exchange; \
__ sync(); \
__ bind(&exchange); \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
__ Move(i.TempRegister(1), i.InputRegister(2)); \
__ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
@@ -392,7 +380,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label exchange; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -419,7 +407,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&compareExchange); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -438,7 +426,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -570,31 +558,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -887,10 +850,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1094,17 +1053,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvPopcnt32: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt32(dst, src);
+ __ Popcnt32(dst, src, kScratchReg);
} break;
case kRiscvPopcnt64: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt64(dst, src);
+ __ Popcnt64(dst, src, kScratchReg);
} break;
case kRiscvShl32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sll32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sll32(i.OutputRegister(), i.InputRegister(0),
@@ -1113,8 +1071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvShr32:
if (instr->InputAt(1)->IsRegister()) {
- __ Srl32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Srl32(i.OutputRegister(), i.InputRegister(0),
@@ -1123,8 +1080,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvSar32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sra32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sra32(i.OutputRegister(), i.InputRegister(0),
@@ -1553,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1586,27 +1536,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLd:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1625,7 +1569,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvULoadFloat: {
- __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvStoreFloat: {
@@ -1645,14 +1589,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0f);
}
- __ UStoreFloat(ft, operand);
+ __ UStoreFloat(ft, operand, kScratchReg);
break;
}
case kRiscvLoadDouble:
__ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kRiscvULoadDouble:
- __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kRiscvStoreDouble: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
@@ -1667,7 +1611,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0);
}
- __ UStoreDouble(ft, i.MemoryOperand());
+ __ UStoreDouble(ft, i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvSync: {
@@ -1723,156 +1667,175 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvByteSwap64: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8, kScratchReg);
break;
}
case kRiscvByteSwap32: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4, kScratchReg);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kRiscvWord64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kRiscvWord64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kRiscvWord64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
- break;
case kRiscvWord64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kRiscvWord64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kRiscvWord64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kRiscvWord64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kRiscvWord64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Add32)
- ATOMIC_BINOP_CASE(Sub, Sub32)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kRiscvWord64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Add64)
- ATOMIC_BINOP_CASE(Sub, Sub64)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Add32, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub32, Sub64)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kRiscvAssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -1905,7 +1868,543 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressAnyTagged(result, operand);
break;
}
+ case kRiscvRvvSt: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+ break;
+ }
+ case kRiscvRvvLd: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register src = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(src, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
+ break;
+ }
+ case kRiscvS128Const: {
+ Simd128Register dst = i.OutputSimd128Register();
+ uint8_t imm[16];
+ *reinterpret_cast<uint64_t*>(imm) =
+ make_uint64(i.InputUint32(1), i.InputUint32(0));
+ *(reinterpret_cast<uint64_t*>(imm) + 1) =
+ make_uint64(i.InputUint32(3), i.InputUint32(2));
+ __ WasmRvvS128const(dst, imm);
+ break;
+ }
+ case kRiscvI64x2Add: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Add: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Add: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Add: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI64x2Sub: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Sub: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Sub: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Sub: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128And: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Or: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Xor: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Not: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvS128AndNot: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvI32x4ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Splat: {
+ (__ VU).set(kScratchReg, E8, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI16x8Splat: {
+ (__ VU).set(kScratchReg, E16, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Abs: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vsub_vv(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0), Mask);
+ break;
+ }
+ case kRiscvI8x16Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtS: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GtS: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Shl: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
+ break;
+ }
+ case kRiscvI16x8Shl: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
+ break;
+ }
+ case kRiscvI32x4Shl: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
+ break;
+ }
+ case kRiscvI64x2Shl: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_int5(i.InputInt6(1))) {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ } else {
+ __ li(kScratchReg, i.InputInt6(1));
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
+ break;
+ }
+ case kRiscvI8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E16, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI8x16BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvV128AnyTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label t;
+ __ vmv_sx(kSimd128ScratchReg, zero_reg);
+ __ vredmaxu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beq(dst, zero_reg, &t);
+ __ li(dst, 1);
+ __ bind(&t);
+ break;
+ }
+ case kRiscvI64x2AllTrue: {
+ __ VU.set(kScratchReg, E64, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI32x4AllTrue: {
+ __ VU.set(kScratchReg, E32, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI16x8AllTrue: {
+ __ VU.set(kScratchReg, E16, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16AllTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16Shuffle: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ li(kScratchReg, 1);
+ __ vmv_vx(v0, kScratchReg);
+ __ li(kScratchReg, imm1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ __ li(kScratchReg, imm2);
+ __ vsll_vi(v0, v0, 1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ __ VU.set(kScratchReg, E8, m1);
+ if (dst == src0) {
+ __ vmv_vv(kSimd128ScratchReg2, src0);
+ src0 = kSimd128ScratchReg2;
+ } else if (dst == src1) {
+ __ vmv_vv(kSimd128ScratchReg2, src1);
+ src1 = kSimd128ScratchReg2;
+ }
+ __ vrgather_vv(dst, src0, kSimd128ScratchReg);
+ __ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ __ vrgather_vv(kSimd128ScratchReg, src1, kSimd128ScratchReg);
+ __ vor_vv(dst, dst, kSimd128ScratchReg);
+ break;
+ }
default:
+#ifdef DEBUG
+ switch (arch_opcode) {
+#define Print(name) \
+ case k##name: \
+ printf("k%s", #name); \
+ break;
+ TARGET_ARCH_OPCODE_LIST(Print);
+#undef Print
+ default:
+ break;
+ }
+#endif
UNIMPLEMENTED();
}
return kSuccess;
@@ -1916,6 +2415,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
+bool IsIncludeEqual(Condition cc) {
+ switch (cc) {
+ case equal:
+ case greater_equal:
+ case less_equal:
+ case Uless_equal:
+ case Ugreater_equal:
+ return true;
+ default:
+ return false;
+ }
+}
+
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -1952,7 +2464,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvMulOvf32) {
// Overflow occurs if overflow register is not zero
@@ -1965,14 +2476,17 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kRiscvMulOvf32, condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kRiscvCmpZero) {
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+ if (i.InputOrZeroRegister(0) == zero_reg && IsIncludeEqual(cc)) {
+ __ Branch(tlabel);
+ } else if (i.InputOrZeroRegister(0) != zero_reg) {
+ __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+ }
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
Register lhs_register = sp;
@@ -2011,110 +2525,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- RiscvOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kRiscvCmp: {
- __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvCmpZero: {
- __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kRiscvAdd64:
- case kRiscvSub64: {
- // Check for overflow creates 1 or 0 for result.
- __ Srl64(kScratchReg, i.OutputRegister(), 63);
- __ Srl32(kScratchReg2, i.OutputRegister(), 31);
- __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvAddOvf64:
- case kRiscvSubOvf64: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvMulOvf32: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvCmpS:
- case kRiscvCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- } else {
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -2489,7 +2899,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2735,7 +3144,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
@@ -2765,7 +3173,21 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ VRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ VRegister dst = g.ToSimd128Register(destination);
+ __ vmv_vv(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vs(src, dst, 0, E8);
+ }
} else {
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
@@ -2786,7 +3208,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ __ Add64(src_reg, src.rm(), src.offset());
+ }
+ if (destination->IsSimd128Register()) {
+ __ vl(g.ToSimd128Register(destination), src_reg, 0, E8);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ VRegister temp = kSimd128ScratchReg;
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vl(temp, src_reg, 0, E8);
+ __ vs(temp, dst, 0, E8);
+ }
} else {
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat32) {
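Note on the RVV lowerings added above: kRiscvV128AnyTrue reduces the source vector with an unsigned max (vredmaxu_vs) seeded with zero and branches on the scalar result, while the I8x16/I16x8/I32x4/I64x2 AllTrue cases reduce with an unsigned min (vredminu_vs) seeded with -1. A scalar model of the two predicates, with helper names that are ours rather than V8's:

    // Scalar sketch of the AnyTrue/AllTrue reductions emitted above.
    #include <cstddef>
    #include <cstdint>

    // AnyTrue: the unsigned max over all lanes is non-zero exactly when at
    // least one lane is non-zero (mirrors vredmaxu_vs + beq against zero_reg).
    bool AnyTrueModel(const uint8_t* lanes, size_t n) {
      uint8_t max = 0;
      for (size_t i = 0; i < n; ++i) {
        if (lanes[i] > max) max = lanes[i];
      }
      return max != 0;
    }

    // AllTrue: the unsigned min over all lanes is non-zero exactly when no
    // lane is zero; seeding the accumulator with all ones plays the role of
    // the `li kScratchReg, -1` + vmv_sx sequence (mirrors vredminu_vs + beqz).
    bool AllTrueModel(const uint32_t* lanes, size_t n) {
      uint32_t min = UINT32_MAX;
      for (size_t i = 0; i < n; ++i) {
        if (lanes[i] < min) min = lanes[i];
      }
      return min != 0;
    }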
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 2f51c2b1c7..0c8d99a8e8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -355,7 +355,7 @@ namespace compiler {
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
- V(RiscvS8x16Shuffle) \
+ V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
@@ -373,8 +373,8 @@ namespace compiler {
V(RiscvS128Load32x2U) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
- V(RiscvMsaLd) \
- V(RiscvMsaSt) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
@@ -387,41 +387,14 @@ namespace compiler {
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint8) \
- V(RiscvWord64AtomicLoadUint16) \
- V(RiscvWord64AtomicLoadUint32) \
V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord8) \
- V(RiscvWord64AtomicStoreWord16) \
- V(RiscvWord64AtomicStoreWord32) \
V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint8) \
- V(RiscvWord64AtomicAddUint16) \
- V(RiscvWord64AtomicAddUint32) \
V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint8) \
- V(RiscvWord64AtomicSubUint16) \
- V(RiscvWord64AtomicSubUint32) \
V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint8) \
- V(RiscvWord64AtomicAndUint16) \
- V(RiscvWord64AtomicAndUint32) \
V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint8) \
- V(RiscvWord64AtomicOrUint16) \
- V(RiscvWord64AtomicOrUint32) \
V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint8) \
- V(RiscvWord64AtomicXorUint16) \
- V(RiscvWord64AtomicXorUint32) \
V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint8) \
- V(RiscvWord64AtomicExchangeUint16) \
- V(RiscvWord64AtomicExchangeUint32) \
V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint8) \
- V(RiscvWord64AtomicCompareExchangeUint16) \
- V(RiscvWord64AtomicCompareExchangeUint32) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
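The V(...) entries above are consumed through an X-macro: each name becomes a kRiscv* enumerator, and the same list drives the DEBUG printer added in the code generator's default: case. A minimal sketch of the pattern with placeholder names (not the real TARGET_ARCH_OPCODE_LIST):

    // X-macro sketch: one list expanded once into an enum and once into a
    // name printer, the same way TARGET_ARCH_OPCODE_LIST(Print) is used above.
    #define DEMO_OPCODE_LIST(V) \
      V(RiscvRvvLd)             \
      V(RiscvRvvSt)

    enum DemoOpcode {
    #define DECLARE_OPCODE(name) k##name,
      DEMO_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    inline const char* DemoOpcodeName(DemoOpcode opcode) {
      switch (opcode) {
    #define PRINT_OPCODE(name) \
      case k##name:            \
        return "k" #name;
        DEMO_OPCODE_LIST(PRINT_OPCODE)
    #undef PRINT_OPCODE
      }
      return "unknown";
    }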
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 157b11c930..471628b1f8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -318,7 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x2Reverse:
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
- case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Shuffle:
case kRiscvI8x16Swizzle:
case kRiscvSar32:
case kRiscvSignExtendByte:
@@ -352,7 +352,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvLw:
case kRiscvLoadFloat:
case kRiscvLwu:
- case kRiscvMsaLd:
+ case kRiscvRvvLd:
case kRiscvPeek:
case kRiscvUld:
case kRiscvULoadDouble:
@@ -372,9 +372,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadLane:
- case kRiscvWord64AtomicLoadUint8:
- case kRiscvWord64AtomicLoadUint16:
- case kRiscvWord64AtomicLoadUint32:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
@@ -383,7 +380,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvModD:
case kRiscvModS:
- case kRiscvMsaSt:
+ case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
case kRiscvSd:
@@ -399,37 +396,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
- case kRiscvWord64AtomicStoreWord8:
- case kRiscvWord64AtomicStoreWord16:
- case kRiscvWord64AtomicStoreWord32:
case kRiscvWord64AtomicStoreWord64:
- case kRiscvWord64AtomicAddUint8:
- case kRiscvWord64AtomicAddUint16:
- case kRiscvWord64AtomicAddUint32:
case kRiscvWord64AtomicAddUint64:
- case kRiscvWord64AtomicSubUint8:
- case kRiscvWord64AtomicSubUint16:
- case kRiscvWord64AtomicSubUint32:
case kRiscvWord64AtomicSubUint64:
- case kRiscvWord64AtomicAndUint8:
- case kRiscvWord64AtomicAndUint16:
- case kRiscvWord64AtomicAndUint32:
case kRiscvWord64AtomicAndUint64:
- case kRiscvWord64AtomicOrUint8:
- case kRiscvWord64AtomicOrUint16:
- case kRiscvWord64AtomicOrUint32:
case kRiscvWord64AtomicOrUint64:
- case kRiscvWord64AtomicXorUint8:
- case kRiscvWord64AtomicXorUint16:
- case kRiscvWord64AtomicXorUint32:
case kRiscvWord64AtomicXorUint64:
- case kRiscvWord64AtomicExchangeUint8:
- case kRiscvWord64AtomicExchangeUint16:
- case kRiscvWord64AtomicExchangeUint32:
case kRiscvWord64AtomicExchangeUint64:
- case kRiscvWord64AtomicCompareExchangeUint8:
- case kRiscvWord64AtomicCompareExchangeUint16:
- case kRiscvWord64AtomicCompareExchangeUint32:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
case kRiscvS128StoreLane:
@@ -1169,8 +1142,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Add64Latency(false) + AndLatency(false) + AssertLatency() +
Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
1 + Sub64Latency() + Add64Latency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1541,35 +1512,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kRiscvByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kRiscvAssertEqual:
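The per-width kRiscvWord64Atomic*Uint8/16/32 opcodes dropped above are folded into the generic kAtomic* codes; the operation width now travels in a separate field of the InstructionCode, which is what the selector hunks below do with AtomicWidthField::encode(width). An illustrative packing with made-up field positions (V8's real layout comes from its BitField helpers, not from these shifts):

    // Illustrative only: opcode plus orthogonal fields packed into one word.
    #include <cstdint>

    using InstructionCodeModel = uint32_t;

    constexpr uint32_t EncodeOpcode(uint32_t opcode) { return opcode & 0x1FF; }
    constexpr uint32_t EncodeAddressingMode(uint32_t mode) { return (mode & 0xF) << 9; }
    constexpr uint32_t EncodeAtomicWidth(uint32_t width) { return (width & 0x3) << 13; }

    // Mirrors `opcode | AddressingModeField::encode(kMode_MRI) |
    //          AtomicWidthField::encode(width)` in the selector changes below;
    // the numeric enumerator values here are placeholders.
    constexpr InstructionCodeModel kExample =
        EncodeOpcode(/*kAtomicAddWord32=*/42) |
        EncodeAddressingMode(/*kMode_MRI=*/1) |
        EncodeAtomicWidth(/*AtomicWidth::kWord64=*/1);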
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 72706201e2..85d61aa02f 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -475,7 +475,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kRiscvLd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
@@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -560,7 +554,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kRiscvSd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
@@ -569,7 +563,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1639,7 +1632,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = kRiscvUld;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1693,7 +1686,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
opcode = kRiscvUsd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1789,7 +1782,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node, true);
NumberBinopMatcher n(node, true);
if (m.right().Is(0) || n.right().IsZero()) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1802,7 +1796,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kUnsignedGreaterThanOrEqual: {
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
@@ -1811,7 +1806,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
default:
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1827,10 +1823,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -1930,16 +1929,18 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
RiscvOperandGenerator g(selector);
- selector->EmitWithContinuation(kRiscvCmpZero, g.UseRegister(value), cont);
+ selector->EmitWithContinuation(kRiscvCmpZero,
+ g.UseRegisterOrImmediateZero(value), cont);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -1947,20 +1948,22 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
} else {
@@ -1968,14 +1971,15 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1993,12 +1997,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2018,12 +2023,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2042,7 +2048,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2404,163 +2411,201 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicLoadUint8;
+ opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicLoadUint16;
+ opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicLoadUint32;
+ opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicLoadUint64;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kRiscv64LdDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kRiscv64LdDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kRiscv64LdDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ opcode = kRiscvWord64AtomicLoadUint64;
+ } else {
+ opcode = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicLoadWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicStoreWord64;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicStoreWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2581,15 +2626,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2614,14 +2658,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
- kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kRiscvWord64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2640,6 +2684,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2844,6 +2889,7 @@ SIMD_VISIT_SPLAT(F64x2)
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
@@ -2890,73 +2936,75 @@ struct ShuffleEntry {
ArchOpcode opcode;
};
-static const ShuffleEntry arch_shuffles[] = {
- {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
- kRiscvS32x4InterleaveRight},
- {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveLeft},
- {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
- kRiscvS32x4PackEven},
- {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
- kRiscvS32x4PackOdd},
- {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
- kRiscvS32x4InterleaveEven},
- {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveOdd},
-
- {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
- kRiscvS16x8InterleaveRight},
- {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
- kRiscvS16x8InterleaveLeft},
- {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
- kRiscvS16x8PackEven},
- {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
- kRiscvS16x8PackOdd},
- {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
- kRiscvS16x8InterleaveEven},
- {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
- kRiscvS16x8InterleaveOdd},
- {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
- kRiscvS16x4Reverse},
- {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
- kRiscvS16x2Reverse},
-
- {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
- kRiscvS8x16InterleaveRight},
- {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
- kRiscvS8x16InterleaveLeft},
- {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
- kRiscvS8x16PackEven},
- {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
- kRiscvS8x16PackOdd},
- {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
- kRiscvS8x16InterleaveEven},
- {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
- kRiscvS8x16InterleaveOdd},
- {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
- {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
- kRiscvS8x2Reverse}};
-
-bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, bool is_swizzle,
- ArchOpcode* opcode) {
- uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
- for (size_t i = 0; i < num_entries; ++i) {
- const ShuffleEntry& entry = table[i];
- int j = 0;
- for (; j < kSimd128Size; ++j) {
- if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
- break;
- }
- }
- if (j == kSimd128Size) {
- *opcode = entry.opcode;
- return true;
- }
- }
- return false;
-}
+// static const ShuffleEntry arch_shuffles[] = {
+// {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+// kRiscvS32x4InterleaveRight},
+// {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveLeft},
+// {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+// kRiscvS32x4PackEven},
+// {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+// kRiscvS32x4PackOdd},
+// {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+// kRiscvS32x4InterleaveEven},
+// {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveOdd},
+
+// {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+// kRiscvS16x8InterleaveRight},
+// {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveLeft},
+// {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+// kRiscvS16x8PackEven},
+// {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+// kRiscvS16x8PackOdd},
+// {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+// kRiscvS16x8InterleaveEven},
+// {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveOdd},
+// {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+// kRiscvS16x4Reverse},
+// {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+// kRiscvS16x2Reverse},
+
+// {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+// kRiscvS8x16InterleaveRight},
+// {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+// kRiscvS8x16InterleaveLeft},
+// {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+// kRiscvS8x16PackEven},
+// {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+// kRiscvS8x16PackOdd},
+// {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+// kRiscvS8x16InterleaveEven},
+// {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+// kRiscvS8x16InterleaveOdd},
+// {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+// kRiscvS8x8Reverse},
+// {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+// kRiscvS8x4Reverse},
+// {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+// kRiscvS8x2Reverse}};
+
+// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+// size_t num_entries, bool is_swizzle,
+// ArchOpcode* opcode) {
+// uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+// for (size_t i = 0; i < num_entries; ++i) {
+// const ShuffleEntry& entry = table[i];
+// int j = 0;
+// for (; j < kSimd128Size; ++j) {
+// if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+// break;
+// }
+// }
+// if (j == kSimd128Size) {
+// *opcode = entry.opcode;
+// return true;
+// }
+// }
+// return false;
+// }
} // namespace
@@ -2964,29 +3012,29 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
- uint8_t shuffle32x4[4];
- ArchOpcode opcode;
- if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- is_swizzle, &opcode)) {
- VisitRRR(this, opcode, node);
- return;
- }
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t offset;
RiscvOperandGenerator g(this);
- if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
- Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
- g.UseRegister(input0), g.UseImmediate(offset));
- return;
- }
- if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
- return;
- }
- Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // uint8_t shuffle32x4[4];
+ // ArchOpcode opcode;
+ // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ // is_swizzle, &opcode)) {
+ // VisitRRR(this, opcode, node);
+ // return;
+ // }
+ // uint8_t offset;
+ // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ // Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ // g.UseRegister(input0), g.UseImmediate(offset));
+ // return;
+ // }
+ // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ // Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // g.UseRegister(input1),
+ // g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ // return;
+ // }
+ Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
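The shuffle path above hands the 16 lane indices to the code generator as four 32-bit immediates; wasm::SimdShuffle::Pack4Lanes packs four bytes per immediate (assumed here to place shuffle[0] in the low byte), and the kRiscvI8x16Shuffle case earlier rebuilds the two 64-bit halves it writes into the index vector before the vrgather_vv steps. A sketch of that packing with our own helper names:

    // Sketch of the lane-index packing used between the selector and the
    // code generator; Pack4LanesModel mimics wasm::SimdShuffle::Pack4Lanes.
    #include <cstdint>

    uint32_t Pack4LanesModel(const uint8_t* shuffle) {
      return static_cast<uint32_t>(shuffle[0]) |
             (static_cast<uint32_t>(shuffle[1]) << 8) |
             (static_cast<uint32_t>(shuffle[2]) << 16) |
             (static_cast<uint32_t>(shuffle[3]) << 24);
    }

    uint64_t MakeUint64Model(uint32_t hi, uint32_t lo) {
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    // In the kRiscvI8x16Shuffle case of the code generator:
    //   imm1 = make_uint64(InputInt32(3), InputInt32(2))  -> lane indices 0..7
    //   imm2 = make_uint64(InputInt32(5), InputInt32(4))  -> lane indices 8..15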
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 685293169d..3c2c3d6c06 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = r1;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ mov(kSpeculationPoisonRegister, Operand::Zero());
- __ mov(r0, Operand(-1));
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ AndP(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1395,10 +1367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kS390_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -2155,7 +2123,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadS8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
@@ -2173,35 +2140,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadU8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadU16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadS16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadU32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadS32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2238,7 +2197,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2258,7 +2216,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
__ vl(i.OutputSimd128Register(), operand, Condition(0));
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kS390_StoreWord8:
@@ -2327,40 +2284,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), mem);
break;
}
- case kS390_Word64AtomicExchangeUint8:
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU8(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt8) {
+ if (opcode == kAtomicExchangeInt8) {
__ LoadS8(output, output);
} else {
__ LoadU8(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint16:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU16(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt16) {
+ if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint32:
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2373,34 +2327,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bne(&do_cs, Label::kNear);
break;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
break;
- case kS390_Word64AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
break;
- case kS390_Word64AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
break;
- case kS390_Word64AtomicCompareExchangeUint32:
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS8(result, result); \
+ __ LoadS8(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint8: \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
@@ -2408,15 +2358,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
true); \
}); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS16(result, result); \
+ __ LoadS16(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint16: \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
@@ -2430,24 +2379,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
- case kS390_Word64AtomicAddUint32:
- case kWord32AtomicAddWord32:
+ case kAtomicAddWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(laa);
break;
- case kS390_Word64AtomicSubUint32:
- case kWord32AtomicSubWord32:
+ case kAtomicSubWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
break;
- case kS390_Word64AtomicAndUint32:
- case kWord32AtomicAndWord32:
+ case kAtomicAndWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lan);
break;
- case kS390_Word64AtomicOrUint32:
- case kWord32AtomicOrWord32:
+ case kAtomicOrWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lao);
break;
- case kS390_Word64AtomicXorUint32:
- case kWord32AtomicXorWord32:
+ case kAtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
case kS390_Word64AtomicAddUint64:
@@ -2482,77 +2426,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name) \
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register)
+
+#define EMIT_SIMD_BINOP(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.InputSimd128Register(1)); \
+ i.Input##stype(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
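SIMD_BINOP_LIST/EMIT_SIMD_BINOP is an X-macro pair: the list now carries a second column naming the operand accessor (i.Input##stype(1)), so element-wise ops read i.InputSimd128Register(1) while the shift entries read i.InputRegister(1), which is how the former VECTOR_SHIFT cases removed below fold into the same table. A standalone sketch of the pattern, using made-up names rather than V8 identifiers:

#include <cstdio>

// Each list entry is a (name, operand-kind) pair; the EMIT macro stamps out
// one definition per entry, with the second column controlling the parameter
// type -- the same trick the list above uses to pick the operand accessor.
#define DEMO_BINOP_LIST(V) \
  V(Add, int)              \
  V(Shl, unsigned)

#define EMIT_DEMO_BINOP(name, type)                          \
  void Emit##name(type rhs) {                                \
    std::printf(#name ": rhs=%d (declared as " #type ")\n",  \
                static_cast<int>(rhs));                      \
  }
DEMO_BINOP_LIST(EMIT_DEMO_BINOP)
#undef EMIT_DEMO_BINOP
#undef DEMO_BINOP_LIST

int main() {
  EmitAdd(1);
  EmitShl(2u);
  return 0;
}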
@@ -2657,64 +2613,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector shifts
-#define VECTOR_SHIFT(op, mode) \
- { \
- __ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0), \
- Condition(mode)); \
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), \
- Condition(mode)); \
- __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
- }
- case kS390_I64x2Shl: {
- VECTOR_SHIFT(veslv, 3);
- break;
- }
- case kS390_I64x2ShrS: {
- VECTOR_SHIFT(vesrav, 3);
- break;
- }
- case kS390_I64x2ShrU: {
- VECTOR_SHIFT(vesrlv, 3);
- break;
- }
- case kS390_I32x4Shl: {
- VECTOR_SHIFT(veslv, 2);
- break;
- }
- case kS390_I32x4ShrS: {
- VECTOR_SHIFT(vesrav, 2);
- break;
- }
- case kS390_I32x4ShrU: {
- VECTOR_SHIFT(vesrlv, 2);
- break;
- }
- case kS390_I16x8Shl: {
- VECTOR_SHIFT(veslv, 1);
- break;
- }
- case kS390_I16x8ShrS: {
- VECTOR_SHIFT(vesrav, 1);
- break;
- }
- case kS390_I16x8ShrU: {
- VECTOR_SHIFT(vesrlv, 1);
- break;
- }
- case kS390_I8x16Shl: {
- VECTOR_SHIFT(veslv, 0);
- break;
- }
- case kS390_I8x16ShrS: {
- VECTOR_SHIFT(vesrav, 0);
- break;
- }
- case kS390_I8x16ShrU: {
- VECTOR_SHIFT(vesrlv, 0);
- break;
- }
// vector unary ops
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3489,6 +3387,120 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
break;
}
+#define LOAD_SPLAT(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndSplat##type##LE(dst, operand);
+ case kS390_S128Load64Splat: {
+ LOAD_SPLAT(64x2);
+ break;
+ }
+ case kS390_S128Load32Splat: {
+ LOAD_SPLAT(32x4);
+ break;
+ }
+ case kS390_S128Load16Splat: {
+ LOAD_SPLAT(16x8);
+ break;
+ }
+ case kS390_S128Load8Splat: {
+ LOAD_SPLAT(8x16);
+ break;
+ }
+#undef LOAD_SPLAT
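LOAD_SPLAT (like the LOAD_EXTEND, LOAD_AND_ZERO, LOAD_LANE and STORE_LANE helpers that follow) deliberately has no braces of its own: each expansion declares mode/operand/dst locals that are scoped by the `case ...: { ... }` block it is pasted into, which is why every case body keeps its braces. A small standalone sketch of that shape, with hypothetical names:

// Hypothetical stand-in for the pattern: the macro declares locals and the
// enclosing case braces give each expansion its own scope.
#define DECLARE_LOCALS(width) int mode = 0; int operand_bits = (width);

int Dispatch(int tag) {
  switch (tag) {
    case 0: {
      DECLARE_LOCALS(64);
      return mode + operand_bits;
    }
    case 1: {
      DECLARE_LOCALS(32);
      return mode + operand_bits;
    }
    default:
      return -1;
  }
}
#undef DECLARE_LOCALS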
+#define LOAD_EXTEND(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndExtend##type##LE(dst, operand);
+ case kS390_S128Load32x2U: {
+ LOAD_EXTEND(32x2U);
+ break;
+ }
+ case kS390_S128Load32x2S: {
+ LOAD_EXTEND(32x2S);
+ break;
+ }
+ case kS390_S128Load16x4U: {
+ LOAD_EXTEND(16x4U);
+ break;
+ }
+ case kS390_S128Load16x4S: {
+ LOAD_EXTEND(16x4S);
+ break;
+ }
+ case kS390_S128Load8x8U: {
+ LOAD_EXTEND(8x8U);
+ break;
+ }
+ case kS390_S128Load8x8S: {
+ LOAD_EXTEND(8x8S);
+ break;
+ }
+#undef LOAD_EXTEND
+#define LOAD_AND_ZERO(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadV##type##ZeroLE(dst, operand);
+ case kS390_S128Load32Zero: {
+ LOAD_AND_ZERO(32);
+ break;
+ }
+ case kS390_S128Load64Zero: {
+ LOAD_AND_ZERO(64);
+ break;
+ }
+#undef LOAD_AND_ZERO
+#define LOAD_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ LoadLane##type##LE(dst, operand, lane);
+ case kS390_S128Load8Lane: {
+ LOAD_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load16Lane: {
+ LOAD_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load32Lane: {
+ LOAD_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load64Lane: {
+ LOAD_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef LOAD_LANE
+#define STORE_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register src = i.InputSimd128Register(0); \
+ __ StoreLane##type##LE(src, operand, lane);
+ case kS390_S128Store8Lane: {
+ STORE_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store16Lane: {
+ STORE_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store32Lane: {
+ STORE_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store64Lane: {
+ STORE_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef STORE_LANE
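The lane arguments above (15 - laneidx, 7 - ..., 3 - ..., 1 - ...) appear to flip the WebAssembly lane index, which counts from the little-endian end, into the element numbering used by the big-endian z/Arch vector instructions; the *LE helpers then handle byte order within each element. A quick check of that mapping, under that reading of the flip:

#include <cstdint>

// Lane i of an N-lane vector maps to element (N - 1 - i); the constants in
// the LOAD_LANE/STORE_LANE cases are this formula with N fixed per type.
constexpr uint8_t FlipLane(uint8_t lane_count, uint8_t wasm_lane) {
  return static_cast<uint8_t>(lane_count - 1 - wasm_lane);
}

static_assert(FlipLane(16, 0) == 15, "8-bit lanes: 15 - laneidx");
static_assert(FlipLane(8, 7) == 0, "16-bit lanes: 7 - laneidx");
static_assert(FlipLane(4, 2) == 1, "32-bit lanes: 3 - laneidx");
static_assert(FlipLane(2, 1) == 0, "64-bit lanes: 1 - laneidx");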
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -3541,20 +3553,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ mov(r0, Operand::Zero());
- __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
- kSpeculationPoisonRegister, r0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3781,7 +3779,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4028,7 +4025,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 4eea2fa865..03806b57b1 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -161,36 +161,12 @@ namespace compiler {
V(S390_StoreReverseSimd128) \
V(S390_StoreFloat32) \
V(S390_StoreDouble) \
- V(S390_CompressSigned) \
- V(S390_CompressPointer) \
- V(S390_CompressAny) \
- V(S390_Word64AtomicExchangeUint8) \
- V(S390_Word64AtomicExchangeUint16) \
- V(S390_Word64AtomicExchangeUint32) \
V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint8) \
- V(S390_Word64AtomicCompareExchangeUint16) \
- V(S390_Word64AtomicCompareExchangeUint32) \
V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint8) \
- V(S390_Word64AtomicAddUint16) \
- V(S390_Word64AtomicAddUint32) \
V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint8) \
- V(S390_Word64AtomicSubUint16) \
- V(S390_Word64AtomicSubUint32) \
V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint8) \
- V(S390_Word64AtomicAndUint16) \
- V(S390_Word64AtomicAndUint32) \
V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint8) \
- V(S390_Word64AtomicOrUint16) \
- V(S390_Word64AtomicOrUint32) \
V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint8) \
- V(S390_Word64AtomicXorUint16) \
- V(S390_Word64AtomicXorUint32) \
V(S390_Word64AtomicXorUint64) \
V(S390_F64x2Splat) \
V(S390_F64x2ReplaceLane) \
@@ -396,6 +372,26 @@ namespace compiler {
V(S390_S128Not) \
V(S390_S128Select) \
V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
V(S390_StoreSimd128) \
V(S390_LoadSimd128) \
V(S390_StoreCompressTagged) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index afc28b1f8c..d7046507c7 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -135,9 +135,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
case kS390_LoadAndTestFloat64:
- case kS390_CompressSigned:
- case kS390_CompressPointer:
- case kS390_CompressAny:
case kS390_F64x2Splat:
case kS390_F64x2ReplaceLane:
case kS390_F64x2Abs:
@@ -362,6 +359,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadDecompressTaggedSigned:
case kS390_LoadDecompressTaggedPointer:
case kS390_LoadDecompressAnyTagged:
+ case kS390_S128Load8Splat:
+ case kS390_S128Load16Splat:
+ case kS390_S128Load32Splat:
+ case kS390_S128Load64Splat:
+ case kS390_S128Load8x8S:
+ case kS390_S128Load8x8U:
+ case kS390_S128Load16x4S:
+ case kS390_S128Load16x4U:
+ case kS390_S128Load32x2S:
+ case kS390_S128Load32x2U:
+ case kS390_S128Load32Zero:
+ case kS390_S128Load64Zero:
+ case kS390_S128Load8Lane:
+ case kS390_S128Load16Lane:
+ case kS390_S128Load32Lane:
+ case kS390_S128Load64Lane:
return kIsLoadOperation;
case kS390_StoreWord8:
@@ -379,35 +392,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
+ case kS390_S128Store8Lane:
+ case kS390_S128Store16Lane:
+ case kS390_S128Store32Lane:
+ case kS390_S128Store64Lane:
return kHasSideEffect;
- case kS390_Word64AtomicExchangeUint8:
- case kS390_Word64AtomicExchangeUint16:
- case kS390_Word64AtomicExchangeUint32:
case kS390_Word64AtomicExchangeUint64:
- case kS390_Word64AtomicCompareExchangeUint8:
- case kS390_Word64AtomicCompareExchangeUint16:
- case kS390_Word64AtomicCompareExchangeUint32:
case kS390_Word64AtomicCompareExchangeUint64:
- case kS390_Word64AtomicAddUint8:
- case kS390_Word64AtomicAddUint16:
- case kS390_Word64AtomicAddUint32:
case kS390_Word64AtomicAddUint64:
- case kS390_Word64AtomicSubUint8:
- case kS390_Word64AtomicSubUint16:
- case kS390_Word64AtomicSubUint32:
case kS390_Word64AtomicSubUint64:
- case kS390_Word64AtomicAndUint8:
- case kS390_Word64AtomicAndUint16:
- case kS390_Word64AtomicAndUint32:
case kS390_Word64AtomicAndUint64:
- case kS390_Word64AtomicOrUint8:
- case kS390_Word64AtomicOrUint16:
- case kS390_Word64AtomicOrUint32:
case kS390_Word64AtomicOrUint64:
- case kS390_Word64AtomicXorUint8:
- case kS390_Word64AtomicXorUint16:
- case kS390_Word64AtomicXorUint32:
case kS390_Word64AtomicXorUint64:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index bcf5a8dfff..489065e65f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -106,7 +106,6 @@ class S390OperandGenerator final : public OperandGenerator {
return OpParameter<int64_t>(node->op());
else
UNIMPLEMENTED();
- return 0L;
}
bool CanBeImmediate(Node* node, OperandModes mode) {
@@ -272,8 +271,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
(S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
: OperandMode::kInt20Imm)
-ArchOpcode SelectLoadOpcode(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -466,7 +464,8 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
} else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
- canCombineWithLoad(SelectLoadOpcode(right))) {
+ canCombineWithLoad(
+ SelectLoadOpcode(LoadRepresentationOf(right->op())))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
right, inputs, input_count, OpcodeImmMode(*opcode));
*opcode |= AddressingModeField::encode(mode);
@@ -695,23 +694,23 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
-void InstructionSelector::VisitLoad(Node* node) {
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
S390OperandGenerator g(this);
- InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
Emit(opcode, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ InstructionCode opcode = SelectLoadOpcode(load_rep);
+ VisitLoad(node, node, opcode);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
@@ -2153,21 +2152,18 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2181,7 +2177,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs);
}
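AtomicWidthField::encode(width) packs the new width parameter into spare bits of the InstructionCode, next to the arch opcode and addressing mode, so the code generator can distinguish 32-bit from 64-bit atomics while sharing the kAtomic* opcodes. A self-contained sketch of that bit-field idea; the field position and size below are invented for illustration (the real code uses V8's BitField helpers):

#include <cstdint>

// Simplified stand-in for a BitField-style encoder/decoder.
template <typename T, int kShift, int kBits>
struct BitFieldSketch {
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word >> kShift) & ((1u << kBits) - 1u));
  }
};

enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };
// Position and size are assumptions made for this sketch only.
using AtomicWidthFieldSketch = BitFieldSketch<AtomicWidth, 24, 2>;

constexpr uint32_t kSomeOpcode = 42;  // stand-in for an ArchOpcode value
constexpr uint32_t kCode =
    kSomeOpcode | AtomicWidthFieldSketch::encode(AtomicWidth::kWord64);
static_assert(AtomicWidthFieldSketch::decode(kCode) == AtomicWidth::kWord64,
              "the width survives the round trip");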
@@ -2189,40 +2186,40 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2248,7 +2245,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
size_t output_count = 0;
outputs[output_count++] = g.DefineSameAsFirst(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs);
}
@@ -2256,40 +2254,40 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2318,7 +2316,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
size_t temp_count = 0;
temps[temp_count++] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
temps);
}
@@ -2342,15 +2341,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,14 +2374,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC64_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
- kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kS390_Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
@@ -2393,14 +2391,14 @@ VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
#define SIMD_TYPES(V) \
@@ -2789,18 +2787,107 @@ void InstructionSelector::EmitPrepareResults(
}
void InstructionSelector::VisitLoadLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kS390_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kS390_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kS390_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kS390_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoadTransform(Node* node) {
- // We should never reach here, see http://crrev.com/c/2050811
- UNREACHABLE();
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ ArchOpcode opcode;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kS390_S128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kS390_S128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kS390_S128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kS390_S128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kS390_S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kS390_S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kS390_S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kS390_S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kS390_S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kS390_S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kS390_S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kS390_S128Load64Zero;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitLoad(node, node, opcode);
}
void InstructionSelector::VisitStoreLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kS390_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kS390_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kS390_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kS390_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 60a40fb489..3e2298de3e 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
+ MachineRepresentation rep) {
+ if (order == std::memory_order_relaxed) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgb(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgw(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgl(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgq(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->AtomicStoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
+ MachineRepresentation rep);
+
+template <>
+void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
+ Immediate value,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
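For relaxed stores EmitStore keeps the plain mov forms; for seq_cst it routes everything through xchg, which on x64 is implicitly locked and therefore doubles as the required full fence (no separate mfence). The value is copied into kScratchRegister first because xchg writes the old memory contents back into its register operand, which would otherwise clobber the caller's value register. Roughly the same contract, expressed with std::atomic:

#include <atomic>
#include <cstdint>

// Informal equivalent of EmitStore<std::memory_order_seq_cst> for kWord32:
// compilers typically lower this to an (implicitly locked) xchg, matching
// the hand-written sequence above.
void SeqCstStore32(std::atomic<uint32_t>* slot, uint32_t value) {
  slot->store(value, std::memory_order_seq_cst);
}

// And the relaxed flavour, which stays an ordinary mov.
void RelaxedStore32(std::atomic<uint32_t>* slot, uint32_t value) {
  slot->store(value, std::memory_order_relaxed);
}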
#ifdef V8_IS_TSAN
-class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
+void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+ Register scratch, Operand operand,
+ StubCallMode mode, int size) {
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ // The wasm OOB trap handler needs to be able to look up the faulting
+ // instruction pointer to handle the SIGSEGV raised by an OOB access. It
+ // will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
+ // redundant load here to give the trap handler a chance to handle any
+ // OOB SIGSEGVs.
+ if (trap_handler::IsTrapHandlerEnabled() &&
+ mode == StubCallMode::kCallWasmRuntimeStub) {
+ switch (size) {
+ case kInt8Size:
+ tasm->movb(scratch, operand);
+ break;
+ case kInt16Size:
+ tasm->movw(scratch, operand);
+ break;
+ case kInt32Size:
+ tasm->movl(scratch, operand);
+ break;
+ case kInt64Size:
+ tasm->movq(scratch, operand);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+}
+
+class OutOfLineTSANStore : public OutOfLineCode {
public:
- OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
- Register scratch0, StubCallMode stub_mode, int size)
+ OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
+ Register scratch0, StubCallMode stub_mode, int size,
+ std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif // V8_ENABLE_WEBASSEMBLY
size_(size),
+ memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallWasmRuntimeStub);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallWasmRuntimeStub,
+ memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallBuiltinPointer);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif // V8_ENABLE_WEBASSEMBLY
int size_;
+ const std::memory_order memory_order_;
Zone* zone_;
};
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {
+void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+ Operand operand, Register value_reg,
+ X64OperandConverter& i, StubCallMode mode, int size,
+ std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
- auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
- codegen, operand, value_reg, scratch0, mode, size);
+ auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
+ scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+ X64OperandConverter& i) {
+ return value;
+}
+
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+ X64OperandConverter& i);
+
+template <>
+Register GetTSANValueRegister<std::memory_order_relaxed>(
+ TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
+ Register value_reg = i.TempRegister(1);
+ tasm->movq(value_reg, value);
+ return value_reg;
+}
+
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
-
- Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
- size);
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+ int size = ElementSizeInBytes(rep);
+ EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+ stub_call_mode, size);
+ Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
+ EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+ size, order);
+ } else {
+ EmitStore<order>(tasm, operand, value, rep);
+ }
}
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
@@ -453,10 +592,10 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
@@ -472,20 +611,20 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {}
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
+ DCHECK(order == std::memory_order_relaxed ||
+ order == std::memory_order_seq_cst);
+ EmitStore<order>(tasm, operand, value, rep);
+}
+
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {}
#endif // V8_IS_TSAN
#if V8_ENABLE_WEBASSEMBLY
@@ -569,16 +708,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->andq(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
@@ -871,24 +1000,32 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_PINSR(ASM_INSTR) \
- do { \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- XMMRegister dst = i.OutputSimd128Register(); \
- XMMRegister src = i.InputSimd128Register(0); \
- uint8_t laneidx = i.InputUint8(1); \
- if (HasAddressingMode(instr)) { \
- __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
- break; \
- } \
- if (instr->InputAt(2)->IsFPRegister()) { \
- __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
- __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
- } else if (instr->InputAt(2)->IsRegister()) { \
- __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
- } else { \
- __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
- } \
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ uint32_t load_offset; \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx, &load_offset); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx, &load_offset); \
+ } \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
+ } while (false)
+
+#define ASSEMBLE_SEQ_CST_STORE(rep) \
+ do { \
+ Register value = i.InputRegister(0); \
+ Operand operand = i.MemoryOperand(1); \
+ EmitTSANAwareStore<std::memory_order_seq_cst>( \
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
+ rep); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1019,22 +1156,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(rbx);
- __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
- __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
- __ Move(rbx, -1);
- __ cmovq(equal, kSpeculationPoisonRegister, rbx);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
- __ andq(kContextRegister, kSpeculationPoisonRegister);
- __ andq(rsp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1052,11 +1173,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1078,19 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ Call(wasm_code, constant.rmode());
- }
+ __ Call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1107,12 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(kScratchRegister);
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1130,11 +1233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1147,11 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1344,7 +1439,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(result, result);
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1356,7 +1452,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ StoreTaggedField(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ } else {
+ DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
+ EmitTSANAwareStore<std::memory_order_seq_cst>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1364,14 +1469,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kX64MFence:
__ mfence();
break;
@@ -1646,22 +1745,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psrlq(tmp, byte{33});
- __ Andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psllq(tmp, byte{31});
- __ Xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Sqrt:
ASSEMBLE_SSE_UNOP(sqrtss);
break;
@@ -1858,16 +1941,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kX64F64x2Abs:
- case kSSEFloat64Abs: {
- __ Abspd(i.OutputDoubleRegister());
- break;
- }
- case kX64F64x2Neg:
- case kSSEFloat64Neg: {
- __ Negpd(i.OutputDoubleRegister());
- break;
- }
case kSSEFloat64Sqrt:
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
@@ -2120,56 +2193,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulsd depending on the result.
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 33);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Abs: {
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 31);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Neg: {
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 1);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Abs:
+ case kX64Float64Abs: {
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 63);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Neg:
+ case kX64Float64Neg: {
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kSSEFloat64SilenceNaN:
@@ -2180,24 +2219,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2205,29 +2240,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2237,7 +2269,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2245,16 +2276,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -2263,8 +2293,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
__ movl(i.OutputRegister(), i.InputRegister(0));
@@ -2278,48 +2308,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
@@ -2328,14 +2353,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
}
break;
}
@@ -2344,24 +2369,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2376,17 +2400,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- // If we have to poison the loaded value, we load into a general
- // purpose register first, mask it with the poison, and move the
- // value from the general purpose register into the double register.
- __ movq(kScratchRegister, i.MemoryOperand());
- __ andq(kScratchRegister, kSpeculationPoisonRegister);
- __ Movq(i.OutputDoubleRegister(), kScratchRegister);
- } else {
- __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- }
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -2667,27 +2681,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2ConvertLowI32x4S: {
@@ -2696,7 +2698,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2ConvertLowI32x4U: {
__ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64F64x2PromoteLowF32x4: {
@@ -2709,12 +2711,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4TruncSatF64x2SZero: {
__ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4TruncSatF64x2UZero: {
__ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64F32x4Splat: {
@@ -2868,27 +2872,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Pmin: {
@@ -3084,21 +3076,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4SConvertF32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- // NAN->0
- __ Movaps(kScratchDoubleReg, dst);
- __ Cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -3252,21 +3232,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- // pmaddwd multiplies signed words in src1 and src2, producing signed
- // doublewords, then adds pairwise.
- // src1 = |a|b|c|d|e|f|g|h|
- // src2 = |1|1|1|1|1|1|1|1|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- __ Pmaddwd(dst, src1, src2);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I32x4ExtAddPairwiseI16x8U: {
__ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
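The comment deleted above already spells out the trick: pmaddwd against a splat of ones multiplies every 16-bit lane by 1 and adds adjacent products, which is exactly a signed extended pairwise add. A standalone intrinsics sketch of the same idea, for illustration only (the helper name is invented; this is not part of the patch):

#include <emmintrin.h>

// Each 32-bit lane of the result is in[2k] + in[2k+1] (signed), produced by
// letting pmaddwd multiply by 1 and then add the adjacent products.
__m128i ExtAddPairwiseI16x8S(__m128i in) {
  return _mm_madd_epi16(in, _mm_set1_epi16(1));
}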
case kX64S128Const: {
@@ -3293,12 +3266,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
+ __ I16x8Splat(dst, i.InputRegister(0));
} else {
- __ Movd(dst, i.InputOperand(0));
+ __ I16x8Splat(dst, i.InputOperand(0));
}
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I16x8ExtractLaneS: {
@@ -3481,43 +3452,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ExtAddPairwiseI8x16S: {
__ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- __ Pmaddubsw(dst, src1, src2);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- if (HasRegisterInput(instr, 0)) {
- __ vmovd(kScratchDoubleReg, i.InputRegister(0));
- __ vpbroadcastb(dst, kScratchDoubleReg);
- } else {
- __ vpbroadcastb(dst, i.InputOperand(0));
- }
+ if (HasRegisterInput(instr, 0)) {
+ __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
} else {
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
- } else {
- __ Movd(dst, i.InputOperand(0));
- }
- __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
}
-
break;
}
case kX64Pextrb: {
@@ -3586,66 +3541,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- // Take shift value modulo 8.
- __ movq(tmp, i.InputRegister(1));
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): subq here to avoid asking for another temporary register;
- // examine codegen for other i8x16 shifts, which use fewer instructions.
- __ subq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psllw(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
case kX64I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
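The removed comments describe the usual workaround for x64 having no 8-bit arithmetic shift: widen each byte to 16 bits, shift there, and narrow back with signed saturation. A minimal sketch of that approach with plain SSE2 intrinsics (illustrative only, not part of the patch; the function name is made up):

#include <emmintrin.h>

// Arithmetic right shift of every signed byte in v by n (0 <= n <= 7).
__m128i I8x16ShrS(__m128i v, int n) {
  // Interleaving v with itself copies each byte into the high half of a
  // 16-bit lane, so shifting by n + 8 both sign-extends and shifts.
  __m128i count = _mm_cvtsi32_si128(n + 8);
  __m128i lo = _mm_sra_epi16(_mm_unpacklo_epi8(v, v), count);
  __m128i hi = _mm_sra_epi16(_mm_unpackhi_epi8(v, v), count);
  // Every 16-bit result fits in a signed byte, so packing does not saturate.
  return _mm_packs_epi16(lo, hi);
}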
@@ -3701,34 +3616,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- // Unpack the bytes into words, do logical shifts, and repack.
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, byte{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
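The unsigned case uses the cheaper variant spelled out in the deleted immediate path: one 16-bit logical shift followed by a byte mask that clears the bits leaking in from the neighbouring byte. Again a hedged sketch with invented names, not part of the patch:

#include <emmintrin.h>

// Logical right shift of every byte in v by the constant kShift (0..7).
template <int kShift>
__m128i I8x16ShrU(__m128i v) {
  // psrlw shifts 16-bit lanes, so bits from the byte above leak into the top
  // kShift bits of each byte; the mask clears them again.
  __m128i shifted = _mm_srli_epi16(v, kShift);
  __m128i mask = _mm_set1_epi8(static_cast<char>(0xFF >> kShift));
  return _mm_and_si128(shifted, mask);
}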
@@ -3834,9 +3729,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- bool omit_add = MiscField::decode(instr->opcode());
__ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), omit_add);
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ kScratchRegister, MiscField::decode(instr->opcode()));
break;
}
case kX64I8x16Shuffle: {
@@ -3888,45 +3783,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Popcnt: {
__ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.TempSimd128Register(0));
+ i.TempSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastb(dst, i.MemoryOperand());
- } else {
- __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
- }
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastw(dst, i.MemoryOperand());
- } else {
- __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
- __ Pshuflw(dst, dst, uint8_t{0});
- __ Punpcklqdq(dst, dst);
- }
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
- } else {
- __ movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
- byte{0});
- }
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64S128Load64Splat: {
@@ -4049,10 +3924,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4070,10 +3945,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4232,156 +4107,180 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicStoreWord8: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
+ break;
+ }
+ case kAtomicStoreWord16: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
+ break;
+ }
+ case kAtomicStoreWord32: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
+ break;
+ }
+ case kX64Word64AtomicStoreWord64: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
+ break;
+ }
+ case kAtomicExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- break;
- }
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movsxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movzxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movsxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movzxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
- break;
- ATOMIC_BINOP_CASE(Add, addl)
- ATOMIC_BINOP_CASE(Sub, subl)
- ATOMIC_BINOP_CASE(And, andl)
- ATOMIC_BINOP_CASE(Or, orl)
- ATOMIC_BINOP_CASE(Xor, xorl)
-#undef ATOMIC_BINOP_CASE
- case kX64Word64AtomicExchangeUint8: {
- __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint16: {
- __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint32: {
- __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64) {
+ // Zero-extend the 32 bit value to 64 bit.
+ __ movl(rax, rax);
+ }
break;
}
case kX64Word64AtomicExchangeUint64: {
__ xchgq(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kX64Word64AtomicCompareExchangeUint8: {
- __ lock();
- __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint16: {
- __ lock();
- __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint32: {
- __ lock();
- __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- // Zero-extend the 32 bit value to 64 bit.
- __ movl(rax, rax);
- break;
- }
case kX64Word64AtomicCompareExchangeUint64: {
__ lock();
__ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kX64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
- __ movzxbq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
- __ movzxwq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
- break; \
- case kX64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movsxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movzxbl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movb, cmpxchgb); \
+ __ movzxbq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movsxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movzxwl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movw, cmpxchgw); \
+ __ movzxwq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movl, cmpxchgl); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movl, cmpxchgl); \
+ break; \
+ } \
+ break; \
+ case kX64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movq, cmpxchgq); \
break;
- ATOMIC64_BINOP_CASE(Add, addq)
- ATOMIC64_BINOP_CASE(Sub, subq)
- ATOMIC64_BINOP_CASE(And, andq)
- ATOMIC64_BINOP_CASE(Or, orq)
- ATOMIC64_BINOP_CASE(Xor, xorq)
-#undef ATOMIC64_BINOP_CASE
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ ATOMIC_BINOP_CASE(Add, addl, addq)
+ ATOMIC_BINOP_CASE(Sub, subl, subq)
+ ATOMIC_BINOP_CASE(And, andl, andq)
+ ATOMIC_BINOP_CASE(Or, orl, orq)
+ ATOMIC_BINOP_CASE(Xor, xorl, xorq)
+#undef ATOMIC_BINOP_CASE
+
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
}
return kSuccess;
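ASSEMBLE_ATOMIC_BINOP / ASSEMBLE_ATOMIC64_BINOP (defined earlier in this file) wrap the given operation in a LOCK CMPXCHG retry loop. In portable C++ the loop they emit has roughly this shape; a sketch under that assumption, not V8 code:

#include <atomic>
#include <cstdint>

// Returns the previous value, mirroring how the old value ends up in rax.
uint32_t AtomicAddWord32(std::atomic<uint32_t>& mem, uint32_t operand) {
  uint32_t old = mem.load(std::memory_order_relaxed);
  // compare_exchange_weak reloads `old` on failure, so the loop retries until
  // no other store intervenes between the load and the LOCK CMPXCHG.
  while (!mem.compare_exchange_weak(old, old + operand)) {
  }
  return old;
}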
@@ -4407,6 +4306,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
+#undef ASSEMBLE_SEQ_CST_STORE
namespace {
@@ -4462,19 +4362,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ Move(kScratchRegister, 0);
- __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
- kScratchRegister);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -4716,7 +4603,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4876,18 +4762,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
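The receiver-counting change in the return sequence above is easier to follow as plain arithmetic: with kJSArgcIncludesReceiver both the comparison and the drop work on counts that already include the receiver, otherwise the receiver is added back. A small sketch of the resulting drop count (illustration only, helper name invented):

#include <algorithm>

// How many stack slots the return sequence ends up dropping, under either
// argument-count convention.
int SlotsToDrop(int argc, int parameter_slots, bool argc_includes_receiver) {
  int args_with_receiver = argc_includes_receiver ? argc : argc + 1;
  return std::max(args_with_receiver, parameter_slots);
}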
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index eba23dcfa9..e7fe45c5de 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,413 +11,389 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint8) \
- V(X64Word64AtomicAddUint16) \
- V(X64Word64AtomicAddUint32) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint8) \
- V(X64Word64AtomicSubUint16) \
- V(X64Word64AtomicSubUint32) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint8) \
- V(X64Word64AtomicAndUint16) \
- V(X64Word64AtomicAndUint32) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint8) \
- V(X64Word64AtomicOrUint16) \
- V(X64Word64AtomicOrUint32) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint8) \
- V(X64Word64AtomicXorUint16) \
- V(X64Word64AtomicXorUint32) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicExchangeUint8) \
- V(X64Word64AtomicExchangeUint16) \
- V(X64Word64AtomicExchangeUint32) \
- V(X64Word64AtomicExchangeUint64) \
- V(X64Word64AtomicCompareExchangeUint8) \
- V(X64Word64AtomicCompareExchangeUint16) \
- V(X64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64Movsxbl) \
+ V(X64Movzxbl) \
+ V(X64Movsxbq) \
+ V(X64Movzxbq) \
+ V(X64Movb) \
+ V(X64Movsxwl) \
+ V(X64Movzxwl) \
+ V(X64Movsxwq) \
+ V(X64Movzxwq) \
+ V(X64Movw) \
+ V(X64Movl) \
+ V(X64Movsxlq) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movdqu) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64Pinsrb) \
+ V(X64Pinsrw) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
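The V(...) list above is an X-macro: other parts of the backend expand it with different definitions of V to build the opcode enum, name tables, and similar structures. A self-contained sketch of the pattern (demo names invented for illustration):

#define DEMO_OPCODE_LIST(V) \
  V(X64Add)                 \
  V(X64Movl)

// One expansion produces the enum of opcode constants...
#define DECLARE_OPCODE(Name) k##Name,
enum DemoArchOpcode { DEMO_OPCODE_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE

// ...another a parallel table of printable names.
#define OPCODE_NAME(Name) #Name,
constexpr const char* kDemoOpcodeNames[] = {DEMO_OPCODE_LIST(OPCODE_NAME)};
#undef OPCODE_NAME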
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 4fada93a31..d5f33d86bc 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -62,8 +62,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat32ToFloat64:
@@ -73,8 +71,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32Max:
@@ -114,10 +110,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
case kX64BitcastFI:
case kX64BitcastDL:
case kX64BitcastIF:
@@ -422,33 +418,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
- case kX64Word64AtomicAddUint8:
- case kX64Word64AtomicAddUint16:
- case kX64Word64AtomicAddUint32:
+ case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
- case kX64Word64AtomicSubUint8:
- case kX64Word64AtomicSubUint16:
- case kX64Word64AtomicSubUint32:
case kX64Word64AtomicSubUint64:
- case kX64Word64AtomicAndUint8:
- case kX64Word64AtomicAndUint16:
- case kX64Word64AtomicAndUint32:
case kX64Word64AtomicAndUint64:
- case kX64Word64AtomicOrUint8:
- case kX64Word64AtomicOrUint16:
- case kX64Word64AtomicOrUint32:
case kX64Word64AtomicOrUint64:
- case kX64Word64AtomicXorUint8:
- case kX64Word64AtomicXorUint16:
- case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
- case kX64Word64AtomicExchangeUint8:
- case kX64Word64AtomicExchangeUint16:
- case kX64Word64AtomicExchangeUint32:
case kX64Word64AtomicExchangeUint64:
- case kX64Word64AtomicCompareExchangeUint8:
- case kX64Word64AtomicCompareExchangeUint16:
- case kX64Word64AtomicCompareExchangeUint32:
case kX64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -472,18 +448,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
return 3;
case kSSEFloat32Mul:
case kSSEFloat32ToFloat64:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 53ee75064b..2f44f0dee5 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -250,6 +250,7 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
+
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
@@ -340,6 +341,30 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
UNREACHABLE();
}
+ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ case MachineRepresentation::kWord64:
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -471,9 +496,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= AccessModeField::encode(kMemoryAccessProtected);
- } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@@ -484,19 +506,39 @@ void InstructionSelector::VisitLoad(Node* node) {
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitStore(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+
+// Shared routine for Word32/Word64 Atomic Exchange
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ X64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
@@ -513,16 +555,13 @@ void InstructionSelector::VisitStore(Node* node) {
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- if ((ElementSizeLog2Of(store_rep.representation()) <
- kSystemPointerSizeLog2) &&
- value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
- value = value->InputAt(0);
- }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
@@ -536,22 +575,54 @@ void InstructionSelector::VisitStore(Node* node) {
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
+ // Release and non-atomic stores emit MOV and sequentially consistent stores
+ // emit XCHG.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
+ ArchOpcode opcode;
+ AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
- node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
+
+ if (is_seqcst) {
+ // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+ // would for XCHG. XCHG can't encode the value as an immediate and has
+ // fewer addressing modes available.
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] =
+ g.GetEffectiveIndexOperand(index, &addressing_mode);
+ opcode = GetSeqCstStoreOpcode(store_rep);
+ } else {
+ if ((ElementSizeLog2Of(store_rep.representation()) <
+ kSystemPointerSizeLog2) &&
+ value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ value = value->InputAt(0);
+ }
+
+ addressing_mode = g.GetEffectiveAddressMemoryOperand(
+ node, inputs, &input_count, reg_kind);
+ InstructionOperand value_operand = g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : g.UseRegister(value, reg_kind);
+ inputs[input_count++] = value_operand;
+ opcode = GetStoreOpcode(store_rep);
+ }
+
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs, temp_count, temps);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs, temp_count, temps);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
@@ -1502,8 +1573,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad: {
+ case IrOpcode::kProtectedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1622,15 +1692,12 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
@@ -1770,7 +1837,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Abs);
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1814,7 +1881,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Abs);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
@@ -1822,11 +1889,11 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Neg);
}
void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Neg);
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -2294,7 +2361,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
// Shared routine for Word32/Word64 Atomic Binops
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2305,14 +2372,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
// Shared routine for Word32/Word64 Atomic CompareExchange
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2324,23 +2392,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-// Shared routine for Word32/Word64 Atomic Exchange
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegister(value), g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
@@ -2711,131 +2764,114 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ DCHECK(IsIntegral(load_rep.representation()) ||
+ IsAnyTagged(load_rep.representation()) ||
+ (COMPRESS_POINTERS_BOOL &&
+ CanBeCompressedPointer(load_rep.representation())));
+ DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
+ DCHECK(!load_rep.IsMapWord());
+ // The memory order is ignored, as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ DCHECK(!atomic_load_params.representation().IsMapWord());
+ // The memory order is ignored, as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 4);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX64Word64AtomicExchangeUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Word64AtomicExchangeUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Word64AtomicExchangeUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kX64Word64AtomicExchangeUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 8);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -2856,15 +2892,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2889,14 +2924,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
- kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kX64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3053,6 +3088,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_NARROW_SHIFT_OPCODES(V) \
V(I8x16Shl) \
+ V(I8x16ShrS) \
V(I8x16ShrU)
void InstructionSelector::VisitS128Const(Node* node) {
@@ -3182,19 +3218,19 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)), \
- arraysize(temps), temps); \
- } else { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand output = \
+ IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, output, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, output, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
#undef VISIT_SIMD_NARROW_SHIFT
@@ -3257,15 +3293,11 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
}
void InstructionSelector::VisitF64x2Abs(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Abs);
}
void InstructionSelector::VisitF64x2Neg(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Neg);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -3274,12 +3306,11 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_QFMOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.UseRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); \
}
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
@@ -3321,7 +3352,8 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
+ Emit(kX64I32x4SConvertF32x4,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -3333,19 +3365,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- X64OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
- } else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- }
-}
-
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index a864012a7a..1515340503 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -135,7 +135,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
bool condition_value;
// If we know the condition we can discard the branch.
if (from_input.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -215,7 +214,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* branch;
// If we know the condition we can discard the branch.
if (conditions.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
@@ -410,21 +408,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
}
#endif
-void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
- // Check if {branch} is dead because we might have a stale side-table entry.
- if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
- branch->opcode() != IrOpcode::kTrapIf &&
- branch->opcode() != IrOpcode::kTrapUnless) {
- IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
- IsSafetyCheck combined_safety =
- CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
- if (branch_safety != combined_safety) {
- NodeProperties::ChangeOp(
- branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety));
- }
- }
-}
-
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 9078c39038..93bacbff7b 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -114,7 +114,6 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
bool is_true_branch, bool in_new_block);
- void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node);
Node* dead() const { return dead_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 985a256c57..019f0bc954 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -141,9 +141,8 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
- return NewNode(common()->Branch(hint, is_safety_check), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -1053,7 +1052,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
shared_info_(shared_info),
bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().value()),
+ feedback_vector_(feedback_cell.feedback_vector().value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 5950541111..e62babccf1 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -100,6 +100,18 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == loong64 ================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit() | s8.bit() | fp.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
+ f30.bit() | f31.bit()
+
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 2cbcce236f..d27744072a 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -48,8 +48,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ CodeKind kind, const char* name, Builtin builtin)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, CodeKind kind,
- const char* name,
- PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ const char* name, Builtin builtin)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
CallDescriptor::kCanUseRoots),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin)
: raw_assembler_(new RawMachineAssembler(
isolate, zone->New<Graph>(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements(), poisoning_level)),
+ InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
builtin_(builtin),
@@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
-PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
- return raw_assembler()->poisoning_level();
-}
-
// static
Handle<Code> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
@@ -187,7 +179,7 @@ Handle<Code> CodeAssembler::GenerateCode(
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
rasm->source_positions(), state->kind_, state->name_,
- state->builtin_, rasm->poisoning_level(), options, profile_data)
+ state->builtin_, options, profile_data)
.ToHandleChecked();
state->code_generated_ = true;
@@ -565,15 +557,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
- return UncheckedCast<Object>(
- raw_assembler()->TaggedPoisonOnSpeculation(value));
-}
-
-TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
- return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
-}
-
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
@@ -677,45 +660,44 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base) {
+ return raw_assembler()->Load(type, base);
}
-Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, offset, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
+ return raw_assembler()->Load(type, base, offset);
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
- LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
+ return BitcastWordToTagged(Load<RawPtrT>(base));
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
-Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
- TNode<WordT> offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return raw_assembler()->AtomicLoad(type, base, offset);
+ return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
+ offset);
}
template <class Type>
-TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
+ TNode<RawPtrT> base,
TNode<WordT> offset) {
- return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+ return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
+ AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset) {
@@ -880,16 +862,22 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
BitcastTaggedToWord(tagged_value));
}
-void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
+void CodeAssembler::AtomicStore(MachineRepresentation rep,
+ AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- raw_assembler()->AtomicStore(rep, base, offset, value);
+ raw_assembler()->AtomicStore(
+ AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value);
}
-void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value,
+void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
TNode<UintPtrT> value_high) {
- raw_assembler()->AtomicStore64(base, offset, value, value_high);
+ raw_assembler()->AtomicStore64(
+ AtomicStoreParameters(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value, value_high);
}
#define ATOMIC_FUNCTION(name) \
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0e6872aa66..7a22086260 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
@@ -725,47 +726,36 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadFramePointer();
TNode<RawPtrT> LoadParentFramePointer();
- // Poison |value| on speculative paths.
- TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
- TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
-
// Load raw memory location.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base);
template <class Type>
TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
- Node* Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base, Node* offset);
template <class Type>
- TNode<Type> Load(Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, needs_poisoning));
+ TNode<Type> Load(Node* base) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base));
}
template <class Type>
- TNode<Type> Load(Node* base, TNode<WordT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
+ TNode<Type> Load(Node* base, TNode<WordT> offset) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+ TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset) {
return UncheckedCast<Type>(
- AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+ AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
+ TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
- TNode<Object> LoadFullTagged(
- Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- TNode<Object> LoadFullTagged(
- Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<Object> LoadFullTagged(Node* base);
+ TNode<Object> LoadFullTagged(Node* base, TNode<IntPtrT> offset);
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
@@ -822,12 +812,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<HeapObject> object,
int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
- void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
- TNode<WordT> offset, TNode<Word32T> value);
+ void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset,
+ TNode<Word32T> value);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
- void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value, TNode<UintPtrT> value_high);
+ void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
+ TNode<UintPtrT> value_high);
TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
TNode<UintPtrT> offset, TNode<Word32T> value);
@@ -1225,7 +1217,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
@@ -1235,7 +1227,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* ConstructJSWithTarget(Callable const& callable, Node* context,
Node* function, Node* new_target, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<Code> target = HeapConstant(callable.code());
@@ -1312,7 +1304,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
- PoisoningMitigationLevel poisoning_level() const;
bool IsJSFunctionCall() const;
@@ -1367,7 +1358,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
- Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+ Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset);
Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset);
@@ -1595,13 +1587,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin = Builtin::kNoBuiltinId);
+ const char* name, Builtin builtin = Builtin::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
~CodeAssemblerState();
@@ -1628,8 +1618,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin);
+ const char* name, Builtin builtin);
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
void PopExceptionHandler();
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index b370a673b9..329ccc7e86 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
- switch (is_safety_check) {
- case IsSafetyCheck::kCriticalSafetyCheck:
- return os << "CriticalSafetyCheck";
- case IsSafetyCheck::kSafetyCheck:
- return os << "SafetyCheck";
- case IsSafetyCheck::kNoSafetyCheck:
- return os << "NoSafetyCheck";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
-std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
- return os << info.hint << ", " << info.is_safety_check;
-}
-
-const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchOperatorInfo>(op);
-}
-
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
- case IrOpcode::kBranch:
- return BranchOperatorInfoOf(op).hint;
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
+ case IrOpcode::kBranch:
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
@@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.is_safety_check() == rhs.is_safety_check();
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
FeedbackSource::Hash feedback_hash;
- return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()),
- p.is_safety_check());
+ return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check()
- << ", " << p.feedback();
+ return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
- if (op->opcode() == IrOpcode::kBranch) {
- return BranchOperatorInfoOf(op).is_safety_check;
- }
- return DeoptimizeParametersOf(op).is_safety_check();
-}
-
-const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
- const Operator* op, IsSafetyCheck safety_check) {
- if (op->opcode() == IrOpcode::kBranch) {
- BranchOperatorInfo info = BranchOperatorInfoOf(op);
- if (info.is_safety_check == safety_check) return op;
- return Branch(info.hint, safety_check);
- }
- DeoptimizeParameters p = DeoptimizeParametersOf(op);
- if (p.is_safety_check() == safety_check) return op;
- switch (op->opcode()) {
- case IrOpcode::kDeoptimizeIf:
- return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check);
- case IrOpcode::kDeoptimizeUnless:
- return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check);
- default:
- UNREACHABLE();
- }
-}
-
const Operator* CommonOperatorBuilder::DelayedStringConstant(
const StringConstantBase* str) {
return zone()->New<Operator1<const StringConstantBase*>>(
@@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
-#define CACHED_BRANCH_LIST(V) \
- V(None, CriticalSafetyCheck) \
- V(True, CriticalSafetyCheck) \
- V(False, CriticalSafetyCheck) \
- V(None, SafetyCheck) \
- V(True, SafetyCheck) \
- V(False, SafetyCheck) \
- V(None, NoSafetyCheck) \
- V(True, NoSafetyCheck) \
- V(False, NoSafetyCheck)
+#define CACHED_BRANCH_LIST(V) \
+ V(None) \
+ V(True) \
+ V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero, NoSafetyCheck) \
- V(Eager, DivisionByZero, SafetyCheck) \
- V(Eager, Hole, NoSafetyCheck) \
- V(Eager, Hole, SafetyCheck) \
- V(Eager, MinusZero, NoSafetyCheck) \
- V(Eager, MinusZero, SafetyCheck) \
- V(Eager, Overflow, NoSafetyCheck) \
- V(Eager, Overflow, SafetyCheck) \
- V(Eager, Smi, SafetyCheck)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision, NoSafetyCheck) \
- V(Eager, LostPrecision, SafetyCheck) \
- V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
- V(Eager, LostPrecisionOrNaN, SafetyCheck) \
- V(Eager, NotAHeapNumber, SafetyCheck) \
- V(Eager, NotANumberOrOddball, SafetyCheck) \
- V(Eager, NotASmi, SafetyCheck) \
- V(Eager, OutOfBounds, SafetyCheck) \
- V(Eager, WrongInstanceType, SafetyCheck) \
- V(Eager, WrongMap, SafetyCheck)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero) \
+ V(Eager, Hole) \
+ V(Eager, MinusZero) \
+ V(Eager, Overflow) \
+ V(Eager, Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision) \
+ V(Eager, LostPrecisionOrNaN) \
+ V(Eager, NotAHeapNumber) \
+ V(Eager, NotANumberOrOddball) \
+ V(Eager, NotASmi) \
+ V(Eager, OutOfBounds) \
+ V(Eager, WrongInstanceType) \
+ V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
@@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint hint, IsSafetyCheck is_safety_check>
- struct BranchOperator final : public Operator1<BranchOperatorInfo> {
+ template <BranchHint hint>
+ struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchOperatorInfo>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- BranchOperatorInfo{hint, is_safety_check}) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ hint) {} // parameter
};
-#define CACHED_BRANCH(Hint, IsCheck) \
- BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
- kBranch##Hint##IsCheck##Operator;
+#define CACHED_BRANCH(Hint) \
+ BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- IsSafetyCheck::kNoSafetyCheck)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
+ DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final {
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
- FeedbackSource(),
- IsSafetyCheck::kCriticalSafetyCheck)) {}
+ FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
@@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
- IsSafetyCheck is_safety_check) {
-#define CACHED_BRANCH(Hint, IsCheck) \
- if (hint == BranchHint::k##Hint && \
- is_safety_check == IsSafetyCheck::k##IsCheck) { \
- return &cache_.kBranch##Hint##IsCheck##Operator; \
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+#define CACHED_BRANCH(Hint) \
+ if (hint == BranchHint::k##Hint) { \
+ return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback,
- IsSafetyCheck::kNoSafetyCheck);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
return OpParameter<FrameStateInfo>(op);
}
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) {
- if (a == IsSafetyCheck::kCriticalSafetyCheck ||
- b == IsSafetyCheck::kCriticalSafetyCheck) {
- return IsSafetyCheck::kCriticalSafetyCheck;
- }
- if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) {
- return IsSafetyCheck::kSafetyCheck;
- }
- return IsSafetyCheck::kNoSafetyCheck;
-}
-
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index fa49d3b992..f691c1fbf4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
-enum class IsSafetyCheck : uint8_t {
- kCriticalSafetyCheck,
- kSafetyCheck,
- kNoSafetyCheck
-};
-
-// Get the more critical safety check of the two arguments.
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
-inline size_t hash_value(IsSafetyCheck is_safety_check) {
- return static_cast<size_t>(is_safety_check);
-}
-
enum class TrapId : uint32_t {
#define DEF_ENUM(Name, ...) k##Name,
FOREACH_WASM_TRAPREASON(DEF_ENUM)
@@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
-struct BranchOperatorInfo {
- BranchHint hint;
- IsSafetyCheck is_safety_check;
-};
-
-inline size_t hash_value(const BranchOperatorInfo& info) {
- return base::hash_combine(info.hint, info.is_safety_check);
-}
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
-
-inline bool operator==(const BranchOperatorInfo& a,
- const BranchOperatorInfo& b) {
- return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
-}
-
-V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
- const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check)
- : kind_(kind),
- reason_(reason),
- feedback_(feedback),
- is_safety_check_(is_safety_check) {}
+ FeedbackSource const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
- IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
- IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
V8_WARN_UNUSED_RESULT;
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-
class SelectParameters final {
public:
explicit SelectParameters(MachineRepresentation representation,
@@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone,
- IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
+ const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- const Operator* DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
@@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const wasm::FunctionSig* signature);
#endif // V8_ENABLE_WEBASSEMBLY
- const Operator* MarkAsSafetyCheck(const Operator* op,
- IsSafetyCheck safety_check);
-
const Operator* DelayedStringConstant(const StringConstantBase* str);
private:
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index dc2db32753..27720c80ed 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -5,7 +5,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/base/optional.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -19,18 +18,84 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define DEPENDENCY_LIST(V) \
+ V(ConsistentJSFunctionView) \
+ V(ConstantInDictionaryPrototypeChain) \
+ V(ElementsKind) \
+ V(FieldConstness) \
+ V(FieldRepresentation) \
+ V(FieldType) \
+ V(GlobalProperty) \
+ V(InitialMap) \
+ V(InitialMapInstanceSizePrediction) \
+ V(OwnConstantDataProperty) \
+ V(OwnConstantDictionaryProperty) \
+ V(OwnConstantElement) \
+ V(PretenureMode) \
+ V(Protector) \
+ V(PrototypeProperty) \
+ V(StableMap) \
+ V(Transition)
+
CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {
broker->set_dependencies(this);
}
+namespace {
+
+enum CompilationDependencyKind {
+#define V(Name) k##Name,
+ DEPENDENCY_LIST(V)
+#undef V
+};
+
+#define V(Name) class Name##Dependency;
+DEPENDENCY_LIST(V)
+#undef V
+
+const char* CompilationDependencyKindToString(CompilationDependencyKind kind) {
+#define V(Name) #Name "Dependency",
+ static const char* const names[] = {DEPENDENCY_LIST(V)};
+#undef V
+ return names[kind];
+}
+
+} // namespace
+
+class CompilationDependency : public ZoneObject {
+ public:
+ explicit CompilationDependency(CompilationDependencyKind kind) : kind(kind) {}
+
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(Handle<Code> code) const = 0;
+
+#ifdef DEBUG
+#define V(Name) \
+ bool Is##Name() const { return kind == k##Name; } \
+ V8_ALLOW_UNUSED const Name##Dependency* As##Name() const;
+ DEPENDENCY_LIST(V)
+#undef V
+#endif
+
+ const char* ToString() const {
+ return CompilationDependencyKindToString(kind);
+ }
+
+ const CompilationDependencyKind kind;
+};
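
The DEPENDENCY_LIST X-macro above generates the CompilationDependencyKind enum, the forward declarations, and the name table from a single list. The following is a minimal, self-contained sketch of that pattern under illustrative names (DEMO_DEPENDENCY_LIST, DemoDependencyKind, DemoKindToString are stand-ins, not V8 code):

// A deliberately tiny list; the real DEPENDENCY_LIST has one entry per
// dependency class.
#include <cstdio>

#define DEMO_DEPENDENCY_LIST(V) \
  V(ElementsKind)               \
  V(Protector)                  \
  V(StableMap)

enum DemoDependencyKind {
#define V(Name) k##Name,
  DEMO_DEPENDENCY_LIST(V)
#undef V
};

const char* DemoKindToString(DemoDependencyKind kind) {
#define V(Name) #Name "Dependency",
  static const char* const names[] = {DEMO_DEPENDENCY_LIST(V)};
#undef V
  return names[kind];
}

int main() {
  // Prints "StableMapDependency".
  std::printf("%s\n", DemoKindToString(kStableMap));
  return 0;
}

The same list is reused below for the Is##Name/As##Name helpers, so a new dependency class only needs one additional entry in the list.
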
+
+namespace {
+
class InitialMapDependency final : public CompilationDependency {
public:
InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
const MapRef& initial_map)
- : function_(function), initial_map_(initial_map) {
- }
+ : CompilationDependency(kInitialMap),
+ function_(function),
+ initial_map_(initial_map) {}
bool IsValid() const override {
Handle<JSFunction> function = function_.object();
@@ -55,7 +120,9 @@ class PrototypePropertyDependency final : public CompilationDependency {
PrototypePropertyDependency(JSHeapBroker* broker,
const JSFunctionRef& function,
const ObjectRef& prototype)
- : function_(function), prototype_(prototype) {
+ : CompilationDependency(kPrototypeProperty),
+ function_(function),
+ prototype_(prototype) {
DCHECK(function_.has_instance_prototype(broker->dependencies()));
DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
DCHECK(function_.instance_prototype(broker->dependencies())
@@ -92,7 +159,8 @@ class PrototypePropertyDependency final : public CompilationDependency {
class StableMapDependency final : public CompilationDependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {}
+ explicit StableMapDependency(const MapRef& map)
+ : CompilationDependency(kStableMap), map_(map) {}
bool IsValid() const override {
// TODO(v8:11670): Consider turn this back into a CHECK inside the
@@ -117,7 +185,8 @@ class ConstantInDictionaryPrototypeChainDependency final
explicit ConstantInDictionaryPrototypeChainDependency(
const MapRef receiver_map, const NameRef property_name,
const ObjectRef constant, PropertyKind kind)
- : receiver_map_(receiver_map),
+ : CompilationDependency(kConstantInDictionaryPrototypeChain),
+ receiver_map_(receiver_map),
property_name_{property_name},
constant_{constant},
kind_{kind} {
@@ -240,7 +309,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
const MapRef& map,
Representation representation,
FieldIndex index, const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDataProperty),
+ broker_(broker),
holder_(holder),
map_(map),
representation_(representation),
@@ -294,7 +364,8 @@ class OwnConstantDictionaryPropertyDependency final
const JSObjectRef& holder,
InternalIndex index,
const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDictionaryProperty),
+ broker_(broker),
holder_(holder),
map_(holder.map()),
index_(index),
@@ -345,7 +416,7 @@ class OwnConstantDictionaryPropertyDependency final
class ConsistentJSFunctionViewDependency final : public CompilationDependency {
public:
explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
- : function_(function) {}
+ : CompilationDependency(kConsistentJSFunctionView), function_(function) {}
bool IsValid() const override {
return function_.IsConsistentWithHeapState();
@@ -353,17 +424,14 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
void Install(Handle<Code> code) const override {}
-#ifdef DEBUG
- bool IsConsistentJSFunctionViewDependency() const override { return true; }
-#endif
-
private:
const JSFunctionRef function_;
};
class TransitionDependency final : public CompilationDependency {
public:
- explicit TransitionDependency(const MapRef& map) : map_(map) {
+ explicit TransitionDependency(const MapRef& map)
+ : CompilationDependency(kTransition), map_(map) {
DCHECK(map_.CanBeDeprecated());
}
@@ -383,7 +451,9 @@ class PretenureModeDependency final : public CompilationDependency {
public:
PretenureModeDependency(const AllocationSiteRef& site,
AllocationType allocation)
- : site_(site), allocation_(allocation) {}
+ : CompilationDependency(kPretenureMode),
+ site_(site),
+ allocation_(allocation) {}
bool IsValid() const override {
return allocation_ == site_.object()->GetAllocationType();
@@ -396,10 +466,6 @@ class PretenureModeDependency final : public CompilationDependency {
DependentCode::kAllocationSiteTenuringChangedGroup);
}
-#ifdef DEBUG
- bool IsPretenureModeDependency() const override { return true; }
-#endif
-
private:
AllocationSiteRef site_;
AllocationType allocation_;
@@ -409,7 +475,10 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor,
Representation representation)
- : map_(map), descriptor_(descriptor), representation_(representation) {}
+ : CompilationDependency(kFieldRepresentation),
+ map_(map),
+ descriptor_(descriptor),
+ representation_(representation) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -433,12 +502,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
DependentCode::kFieldRepresentationGroup);
}
-#ifdef DEBUG
- bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const override {
+ bool DependsOn(const Handle<Map>& receiver_map) const {
return map_.object().equals(receiver_map);
}
-#endif
private:
MapRef map_;
@@ -450,7 +516,10 @@ class FieldTypeDependency final : public CompilationDependency {
public:
FieldTypeDependency(const MapRef& map, InternalIndex descriptor,
const ObjectRef& type)
- : map_(map), descriptor_(descriptor), type_(type) {}
+ : CompilationDependency(kFieldType),
+ map_(map),
+ descriptor_(descriptor),
+ type_(type) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -481,7 +550,9 @@ class FieldTypeDependency final : public CompilationDependency {
class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& map, InternalIndex descriptor)
- : map_(map), descriptor_(descriptor) {}
+ : CompilationDependency(kFieldConstness),
+ map_(map),
+ descriptor_(descriptor) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -515,7 +586,10 @@ class GlobalPropertyDependency final : public CompilationDependency {
public:
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
- : cell_(cell), type_(type), read_only_(read_only) {
+ : CompilationDependency(kGlobalProperty),
+ cell_(cell),
+ type_(type),
+ read_only_(read_only) {
DCHECK_EQ(type_, cell_.property_details().cell_type());
DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
@@ -545,7 +619,8 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
- explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {}
+ explicit ProtectorDependency(const PropertyCellRef& cell)
+ : CompilationDependency(kProtector), cell_(cell) {}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
@@ -565,7 +640,7 @@ class ProtectorDependency final : public CompilationDependency {
class ElementsKindDependency final : public CompilationDependency {
public:
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
- : site_(site), kind_(kind) {
+ : CompilationDependency(kElementsKind), site_(site), kind_(kind) {
DCHECK(AllocationSite::ShouldTrack(kind_));
}
@@ -596,7 +671,10 @@ class OwnConstantElementDependency final : public CompilationDependency {
public:
OwnConstantElementDependency(const JSObjectRef& holder, uint32_t index,
const ObjectRef& element)
- : holder_(holder), index_(index), element_(element) {}
+ : CompilationDependency(kOwnConstantElement),
+ holder_(holder),
+ index_(index),
+ element_(element) {}
bool IsValid() const override {
DisallowGarbageCollection no_gc;
@@ -624,7 +702,9 @@ class InitialMapInstanceSizePredictionDependency final
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
- : function_(function), instance_size_(instance_size) {}
+ : CompilationDependency(kInitialMapInstanceSizePrediction),
+ function_(function),
+ instance_size_(instance_size) {}
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
@@ -651,6 +731,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
+} // namespace
+
void CompilationDependencies::RecordDependency(
CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
@@ -795,9 +877,19 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
broker_, holder, index, value));
}
+V8_INLINE void TraceInvalidCompilationDependency(
+ const CompilationDependency* d) {
+ DCHECK(FLAG_trace_compilation_dependencies);
+ DCHECK(!d->IsValid());
+ PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
+}
+
bool CompilationDependencies::Commit(Handle<Code> code) {
for (auto dep : dependencies_) {
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -812,6 +904,9 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// can call EnsureHasInitialMap, which can invalidate a StableMapDependency
// on the prototype object's map.
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -838,8 +933,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
#ifdef DEBUG
for (auto dep : dependencies_) {
CHECK_IMPLIES(!dep->IsValid(),
- dep->IsPretenureModeDependency() ||
- dep->IsConsistentJSFunctionViewDependency());
+ dep->IsPretenureMode() || dep->IsConsistentJSFunctionView());
}
#endif
@@ -848,6 +942,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
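
Putting the TraceInvalidCompilationDependency helper and the Commit loops above together: every recorded dependency is re-validated at commit time and, when the tracing flag is set, the offending dependency is printed before compilation is aborted. A rough standalone sketch of that flow under simplified stand-in types (Dep and trace_compilation_dependencies are illustrative, not the V8 classes or flag):

#include <cstdio>
#include <forward_list>

struct Dep {
  const char* name;
  bool valid;
  bool IsValid() const { return valid; }
  const char* ToString() const { return name; }
};

// Stand-in for FLAG_trace_compilation_dependencies.
bool trace_compilation_dependencies = true;

// Mirrors the first validation loop in Commit: trace and abort on the first
// invalid dependency.
bool Commit(std::forward_list<Dep>& deps) {
  for (const Dep& dep : deps) {
    if (!dep.IsValid()) {
      if (trace_compilation_dependencies) {
        std::printf("Compilation aborted due to invalid dependency: %s\n",
                    dep.ToString());
      }
      deps.clear();
      return false;
    }
  }
  return true;
}

int main() {
  std::forward_list<Dep> deps{{"StableMapDependency", true},
                              {"ProtectorDependency", false}};
  return Commit(deps) ? 0 : 1;
}
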
namespace {
+
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
@@ -862,8 +957,19 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
if (last_prototype.has_value() && proto.equals(*last_prototype)) break;
}
}
+
} // namespace
+#ifdef DEBUG
+#define V(Name) \
+ const Name##Dependency* CompilationDependency::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return static_cast<const Name##Dependency*>(this); \
+ }
+DEPENDENCY_LIST(V)
+#undef V
+#endif // DEBUG
+
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
@@ -944,6 +1050,17 @@ CompilationDependencies::FieldTypeDependencyOffTheRecord(
return zone_->New<FieldTypeDependency>(map, descriptor, type);
}
+#ifdef DEBUG
+// static
+bool CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map) {
+ return dep->IsFieldRepresentation() &&
+ dep->AsFieldRepresentation()->DependsOn(receiver_map);
+}
+#endif // DEBUG
+
+#undef DEPENDENCY_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
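
The debug-only As##Name definitions and the static IsFieldRepresentationDependencyOnMap helper above replace the old per-subclass virtual predicates with a stored kind tag plus a checked static_cast. A self-contained sketch of that dispatch shape, with illustrative types standing in for MapRef and the real dependency classes:

#include <cassert>

enum Kind { kFieldRepresentation, kOther };

struct Dependency {
  explicit Dependency(Kind k) : kind(k) {}
  bool IsFieldRepresentation() const { return kind == kFieldRepresentation; }
  const Kind kind;
};

struct FieldRepresentationDependency : Dependency {
  explicit FieldRepresentationDependency(int map_id)
      : Dependency(kFieldRepresentation), map_id_(map_id) {}
  bool DependsOn(int map_id) const { return map_id_ == map_id; }
  int map_id_;
};

// Same shape as CompilationDependencies::IsFieldRepresentationDependencyOnMap:
// check the tag first, then downcast.
bool IsFieldRepresentationDependencyOnMap(const Dependency* dep, int map_id) {
  return dep->IsFieldRepresentation() &&
         static_cast<const FieldRepresentationDependency*>(dep)->DependsOn(
             map_id);
}

int main() {
  FieldRepresentationDependency dep(42);
  assert(IsFieldRepresentationDependencyOnMap(&dep, 42));
  assert(!IsFieldRepresentationDependencyOnMap(&dep, 7));
  return 0;
}
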
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index be507c6843..f4b49878c8 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -154,6 +154,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
const MapRef& map, InternalIndex descriptor,
const ObjectRef& /* Contains a FieldType underneath. */ type) const;
+#ifdef DEBUG
+ static bool IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map);
+#endif // DEBUG
+
private:
Zone* const zone_;
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
deleted file mode 100644
index 852c7b7640..0000000000
--- a/deps/v8/src/compiler/compilation-dependency.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
-#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
-
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class MaybeObjectHandle;
-
-namespace compiler {
-
-class CompilationDependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(Handle<Code> code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
- virtual bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const {
- return false;
- }
- virtual bool IsConsistentJSFunctionViewDependency() const { return false; }
-#endif
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
diff --git a/deps/v8/src/compiler/decompression-optimizer.cc b/deps/v8/src/compiler/decompression-optimizer.cc
index 79e77fcee6..c0068489f7 100644
--- a/deps/v8/src/compiler/decompression-optimizer.cc
+++ b/deps/v8/src/compiler/decompression-optimizer.cc
@@ -15,8 +15,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
- return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
- opcode == IrOpcode::kProtectedLoad ||
+ return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->LoadImmutable(compressed_load_rep));
break;
- case IrOpcode::kPoisonedLoad:
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(compressed_load_rep));
- break;
case IrOpcode::kProtectedLoad:
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d7a0ca62dd..83eb6c215c 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -36,7 +36,6 @@ namespace internal {
namespace compiler {
enum class MaintainSchedule { kMaintain, kDiscard };
-enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
class EffectControlLinearizer {
public:
@@ -44,13 +43,11 @@ class EffectControlLinearizer {
JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
MaintainSchedule maintain_schedule,
JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- mask_array_index_(mask_array_index),
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -80,7 +77,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
@@ -338,7 +334,6 @@ class EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
- MaskArrayIndexEnable mask_array_index_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
@@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
- case IrOpcode::kPoisonIndex:
- result = LowerPoisonIndex(node);
- break;
case IrOpcode::kCheckClosure:
result = LowerCheckClosure(node, frame_state);
break;
@@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
- Node* index = node->InputAt(0);
- if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
- index = __ Word32PoisonOnSpeculation(index);
- }
- return index;
-}
-
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* frame_state) {
Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
@@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Word32And(bitfield3,
__ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
@@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
- check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ check, frame_state);
}
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
@@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* check = __ Uint32LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* check = __ Uint64LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -3696,9 +3679,14 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
- return ChangeIntPtrToSmi(
+ Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
+ return arguments_length;
}
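
LowerArgumentsLength (and LowerRestLength just below) now subtract the receiver slot when the frame's argument count includes it. A hedged sketch of that adjustment, assuming kJSArgcReceiverSlots is 1 whenever kJSArgcIncludesReceiver is true (both constants here are local stand-ins, not the V8 definitions):

#include <cassert>

// Local stand-ins for the constants referenced above.
constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// User-visible arguments length recovered from the raw frame argument count.
int ArgumentsLength(int raw_argc) {
  int length = raw_argc;
  if (kJSArgcIncludesReceiver) length -= kJSArgcReceiverSlots;
  return length;
}

// Rest length for a function with the given formal parameter count.
int RestLength(int raw_argc, int formal_parameter_count) {
  int rest = ArgumentsLength(raw_argc) - formal_parameter_count;
  return rest < 0 ? 0 : rest;  // mirrors the negative-value branch below
}

int main() {
  // f(a, b) pushes three slots when the receiver is counted in argc.
  assert(ArgumentsLength(3) == 2);
  assert(RestLength(3, 1) == 1);  // one rest argument
  assert(RestLength(3, 5) == 0);  // clamped at zero
  return 0;
}
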
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3711,6 +3699,10 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -4263,12 +4255,10 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
#endif // V8_INTL_SUPPORT
@@ -5776,8 +5766,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
- return __ LoadElement(AccessBuilder::ForTypedArrayElement(
- array_type, true, LoadSensitivity::kCritical),
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
data_ptr, index);
}
@@ -6796,26 +6785,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
-namespace {
-
-MaskArrayIndexEnable MaskArrayForPoisonLevel(
- PoisoningMitigationLevel poison_level) {
- return (poison_level != PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
-}
-
-} // namespace
-
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
@@ -6824,16 +6800,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
schedule);
EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kMaintain, broker);
- MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
- poison_level);
+ MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
SelectLowering select_lowering(&graph_assembler, js_graph->graph());
graph_assembler.AddInlineReducer(&memory_lowering);
graph_assembler.AddInlineReducer(&select_lowering);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index fca4899263..97467391e2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -26,7 +26,7 @@ class JSHeapBroker;
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
// Performs effect control linearization lowering in addition to machine
// lowering, producing a scheduled graph that is ready for instruction
@@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
V8_EXPORT_PRIVATE void LowerToMachineSchedule(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index bbc2049ae5..c5199f1e64 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -214,8 +214,11 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
ContinuationFrameStateMode mode) {
// Depending on {mode}, final parameters are added by the deoptimizer
// and aren't explicitly passed in the frame state.
- DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver
- stack_parameter_count + DeoptimizerParameterCountFor(mode));
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(name) +
+ (kJSArgcIncludesReceiver ? 0
+ : 1), // Add receiver if it is not included.
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 26ae88362d..6bfd6f8c22 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
effect(), control()));
}
-Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
- return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
- effect(), control()));
-}
-
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason,
- feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeIf(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
+ Node* condition, Node* frame_state) {
return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
- frame_state, is_safety_check);
+ frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
@@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint,
- IsSafetyCheck::kCriticalSafetyCheck);
+ BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 5efe6dd9c3..c9ddd63e71 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* Word32PoisonOnSpeculation(Node* value);
-
- Node* DeoptimizeIf(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
@@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check, Vars...);
+ BranchHint hint, Vars...);
void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
Node* if_false_control,
BasicBlock* if_true_block,
@@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
@@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars... vars) {
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check,
- Vars... vars) {
+ BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
- Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
- condition, control());
+ Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
Node* if_true_control = control_ =
graph()->NewNode(common()->IfTrue(), branch);
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index 1688a14a04..c246430de2 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/platform.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/protectors-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -41,7 +40,7 @@ namespace compiler {
//
// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject
// and the data is an instance of the corresponding (most-specific) subclass,
-// e.g. JSFunctionData, which provides serialized information about the
+// e.g. JSFunctionData, which provides serialized information about the
// object. Allows serialization from the background thread.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
@@ -257,13 +256,9 @@ bool PropertyCellData::Cache(JSHeapBroker* broker) {
}
}
- if (property_details.cell_type() == PropertyCellType::kConstant) {
- Handle<Object> value_again =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (*value != *value_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
+ if (property_details.cell_type() == PropertyCellType::kInTransition) {
+ DCHECK(!broker->IsMainThread());
+ return false;
}
ObjectData* value_data = broker->TryGetOrCreateData(value);
@@ -317,17 +312,6 @@ class JSObjectData : public JSReceiverData {
return object_create_map_;
}
- ObjectData* GetOwnConstantElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnFastDataProperty(
- JSHeapBroker* broker, Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy);
-
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
@@ -349,21 +333,6 @@ class JSObjectData : public JSReceiverData {
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object as non-writable and
- // non-configurable, or (2) are known not to (possibly they don't exist at
- // all). In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
- // Properties that either:
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- // For simplicity, this may in theory overlap with inobject_fields_.
- // For fast mode objects, the keys of the map are the property_index() values
- // of the respective property FieldIndex'es. For slow mode objects, the keys
- // are the dictionary indicies.
- ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
@@ -390,18 +359,6 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
namespace {
-base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
- Handle<Object> receiver,
- uint32_t index,
- bool constant_only) {
- LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return MakeRef(broker, it.GetDataValue());
- }
- return base::nullopt;
-}
-
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
JSHeapBroker* broker, JSObjectRef holder, Representation representation,
FieldIndex field_index) {
@@ -496,70 +453,6 @@ base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
} // namespace
-ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_constant_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_constant_elements_.push_back({index, result});
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(field_index.property_index());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about fast property with index "
- << field_index.property_index() << " on "
- << this);
- return nullptr;
- }
-
- // This call will always succeed on the main thread.
- CHECK(broker->IsMainThread());
- JSObjectRef object_ref = MakeRef(broker, Handle<JSObject>::cast(object()));
- ObjectRef property = GetOwnFastDataPropertyFromHeap(
- broker, object_ref, representation, field_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(field_index.property_index(), result));
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(dict_index.as_int());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about dictionary property with index "
- << dict_index.as_int() << " on " << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnDictionaryPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), dict_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(dict_index.as_int(), result));
- return result;
-}
-
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -625,28 +518,6 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
-
- ObjectData* bound_target_function() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_target_function_;
- }
- ObjectData* bound_this() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_this_;
- }
- ObjectData* bound_arguments() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_arguments_;
- }
-
- private:
- bool serialized_ = false;
-
- ObjectData* bound_target_function_ = nullptr;
- ObjectData* bound_this_ = nullptr;
- ObjectData* bound_arguments_ = nullptr;
};
class JSFunctionData : public JSObjectData {
@@ -659,10 +530,6 @@ class JSFunctionData : public JSObjectData {
bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
- bool has_feedback_vector() const {
- DCHECK(serialized_);
- return has_feedback_vector_;
- }
bool has_initial_map() const {
DCHECK(serialized_);
return has_initial_map_;
@@ -680,10 +547,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return context_;
}
- ObjectData* native_context() const {
- DCHECK(serialized_);
- return native_context_;
- }
MapData* initial_map() const {
DCHECK(serialized_);
return initial_map_;
@@ -700,10 +563,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return feedback_cell_;
}
- ObjectData* feedback_vector() const {
- DCHECK(serialized_);
- return feedback_vector_;
- }
int initial_map_instance_size_with_min_slack() const {
DCHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -740,19 +599,16 @@ class JSFunctionData : public JSObjectData {
using UsedFields = base::Flags<UsedField>;
UsedFields used_fields_;
- bool has_feedback_vector_ = false;
ObjectData* prototype_or_initial_map_ = nullptr;
bool has_initial_map_ = false;
bool has_instance_prototype_ = false;
bool PrototypeRequiresRuntimeLookup_ = false;
ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr; // Derives from context_.
MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_.
ObjectData* instance_prototype_ =
nullptr; // Derives from prototype_or_initial_map_.
ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell.
ObjectData* feedback_cell_ = nullptr;
int initial_map_instance_size_with_min_slack_; // Derives from
// prototype_or_initial_map_.
@@ -809,10 +665,6 @@ class MapData : public HeapObjectData {
return is_abandoned_prototype_map_;
}
- // Extra information.
- void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* FindRootMap() const;
-
void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
@@ -840,8 +692,7 @@ class MapData : public HeapObjectData {
bool has_extra_serialized_data() const {
return serialized_constructor_ || serialized_backpointer_ ||
- serialized_prototype_ || serialized_root_map_ ||
- serialized_for_element_store_;
+ serialized_prototype_ || serialized_for_element_store_;
}
private:
@@ -881,9 +732,6 @@ class MapData : public HeapObjectData {
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
- bool serialized_root_map_ = false;
- ObjectData* root_map_ = nullptr;
-
bool serialized_for_element_store_ = false;
};
@@ -938,16 +786,13 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
// guaranteed to see an initialized JSFunction object, and after
// initialization fields remain in a valid state.
- Context context = function->context(kRelaxedLoad);
- context_ = broker->GetOrCreateData(context, kAssumeMemoryFence);
- CHECK(context_->IsContext());
+ ContextRef context =
+ MakeRefAssumeMemoryFence(broker, function->context(kRelaxedLoad));
+ context_ = context.data();
- native_context_ = broker->GetOrCreateData(context.map().native_context(),
- kAssumeMemoryFence);
- CHECK(native_context_->IsNativeContext());
-
- SharedFunctionInfo shared = function->shared(kRelaxedLoad);
- shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence);
+ SharedFunctionInfoRef shared =
+ MakeRefAssumeMemoryFence(broker, function->shared(kRelaxedLoad));
+ shared_ = shared.data();
if (function->has_prototype_slot()) {
prototype_or_initial_map_ = broker->GetOrCreateData(
@@ -981,9 +826,10 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
if (has_initial_map_) {
has_instance_prototype_ = true;
- instance_prototype_ = broker->GetOrCreateData(
- Handle<Map>::cast(initial_map_->object())->prototype(),
- kAssumeMemoryFence);
+ instance_prototype_ =
+ MakeRefAssumeMemoryFence(
+ broker, Handle<Map>::cast(initial_map_->object())->prototype())
+ .data();
} else if (prototype_or_initial_map_->IsHeapObject() &&
!Handle<HeapObject>::cast(prototype_or_initial_map_->object())
->IsTheHole()) {
@@ -994,15 +840,9 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup();
- FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad);
- feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence);
-
- ObjectData* maybe_feedback_vector = broker->GetOrCreateData(
- feedback_cell.value(kAcquireLoad), kAssumeMemoryFence);
- if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) {
- has_feedback_vector_ = true;
- feedback_vector_ = maybe_feedback_vector;
- }
+ FeedbackCellRef feedback_cell = MakeRefAssumeMemoryFence(
+ broker, function->raw_feedback_cell(kAcquireLoad));
+ feedback_cell_ = feedback_cell.data();
#ifdef DEBUG
serialized_ = true;
@@ -1016,7 +856,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
Handle<JSFunction> f = Handle<JSFunction>::cast(object());
CHECK_EQ(*context_->object(), f->context());
- CHECK_EQ(*native_context_->object(), f->native_context());
CHECK_EQ(*shared_->object(), f->shared());
if (f->has_prototype_slot()) {
@@ -1080,22 +919,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
return false;
}
- if (has_used_field(kHasFeedbackVector) &&
- has_feedback_vector_ != f->has_feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector");
- return false;
- }
-
- if (has_feedback_vector_) {
- if (has_used_field(kFeedbackVector) &&
- *feedback_vector_->object() != f->feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector");
- return false;
- }
- } else {
- DCHECK_NULL(feedback_vector_);
- }
-
return true;
}
@@ -1269,61 +1092,16 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- DCHECK(!broker->is_concurrent_inlining());
-
- if (serialized_) return true;
- if (broker->StackHasOverflowed()) return false;
-
- TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
-
- // We don't immediately set {serialized_} in order to correctly handle the
- // case where a recursive call to this method reaches the stack limit.
-
- DCHECK_NULL(bound_target_function_);
- bound_target_function_ =
- broker->GetOrCreateData(function->bound_target_function());
- bool serialized_nested = true;
- if (!bound_target_function_->should_access_heap()) {
- if (bound_target_function_->IsJSBoundFunction()) {
- serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag);
- }
- }
- if (!serialized_nested) {
- // We couldn't serialize all nested bound functions due to stack
- // overflow. Give up.
- DCHECK(!serialized_);
- bound_target_function_ = nullptr; // Reset to sync with serialized_.
- return false;
- }
-
- serialized_ = true;
-
- DCHECK_NULL(bound_arguments_);
- bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
-
- DCHECK_NULL(bound_this_);
- bound_this_ = broker->GetOrCreateData(function->bound_this());
-
- return true;
-}
-
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind),
- inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()),
- own_properties_(broker->zone()) {}
+ inobject_fields_(broker->zone()) {}
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- own_elements_(broker->zone()) {}
+ : JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
@@ -1331,19 +1109,9 @@ class JSArrayData : public JSObjectData {
return length_;
}
- ObjectData* GetOwnElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
private:
bool serialized_ = false;
ObjectData* length_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
void JSArrayData::Serialize(JSHeapBroker* broker,
@@ -1358,52 +1126,11 @@ void JSArrayData::Serialize(JSHeapBroker* broker,
length_ = broker->GetOrCreateData(jsarray->length());
}
-ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, false);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_elements_.push_back({index, result});
- return result;
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- properties_(broker->zone()) {
- if (!broker->is_concurrent_inlining()) {
- is_detached_ = object->IsDetached();
- }
- }
-
- bool IsDetached() const {
- return is_detached_;
- }
-
- ObjectData* GetPropertyCell(
- JSHeapBroker* broker, ObjectData* name,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- // Only valid if not concurrent inlining.
- bool is_detached_ = false;
-
- // Properties that either
- // (1) are known to exist as property cells on the global object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
+ : JSObjectData(broker, storage, object, kind) {}
};
class JSGlobalProxyData : public JSObjectData {
@@ -1413,46 +1140,6 @@ class JSGlobalProxyData : public JSObjectData {
: JSObjectData(broker, storage, object, kind) {}
};
-namespace {
-
-base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
- Handle<Name> name) {
- base::Optional<PropertyCell> maybe_cell =
- ConcurrentLookupIterator::TryGetPropertyCell(
- broker->isolate(), broker->local_isolate_or_isolate(),
- broker->target_native_context().global_object().object(), name);
- if (!maybe_cell.has_value()) return {};
- return TryMakeRef(broker, *maybe_cell);
-}
-
-} // namespace
-
-ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
- ObjectData* name,
- SerializationPolicy policy) {
- CHECK_NOT_NULL(name);
- for (auto const& p : properties_) {
- if (p.first == name) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about global property " << name);
- return nullptr;
- }
-
- ObjectData* result = nullptr;
- base::Optional<PropertyCellRef> cell =
- GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
- if (cell.has_value()) {
- result = cell->data();
- if (!result->should_access_heap()) {
- result->AsPropertyCell()->Cache(broker);
- }
- }
- properties_.push_back({name, result});
- return result;
-}
-
#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
@@ -1540,19 +1227,6 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker,
return true;
}
-void MapData::SerializeRootMap(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_root_map_) return;
- serialized_root_map_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeRootMap");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(root_map_);
- root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
-}
-
-ObjectData* MapData::FindRootMap() const { return root_map_; }
-
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag tag,
int max_depth) {
@@ -1693,8 +1367,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
- target_native_context().Serialize(NotConcurrentInliningTag{this});
-
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
@@ -1838,6 +1510,19 @@ int ObjectRef::AsSmi() const {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
+bool MapRef::CanInlineElementAccess() const {
+ if (!IsJSObjectMap()) return false;
+ if (is_access_check_needed()) return false;
+ if (has_indexed_interceptor()) return false;
+ ElementsKind kind = elements_kind();
+ if (IsFastElementsKind(kind)) return true;
+ if (IsTypedArrayElementsKind(kind) && kind != BIGUINT64_ELEMENTS &&
+ kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
+ return false;
+}
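
MapRef::CanInlineElementAccess above admits fast elements kinds and all typed-array kinds except the 64-bit BigInt ones. A standalone sketch of the same decision logic over a plain stand-in struct (MapDesc and the simplified kind predicates are illustrative, not the real V8 helpers):

#include <cassert>

// Simplified elements kinds and predicates.
enum ElementsKind {
  PACKED_SMI_ELEMENTS,
  UINT8_ELEMENTS,
  BIGINT64_ELEMENTS,
  BIGUINT64_ELEMENTS,
  DICTIONARY_ELEMENTS
};

bool IsFastElementsKind(ElementsKind kind) {
  return kind == PACKED_SMI_ELEMENTS;
}
bool IsTypedArrayElementsKind(ElementsKind kind) {
  return kind == UINT8_ELEMENTS || kind == BIGINT64_ELEMENTS ||
         kind == BIGUINT64_ELEMENTS;
}

struct MapDesc {
  bool is_js_object_map;
  bool access_check_needed;
  bool has_indexed_interceptor;
  ElementsKind kind;
};

bool CanInlineElementAccess(const MapDesc& map) {
  if (!map.is_js_object_map) return false;
  if (map.access_check_needed) return false;
  if (map.has_indexed_interceptor) return false;
  if (IsFastElementsKind(map.kind)) return true;
  if (IsTypedArrayElementsKind(map.kind) && map.kind != BIGUINT64_ELEMENTS &&
      map.kind != BIGINT64_ELEMENTS) {
    return true;
  }
  return false;
}

int main() {
  assert(CanInlineElementAccess({true, false, false, UINT8_ELEMENTS}));
  assert(!CanInlineElementAccess({true, false, false, BIGINT64_ELEMENTS}));
  assert(!CanInlineElementAccess({true, true, false, PACKED_SMI_ELEMENTS}));
  return 0;
}
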
+
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
@@ -1931,6 +1616,11 @@ void RecordConsistentJSFunctionViewDependencyIfNeeded(
} // namespace
+base::Optional<FeedbackVectorRef> JSFunctionRef::feedback_vector(
+ CompilationDependencies* dependencies) const {
+ return raw_feedback_cell(dependencies).feedback_vector();
+}
+
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const {
if (data_->should_access_heap()) {
@@ -2096,25 +1786,21 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
}
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy) const {
- if (broker()->is_concurrent_inlining()) {
- String maybe_char;
- auto result = ConcurrentLookupIterator::TryGetOwnChar(
- &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
- index);
-
- if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
- << *this << " at index " << index);
- return {};
- }
+ uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ String maybe_char;
+ auto result = ConcurrentLookupIterator::TryGetOwnChar(
+ &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ index);
- DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return TryMakeRef(broker(), maybe_char);
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
+ return {};
}
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return TryMakeRef(broker(), maybe_char);
}
bool StringRef::SupportedStringKind() const {
@@ -2165,8 +1851,6 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
return object()->constant_elements().length();
}
-ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
-
base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
Handle<Object> value;
{
@@ -2234,26 +1918,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
// kBackgroundSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
- return MakeRef(broker(), result::cast(object()->name())); \
- }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR except that we force a direct heap access if
// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
// This is because we identified the method to be safe to use direct heap
// access, but the holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -2298,31 +1973,22 @@ uint64_t HeapNumberRef::value_as_bits() const {
return object()->value_as_bits(kRelaxedLoad);
}
-base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function()
- const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_target_function(),
- kAssumeMemoryFence);
- }
- return TryMakeRef<JSReceiver>(
- broker(), data()->AsJSBoundFunction()->bound_target_function());
+JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
-base::Optional<ObjectRef> JSBoundFunctionRef::bound_this() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_this(), kAssumeMemoryFence);
- }
- return TryMakeRef<Object>(broker(),
- data()->AsJSBoundFunction()->bound_this());
+
+ObjectRef JSBoundFunctionRef::bound_this() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
+
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
- }
- return FixedArrayRef(broker(),
- data()->AsJSBoundFunction()->bound_arguments());
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
// Immutable after initialization.
@@ -2354,8 +2020,6 @@ BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
@@ -2385,33 +2049,16 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
return object()->signature().IsUndefined(broker()->isolate());
}
-bool FunctionTemplateInfoRef::has_call_code() const {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- return !call_code.IsUndefined();
-}
-
HEAP_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map, SerializationPolicy policy) {
+ MapRef receiver_map) {
const HolderLookupResult not_found;
- // There are currently two ways we can see a FunctionTemplateInfo on the
- // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
- // AccessorPair. In both cases, the FTI is fully constructed on the main
- // thread before.
- // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
- // have to use the GC predicate to check whether objects are fully
- // initialized and safe to read.
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !object()->accept_any_receiver())) {
+ if (!receiver_map.IsJSObjectMap() || (receiver_map.is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
return not_found;
}
- if (!receiver_map.IsJSObjectMap()) return not_found;
-
- DCHECK(has_call_code());
-
Handle<FunctionTemplateInfo> expected_receiver_type;
{
DisallowGarbageCollection no_gc;
@@ -2424,17 +2071,11 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
-
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()});
- }
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value()) return not_found;
- if (prototype->IsNull()) return not_found;
-
+ if (!prototype.has_value() || prototype->IsNull()) return not_found;
if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
return not_found;
}
@@ -2457,6 +2098,7 @@ ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+ CHECK(HasBytecodeArray());
BytecodeArray bytecode_array;
if (!broker()->IsMainThread()) {
bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
@@ -2480,12 +2122,9 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
broker()->is_turboprop());
}
-base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- DisallowGarbageCollection no_gc;
+ObjectRef FeedbackCellRef::value() const {
DCHECK(data_->should_access_heap());
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value));
+ return MakeRefAssumeMemoryFence(broker(), object()->value(kAcquireLoad));
}
base::Optional<ObjectRef> MapRef::GetStrongValue(
@@ -2513,75 +2152,59 @@ base::Optional<HeapObjectRef> MapRef::prototype() const {
return HeapObjectRef(broker(), prototype_data);
}
-void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker(), tag);
+MapRef MapRef::FindRootMap() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->FindRootMap(broker()->isolate()));
}
-// TODO(solanes, v8:7790): Remove base::Optional from the return type when
-// deleting serialization.
-base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // TODO(solanes): Change TryMakeRef to MakeRef when Map is moved to
- // kNeverSerialized.
- // TODO(solanes, v8:7790): Consider caching the result of the root map.
- return TryMakeRef(broker(), object()->FindRootMap(broker()->isolate()));
+ObjectRef MapRef::GetConstructor() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
- ObjectData* map_data = data()->AsMap()->FindRootMap();
- if (map_data != nullptr) {
- return MapRef(broker(), map_data);
+ return ObjectRef(broker(), data()->AsMap()->GetConstructor());
+}
+
+HeapObjectRef MapRef::GetBackPointer() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(
+ broker(), HeapObject::cast(object()->GetBackPointer()));
}
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
+ return HeapObjectRef(broker(), ObjectRef::data()->AsMap()->GetBackPointer());
}
bool JSTypedArrayRef::is_on_heap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. with
- // release-store.
- return object()->is_on_heap(kAcquireLoad);
- }
- return data()->AsJSTypedArray()->data_ptr();
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Underlying field written 1. during initialization or 2. with release-store.
+ return object()->is_on_heap(kAcquireLoad);
}
size_t JSTypedArrayRef::length() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return object()->length();
- }
- return data()->AsJSTypedArray()->length();
+ // Immutable after initialization.
+ return object()->length();
}
HeapObjectRef JSTypedArrayRef::buffer() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return MakeRef<HeapObject>(broker(), object()->buffer());
- }
- return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+ // Immutable after initialization.
+ return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. protected by
- // the is_on_heap release/acquire semantics (external_pointer store
- // happens-before base_pointer store, and this external_pointer load
- // happens-after base_pointer load).
- STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
- return object()->DataPtr();
- }
- return data()->AsJSTypedArray()->data_ptr();
+ // Underlying field written 1. during initialization or 2. protected by the
+ // is_on_heap release/acquire semantics (external_pointer store happens-before
+ // base_pointer store, and this external_pointer load happens-after
+ // base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
+ return object()->DataPtr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -2642,32 +2265,6 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-void NativeContextRef::Serialize(NotConcurrentInliningTag tag) {
- // TODO(jgruber): Disable visitation if should_access_heap() once all
- // NativeContext element refs can be created on background threads. Until
- // then, we *must* iterate them and create refs at serialization-time (even
- // though NativeContextRef itself is never-serialized).
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
-#define SERIALIZE_MEMBER(type, name) \
- { \
- ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \
- if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \
- member_data->AsMap()->instance_type())) { \
- member_data->AsMap()->SerializeConstructor(broker(), tag); \
- } \
- }
- BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
-#undef SERIALIZE_MEMBER
-
- for (int i = Context::FIRST_FUNCTION_MAP_INDEX;
- i <= Context::LAST_FUNCTION_MAP_INDEX; i++) {
- MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap();
- if (!InstanceTypeChecker::IsContext(member_data->instance_type())) {
- member_data->SerializeConstructor(broker(), tag);
- }
- }
-}
-
ScopeInfoRef NativeContextRef::scope_info() const {
// The scope_info is immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
@@ -2777,25 +2374,18 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
- *elements_ref.object(), map().elements_kind(), index);
-
- if (!maybe_element.has_value()) return {};
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
+ *elements_ref.object(), map().elements_kind(), index);
+ if (!maybe_element.has_value()) return {};
- base::Optional<ObjectRef> result =
- TryMakeRef(broker(), maybe_element.value());
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantElement(*this, index, *result);
- }
- return result;
- } else {
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- return TryMakeRef<Object>(broker(), element);
+ base::Optional<ObjectRef> result =
+ TryMakeRef(broker(), maybe_element.value());
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantElement(*this, index, *result);
}
+ return result;
}
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
@@ -2844,109 +2434,82 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
- broker(), *this, field_representation, index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDataProperty(
- *this, map(), field_representation, index, *result);
- }
- return result;
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
+ broker(), *this, field_representation, index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDataProperty(
+ *this, map(), field_representation, index, *result);
}
- ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
- broker(), field_representation, index, policy);
- return TryMakeRef<Object>(broker(), property);
+ return result;
}
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy) const {
+ InternalIndex index, CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result =
- GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDictionaryProperty(*this, index,
- *result);
- }
- return result;
+ base::Optional<ObjectRef> result =
+ GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDictionaryProperty(*this, index, *result);
}
- ObjectData* property =
- data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
- CHECK_NE(property, nullptr);
- return ObjectRef(broker(), property);
+ return result;
}
ObjectRef JSArrayRef::GetBoilerplateLength() const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
// - boilerplates are published into the feedback vector.
- return length_unsafe();
+ // These facts also mean we can expect a valid value.
+ return length_unsafe().value();
}
-ObjectRef JSArrayRef::length_unsafe() const {
+base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
+ return TryMakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
} else {
return ObjectRef{broker(), data()->AsJSArray()->length()};
}
}
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Note: we'd like to check `elements_ref == elements()` here, but due to
- // concurrency this may not hold. The code below must be able to deal with
- // concurrent `elements` modifications.
-
- // Due to concurrency, the kind read here may not be consistent with
- // `elements_ref`. The caller has to guarantee consistency at runtime by
- // other means (e.g. through a runtime equality check or a compilation
- // dependency).
- ElementsKind elements_kind = map().elements_kind();
-
- // We only inspect fixed COW arrays, which may only occur for fast
- // smi/objects elements kinds.
- if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
- DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
-
- // As the name says, the `length` read here is unsafe and may not match
- // `elements`. We rely on the invariant that any `length` change will
- // also result in an `elements` change to make this safe. The `elements`
- // consistency check in the caller thus also guards the value of `length`.
- ObjectRef length_ref = length_unsafe();
-
- // Likewise we only deal with smi lengths.
- if (!length_ref.IsSmi()) return {};
-
- base::Optional<Object> result =
- ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(),
- elements_kind, length_ref.AsSmi(), index);
- if (!result.has_value()) return {};
-
- return TryMakeRef(broker(), result.value());
- } else {
- DCHECK(!data_->should_access_heap());
- DCHECK(!broker()->is_concurrent_inlining());
+ FixedArrayBaseRef elements_ref, uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Note: we'd like to check `elements_ref == elements()` here, but due to
+ // concurrency this may not hold. The code below must be able to deal with
+ // concurrent `elements` modifications.
- // Just to clarify that `elements_ref` is not used on this path.
- // GetOwnElement accesses the serialized `elements` field on its own.
- USE(elements_ref);
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. The caller has to guarantee consistency at runtime by
+ // other means (e.g. through a runtime equality check or a compilation
+ // dependency).
+ ElementsKind elements_kind = map().elements_kind();
- if (!elements(kRelaxedLoad).value().map().IsFixedCowArrayMap()) return {};
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // consistency check in the caller thus also guards the value of `length`.
+ base::Optional<ObjectRef> length_ref = length_unsafe();
+
+ if (!length_ref.has_value()) return {};
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref->IsSmi()) return {};
+
+ base::Optional<Object> result = ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
+ length_ref->AsSmi(), index);
+ if (!result.has_value()) return {};
+
+ return TryMakeRef(broker(), result.value());
}
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3062,15 +2625,22 @@ base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
return TryMakeRef(broker(), heap_object);
}
+base::Optional<FeedbackVectorRef> FeedbackCellRef::feedback_vector() const {
+ ObjectRef contents = value();
+ if (!contents.IsFeedbackVector()) return {};
+ return contents.AsFeedbackVector();
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- base::Optional<FeedbackVectorRef> feedback_vector = value();
- if (!feedback_vector.has_value()) return {};
- return feedback_vector->shared_function_info();
+ base::Optional<FeedbackVectorRef> vector = feedback_vector();
+ if (!vector.has_value()) return {};
+ return vector->shared_function_info();
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- return MakeRef(broker(), object()->shared_function_info());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->shared_function_info());
}
bool NameRef::IsUniqueName() const {
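FeedbackCellRef::value() now returns a plain ObjectRef (the cell always holds something), and the optional-returning helpers feedback_vector() and shared_function_info() defined above layer on top of it. A minimal usage sketch of that chain, assuming a FeedbackCellRef obtained elsewhere in the broker; it simply mirrors the wrappers shown above:

base::Optional<SharedFunctionInfoRef> TrySharedInfoSketch(FeedbackCellRef cell) {
  // The cell may not have been populated with feedback yet.
  base::Optional<FeedbackVectorRef> vector = cell.feedback_vector();
  if (!vector.has_value()) return {};
  // FeedbackVectorRef::shared_function_info() is non-optional (see above).
  return vector->shared_function_info();
}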
@@ -3143,20 +2713,6 @@ Handle<T> TinyRef<T>::object() const {
HEAP_BROKER_OBJECT_LIST(V)
#undef V
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
-}
-
-bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) {
- return true;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker(), tag);
-}
-
#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
const { \
@@ -3174,26 +2730,40 @@ bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
return data()->AsJSFunction()->Name(); \
}
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector,
- JSFunctionData::kHasFeedbackVector)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map,
- JSFunctionData::kHasInitialMap)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype,
- JSFunctionData::kHasInstancePrototype)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(
+// Like JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C but only depend on the
+// field in question if its recorded value is "relevant". This is in order to
+// tolerate certain state changes during compilation, e.g. from "has no feedback
+// vector" (in which case we would simply do less optimization) to "has feedback
+// vector".
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
+ Result, Name, UsedField, RelevantValue) \
+ Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ Result const result = data()->AsJSFunction()->Name(); \
+ if (result == RelevantValue) { \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ } \
+ return result; \
+ }
+
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(bool, has_initial_map,
+ JSFunctionData::kHasInitialMap,
+ true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
+ bool, has_instance_prototype, JSFunctionData::kHasInstancePrototype, true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
bool, PrototypeRequiresRuntimeLookup,
- JSFunctionData::kPrototypeRequiresRuntimeLookup)
+ JSFunctionData::kPrototypeRequiresRuntimeLookup, false)
+
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map,
JSFunctionData::kInitialMap)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype,
JSFunctionData::kInstancePrototype)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell,
JSFunctionData::kFeedbackCell)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector,
- JSFunctionData::kFeedbackVector)
BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
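The RELEVANT_C variant above records the consistency dependency only when the serialized value is the one optimizations actually rely on, so a state change in the harmless direction (e.g. a function acquiring an initial map mid-compilation) does not invalidate the compile. Roughly what the has_initial_map instantiation expands to; IF_ACCESS_FROM_HEAP_C is defined elsewhere in this file and is left unexpanded here:

bool JSFunctionRef::has_initial_map(
    CompilationDependencies* dependencies) const {
  IF_ACCESS_FROM_HEAP_C(has_initial_map);
  bool const result = data()->AsJSFunction()->has_initial_map();
  if (result == true) {  // RelevantValue: only a "true" answer is load-bearing.
    RecordConsistentJSFunctionViewDependencyIfNeeded(
        broker(), *this, data()->AsJSFunction(),
        JSFunctionData::kHasInitialMap);
  }
  return result;
}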
@@ -3203,6 +2773,11 @@ CodeRef JSFunctionRef::code() const {
return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
}
+NativeContextRef JSFunctionRef::native_context() const {
+ return MakeRefAssumeMemoryFence(broker(),
+ context().object()->native_context());
+}
+
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (!object()->IsApiFunction()) return {};
@@ -3269,23 +2844,6 @@ void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(tag));
}
-void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Nothing to do.
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker(), tag);
- }
-}
-
-bool JSTypedArrayRef::serialized() const {
- if (data_->should_access_heap()) return true;
- if (broker()->is_concurrent_inlining()) return true;
- if (data_->AsJSTypedArray()->serialized()) return true;
- TRACE_BROKER_MISSING(broker(), "data for JSTypedArray " << this);
- return false;
-}
-
bool PropertyCellRef::Cache() const {
if (data_->should_access_heap()) return true;
CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
@@ -3293,18 +2851,6 @@ bool PropertyCellRef::Cache() const {
return data()->AsPropertyCell()->Cache(broker());
}
-void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- // CallHandlerInfo::data may still hold a serialized heap object, so we
- // have to make the broker aware of it.
- // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
- Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
- broker()->isolate());
- if (call_code->IsCallHandlerInfo()) {
- broker()->GetOrCreateData(Handle<CallHandlerInfo>::cast(call_code)->data());
- }
-}
-
bool NativeContextRef::GlobalIsDetached() const {
base::Optional<ObjectRef> proxy_proto =
global_proxy_object().map().prototype();
@@ -3312,14 +2858,15 @@ bool NativeContextRef::GlobalIsDetached() const {
}
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return GetPropertyCellFromHeap(broker(), name.object());
- }
-
- ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data(), policy);
- return TryMakeRef<PropertyCell>(broker(), property_cell_data);
+ NameRef const& name) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<PropertyCell> maybe_cell =
+ ConcurrentLookupIterator::TryGetPropertyCell(
+ broker()->isolate(), broker()->local_isolate_or_isolate(),
+ broker()->target_native_context().global_object().object(),
+ name.object());
+ if (!maybe_cell.has_value()) return {};
+ return TryMakeRef(broker(), *maybe_cell);
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
@@ -3347,13 +2894,11 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef BIMODAL_ACCESSOR_WITH_FLAG
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
-#undef IF_ACCESS_FROM_HEAP_WITH_FLAG
#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
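For reference, the surviving WITH_FLAG_C accessors bake the "direct heap access when concurrent inlining is enabled" check into each generated method. A rough expansion of BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type), reconstructed from the macro fragments visible in the hunks above; the serialized-data fallback on the last line lies outside those hunks and is therefore an assumption:

InstanceType MapRef::instance_type() const {
  // IF_ACCESS_FROM_HEAP_WITH_FLAG_C(instance_type):
  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
    return object()->instance_type();
  }
  // Fallback to the serialized MapData (assumed; not shown in the hunks).
  return ObjectRef::data()->AsMap()->instance_type();
}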
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index d580671f6d..4644071ea5 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -55,8 +55,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
-enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
-
// Clarifies in function signatures that a method may only be called when
// concurrent inlining is disabled.
class NotConcurrentInliningTag final {
@@ -272,6 +270,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
+ friend class JSFunctionData;
friend class JSGlobalObjectData;
friend class JSGlobalObjectRef;
friend class JSHeapBroker;
@@ -395,9 +394,7 @@ class JSObjectRef : public JSReceiverRef {
// against inconsistency due to weak memory concurrency.
base::Optional<ObjectRef> GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// The direct-read implementation of the above, extracted into a helper since
// it's also called from compilation-dependency validation. This helper is
// guaranteed to not create new Ref instances.
@@ -412,16 +409,12 @@ class JSObjectRef : public JSReceiverRef {
// property at code finalization time.
base::Optional<ObjectRef> GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// Return the value of the dictionary property at {index} in the dictionary
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ InternalIndex index, CompilationDependencies* dependencies) const;
// When concurrent inlining is enabled, reads the elements through a direct
// relaxed read. This is to ease the transition to unserialized (or
@@ -451,12 +444,8 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- bool Serialize(NotConcurrentInliningTag tag);
-
- // TODO(neis): Make return types non-optional once JSFunction is no longer
- // fg-serialized.
- base::Optional<JSReceiverRef> bound_target_function() const;
- base::Optional<ObjectRef> bound_this() const;
+ JSReceiverRef bound_target_function() const;
+ ObjectRef bound_this() const;
FixedArrayRef bound_arguments() const;
};
@@ -474,8 +463,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ CodeRef code() const;
- bool has_feedback_vector(CompilationDependencies* dependencies) const;
bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
CompilationDependencies* dependencies) const;
@@ -484,12 +473,10 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
MapRef initial_map(CompilationDependencies* dependencies) const;
int InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const;
- FeedbackVectorRef feedback_vector(
- CompilationDependencies* dependencies) const;
FeedbackCellRef raw_feedback_cell(
CompilationDependencies* dependencies) const;
-
- CodeRef code() const;
+ base::Optional<FeedbackVectorRef> feedback_vector(
+ CompilationDependencies* dependencies) const;
};
class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
@@ -535,9 +522,6 @@ class ContextRef : public HeapObjectRef {
base::Optional<ObjectRef> get(int index) const;
};
-// TODO(jgruber): Don't serialize NativeContext fields once all refs can be
-// created concurrently.
-
#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
V(JSFunction, array_function) \
V(JSFunction, bigint_function) \
@@ -629,13 +613,12 @@ class FeedbackCellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
Handle<FeedbackCell> object() const;
- base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- // TODO(mvstanton): Once we allow inlining of functions we didn't see
- // during serialization, we do need to ensure that any feedback vector
- // we read here has been fully initialized (ie, store-ordered into the
- // cell).
- base::Optional<FeedbackVectorRef> value() const;
+ ObjectRef value() const;
+
+ // Convenience wrappers around {value()}:
+ base::Optional<FeedbackVectorRef> feedback_vector() const;
+ base::Optional<SharedFunctionInfoRef> shared_function_info() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -729,6 +712,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
OddballType oddball_type() const;
+ bool CanInlineElementAccess() const;
+
// Note: Only returns a value if the requested elements kind matches the
// current kind, or if the current map is an unmodified JSArray initial map.
base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
@@ -752,6 +737,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
+ DescriptorArrayRef instance_descriptors() const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
@@ -760,11 +746,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
- DescriptorArrayRef instance_descriptors() const;
-
- void SerializeRootMap(NotConcurrentInliningTag tag);
- base::Optional<MapRef> FindRootMap() const;
-
+ MapRef FindRootMap() const;
ObjectRef GetConstructor() const;
};
@@ -785,17 +767,10 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
bool is_signature_undefined() const;
bool accept_any_receiver() const;
- // The following returns true if the CallHandlerInfo is present.
- bool has_call_code() const;
-
- void SerializeCallCode(NotConcurrentInliningTag tag);
base::Optional<CallHandlerInfoRef> call_code() const;
ZoneVector<Address> c_functions() const;
ZoneVector<const CFunctionInfo*> c_signatures() const;
-
- HolderLookupResult LookupHolderOfExpectedType(
- MapRef receiver_map,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map);
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -821,12 +796,6 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
- ObjectRef get(int i) const;
-
- // As above but may fail if Ref construction is not possible (e.g. for
- // serialized types on the background thread).
- // TODO(jgruber): Remove once all Ref types are never-serialized or
- // background-serialized and can thus be created on background threads.
base::Optional<ObjectRef> TryGet(int i) const;
};
@@ -894,15 +863,14 @@ class JSArrayRef : public JSObjectRef {
// storage and {index} is known to be an own data property.
// Note the value returned by this function is only valid if we ensure at
// runtime that the backing store has not changed.
- base::Optional<ObjectRef> GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetOwnCowElement(FixedArrayBaseRef elements_ref,
+ uint32_t index) const;
// The `JSArray::length` property; not safe to use in general, but can be
// used in some special cases that guarantee a valid `length` value despite
- // concurrent reads.
- ObjectRef length_unsafe() const;
+ // concurrent reads. The result needs to be optional in case the
+ // return value was created too recently to pass the gc predicate.
+ base::Optional<ObjectRef> length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -918,22 +886,23 @@ class ScopeInfoRef : public HeapObjectRef {
ScopeInfoRef OuterScopeInfo() const;
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_simple_parameters) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count_without_receiver) \
+ V(bool, IsDontAdaptArguments) \
+ V(bool, has_simple_parameters) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
@@ -966,9 +935,7 @@ class StringRef : public NameRef {
// With concurrent inlining on, we return base::nullopt due to not being able
// to use LookupIterator in a thread-safe way.
- base::Optional<ObjectRef> GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(uint32_t index) const;
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
@@ -1002,10 +969,6 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
void* data_ptr() const;
-
- void Serialize(NotConcurrentInliningTag tag);
- bool serialized() const;
-
HeapObjectRef buffer() const;
};
@@ -1042,9 +1005,7 @@ class JSGlobalObjectRef : public JSObjectRef {
bool IsDetachedFrom(JSGlobalProxyRef const& proxy) const;
// Can be called even when there is no property cell for the given name.
- base::Optional<PropertyCellRef> GetPropertyCell(
- NameRef const& name, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name) const;
};
class JSGlobalProxyRef : public JSObjectRef {
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 28eb30969c..00930998dd 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -944,29 +944,31 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
- MachineType type = AtomicOpType(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
DefaultLowering(node, true);
- if (type == MachineType::Uint64()) {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ if (params.representation() == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairLoad(params.order()));
ReplaceNodeWithProjections(node);
} else {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
case IrOpcode::kWord64AtomicStore: {
DCHECK_EQ(5, node->InputCount());
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- if (rep == MachineRepresentation::kWord64) {
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ if (params.representation() == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* value = node->InputAt(2);
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairStore(params.order()));
} else {
DefaultLowering(node, true);
- NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
}
break;
}
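The Word64 atomic lowering above now consumes a parameter bundle rather than a bare MachineType / MachineRepresentation, which is what lets the memory order survive lowering and be forwarded into the Word32 pair operations. A minimal sketch of such a bundle, modeling only the representation()/order() accessors these hunks use (the real AtomicLoadParameters/AtomicStoreParameters classes carry additional state, and the store-side bundle exposes a MachineRepresentation instead of a MachineType):

class AtomicLoadParametersSketch {
 public:
  AtomicLoadParametersSketch(MachineType representation, AtomicMemoryOrder order)
      : representation_(representation), order_(order) {}

  MachineType representation() const { return representation_; }
  AtomicMemoryOrder order() const { return order_; }

 private:
  MachineType representation_;
  AtomicMemoryOrder order_;
};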
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 3dcdc6a33e..91197ead1e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
TNode<HeapObject> elements =
LoadField<HeapObject>(AccessBuilder::ForJSObjectElements(), o);
TNode<Object> value = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical),
- elements, index);
+ AccessBuilder::ForFixedArrayElement(kind), elements, index);
return std::make_pair(index, value);
}
@@ -2099,7 +2098,8 @@ FrameState CreateArtificialFrameState(
FrameState PromiseConstructorFrameState(
const PromiseCtorFrameStateParams& params, CommonOperatorBuilder* common,
Graph* graph) {
- DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
+ DCHECK_EQ(1,
+ params.shared.internal_formal_parameter_count_without_receiver());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
@@ -3639,8 +3639,6 @@ Reduction JSCallReducer::ReduceCallApiFunction(
FunctionTemplateInfoRef function_template_info(
shared.function_template_info().value());
- if (!function_template_info.has_call_code()) return NoChange();
-
if (function_template_info.accept_any_receiver() &&
function_template_info.is_signature_undefined()) {
// We might be able to
@@ -3764,7 +3762,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(argc)));
node->ReplaceInput(3, receiver); // Update receiver input.
node->ReplaceInput(6 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
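Throughout this patch, argument counts handed to call and construct stubs are wrapped in JSParameterCount, and SharedFunctionInfo is queried via internal_formal_parameter_count_without_receiver; both reflect the convention in which the JS argument count includes the receiver slot. A sketch of the intent only, with a hypothetical stand-in name, not V8's actual (configuration-dependent) definition:

constexpr int kReceiverSlotsSketch = 1;  // assumption: one implicit receiver slot
constexpr int JSParameterCountSketch(int count_without_receiver) {
  return count_without_receiver + kReceiverSlotsSketch;
}
static_assert(JSParameterCountSketch(2) == 3,
              "two explicit arguments plus the receiver");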
@@ -4039,7 +4038,8 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
return NoChange();
}
formal_parameter_count =
- MakeRef(broker(), shared).internal_formal_parameter_count();
+ MakeRef(broker(), shared)
+ .internal_formal_parameter_count_without_receiver();
}
if (type == CreateArgumentsType::kMappedArguments) {
@@ -4309,13 +4309,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, function.shared());
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
- base::Optional<ObjectRef> bound_this = function.bound_this();
- if (!bound_this.has_value()) return NoChange();
+ ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
- bound_this->IsNullOrUndefined()
+ bound_this.IsNullOrUndefined()
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
@@ -4336,9 +4332,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSCallNode::TargetIndex());
- NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(*bound_this),
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
@@ -4372,13 +4368,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, p.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- if (cell.shared_function_info().has_value()) {
- return ReduceJSCall(node, *cell.shared_function_info());
- } else {
+ base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ if (!shared.has_value()) {
TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
<< cell << " has no FeedbackVector");
return NoChange();
}
+ return ReduceJSCall(node, *shared);
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4457,7 +4453,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell =
MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().has_value()) {
+ // TODO(neis): This check seems unnecessary.
+ if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
Node* target_closure = effect =
@@ -5055,9 +5052,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
+ JSReceiverRef bound_target_function = function.bound_target_function();
FixedArrayRef bound_arguments = function.bound_arguments();
const int bound_arguments_length = bound_arguments.length();
@@ -5076,20 +5071,20 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
// Patch {node} to use [[BoundTargetFunction]]
// as new.target if {new_target} equals {target}.
if (target == new_target) {
node->ReplaceInput(n.NewTargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
} else {
node->ReplaceInput(
n.NewTargetIndex(),
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->ReferenceEqual(),
target, new_target),
- jsgraph()->Constant(*bound_target_function),
+ jsgraph()->Constant(bound_target_function),
new_target));
}
@@ -6373,9 +6368,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect = graph()->NewNode(string_access_operator, receiver,
- masked_index, effect, control);
+ index, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -6433,11 +6427,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
- Node* masked_position = graph()->NewNode(
- simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_position, etrue, if_true);
+ unsigned_position, etrue, if_true);
Node* search_first =
jsgraph()->Constant(str.GetFirstChar().value());
@@ -6488,10 +6480,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* value = effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
- effect, control);
+ Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, effect, control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
ReplaceWithValue(node, value, effect, control);
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 02e5cb1710..36217ca13b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -103,7 +103,16 @@ base::Optional<ContextRef> GetSpecializationContext(
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(node->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(node->op()));
if (object.IsContext()) return object.AsContext();
break;
}
@@ -231,7 +240,16 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
switch (context->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(context->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(context->op()));
if (object.IsContext()) {
return find_context(object.AsContext());
}
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 414977eb7d..60c9017fc2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -197,11 +197,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kUnmappedArguments,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kUnmappedArguments,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -222,14 +222,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
- Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()));
+ Node* const rest_length = graph()->NewNode(simplified()->RestLength(
+ shared.internal_formal_parameter_count_without_receiver()));
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kRestParameter,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kRestParameter,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -332,7 +332,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- int start_index = shared.internal_formal_parameter_count();
+ int start_index =
+ shared.internal_formal_parameter_count_without_receiver();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -401,7 +402,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Allocate a register file.
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
- int parameter_count_no_receiver = shared.internal_formal_parameter_count();
+ int parameter_count_no_receiver =
+ shared.internal_formal_parameter_count_without_receiver();
int length = parameter_count_no_receiver +
shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
@@ -466,9 +468,10 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map,
- initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind)));
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind));
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Because CheckBounds performs implicit conversion from string to number, an
// additional CheckNumber is required to behave correctly for calls with a
@@ -525,8 +528,12 @@ Reduction JSCreateLowering::ReduceNewArray(
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
+
DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
@@ -566,8 +573,11 @@ Reduction JSCreateLowering::ReduceNewArray(
// Determine the appropriate elements kind.
DCHECK(IsFastElementsKind(elements_kind));
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -1479,7 +1489,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
// If there is no aliasing, the arguments object elements are not special in
// any way, we can just return an unmapped backing store instead.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return TryAllocateArguments(effect, control, frame_state);
}
@@ -1545,7 +1556,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return graph()->NewNode(
simplified()->NewArgumentsElements(
@@ -1713,7 +1725,6 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
Type::Any(),
MachineType::AnyTagged(),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
const_field_info};
// Note: the use of RawInobjectPropertyAt (vs. the higher-level
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index bbc47e45ad..08896e3f11 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -586,7 +586,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
// between top of stack and JS arguments.
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
- Node* stub_arity = jsgraph()->Int32Constant(arity);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arity));
base::Optional<AllocationSiteRef> const site = p.site(broker());
Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
: jsgraph()->UndefinedConstant();
@@ -820,7 +820,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
@@ -843,7 +843,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* receiver = jsgraph()->UndefinedConstant();
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 0, stub_code);
@@ -906,7 +906,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
Node* receiver = jsgraph()->UndefinedConstant();
DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
node->RemoveInput(n.FeedbackVectorIndex());
@@ -930,7 +931,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
@@ -951,7 +952,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -1009,7 +1010,8 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
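Editor's note: the JSGenericLowering hunks above all make the same change: the arity baked into the stub call now goes through JSParameterCount(...) so that the receiver slot is counted. A minimal sketch of the intended arithmetic, assuming the helper simply adds one receiver slot (the real constant is defined outside this diff):

  #include <cassert>

  // Assumption for illustration only: the receiver occupies one extra slot.
  constexpr int kJSArgcReceiverSlots = 1;

  constexpr int JSParameterCountSketch(int argc_without_receiver) {
    return argc_without_receiver + kJSArgcReceiverSlots;
  }

  int main() {
    // A call f(a, b) has arity 2 without the receiver, 3 once it is counted.
    assert(JSParameterCountSketch(2) == 3);
    return 0;
  }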
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index dc34bcae6d..0007a582a0 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -50,12 +50,10 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
- minimorphic_property_access_infos_(zone()),
- typed_array_string_tags_(zone()) {
+ minimorphic_property_access_infos_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
@@ -220,20 +218,6 @@ bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
-bool CanInlineElementAccess(MapRef const& map) {
- if (!map.IsJSObjectMap()) return false;
- if (map.is_access_check_needed()) return false;
- if (map.has_indexed_interceptor()) return false;
- ElementsKind const elements_kind = map.elements_kind();
- if (IsFastElementsKind(elements_kind)) return true;
- if (IsTypedArrayElementsKind(elements_kind) &&
- elements_kind != BIGUINT64_ELEMENTS &&
- elements_kind != BIGINT64_ELEMENTS) {
- return true;
- }
- return false;
-}
-
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
: kind_(kind), slot_kind_(slot_kind) {}
@@ -423,7 +407,10 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
for (Handle<Map> map : group) {
- if (!MakeRef(broker, map).IsStringMap()) return false;
+ // We assume a memory fence because {map} was read earlier from
+ // the feedback vector and was store ordered on insertion into the
+ // vector.
+ if (!MakeRefAssumeMemoryFence(broker, map).IsStringMap()) return false;
}
}
return true;
@@ -880,11 +867,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (MapRef& map : maps) {
- if (!is_concurrent_inlining()) {
- map.SerializeRootMap(NotConcurrentInliningTag{this});
- }
-
- if (CanInlineElementAccess(map) &&
+ if (map.CanInlineElementAccess() &&
IsFastElementsKind(map.elements_kind()) &&
GetInitialFastElementsKind() != map.elements_kind()) {
possible_transition_targets.push_back(map.object());
@@ -992,9 +975,13 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
+ // We can assume a memory fence on {source.vector} because in production,
+ // the vector has already passed the gc predicate. Unit tests create
+ // FeedbackSource objects directly from handles, but they run on
+ // the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
- << MakeRef<Object>(this, source.vector));
+ << MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
}
return access_info;
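Editor's note: both MakeRefAssumeMemoryFence call sites above rely on the same property: the object was published with release semantics (store-ordered) before it became reachable through the feedback vector, so an acquire read is enough to observe a fully initialized object. A generic, standalone illustration of that publication pattern with std::atomic; the Map/slot names are placeholders, not broker API:

  #include <atomic>
  #include <thread>

  struct Map { int elements_kind = 0; };

  std::atomic<Map*> slot{nullptr};  // stands in for a feedback-vector slot

  void Publisher() {
    Map* m = new Map();
    m->elements_kind = 1;
    // Release store: every write to *m above is visible to a thread that
    // later observes this pointer with an acquire load.
    slot.store(m, std::memory_order_release);
  }

  void Reader() {
    // Acquire load: once the pointer is seen, the Map's fields are already
    // initialized, so no re-serialization or locking is needed.
    if (Map* m = slot.load(std::memory_order_acquire)) {
      (void)m->elements_kind;
    }
  }

  int main() {
    std::thread publisher(Publisher), reader(Reader);
    publisher.join();
    reader.join();
    delete slot.load();  // cleanup for the sketch
    return 0;
  }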
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 91b94bebb5..bf9b9aaac0 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -117,7 +117,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
- bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
@@ -173,7 +172,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const* feedback);
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
- // TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
@@ -291,8 +289,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void IncrementTracingIndentation();
void DecrementTracingIndentation();
- RootIndexMap const& root_index_map() { return root_index_map_; }
-
// Locks {mutex} through the duration of this scope iff it is the first
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
@@ -389,8 +385,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void CollectArrayAndObjectPrototypes();
- PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -419,7 +413,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
Isolate* const isolate_;
- Zone* const zone_ = nullptr;
+ Zone* const zone_;
base::Optional<NativeContextRef> target_native_context_;
RefsMap* refs_;
RootIndexMap root_index_map_;
@@ -429,13 +423,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
- PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
@@ -446,8 +438,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
- ZoneVector<ObjectData*> typed_array_string_tags_;
-
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
@@ -460,7 +450,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// Likewise for boilerplate migrations.
int boilerplate_migration_mutex_depth_ = 0;
- static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
static constexpr uint32_t kMinimalRefsBucketCount = 8;
STATIC_ASSERT(base::bits::IsPowerOfTwo(kMinimalRefsBucketCount));
static constexpr uint32_t kInitialRefsBucketCount = 1024;
@@ -487,21 +476,6 @@ class V8_NODISCARD TraceScope {
JSHeapBroker* const broker_;
};
-#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
- optionally_something) \
- auto optionally_something_ = optionally_something; \
- if (!optionally_something_) \
- return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__); \
- something_var = *optionally_something_;
-
-class Reduction;
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line);
-
-// Miscellaneous definitions that should be moved elsewhere once concurrent
-// compilation is finished.
-bool CanInlineElementAccess(MapRef const& map);
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 177f35c7a0..c6a223b600 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -27,8 +27,40 @@ bool IsSmall(int const size) {
}
bool CanConsiderForInlining(JSHeapBroker* broker,
- SharedFunctionInfoRef const& shared,
- FeedbackVectorRef const& feedback_vector) {
+ FeedbackCellRef const& feedback_cell) {
+ base::Optional<FeedbackVectorRef> feedback_vector =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector.has_value()) {
+ TRACE("Cannot consider " << feedback_cell
+ << " for inlining (no feedback vector)");
+ return false;
+ }
+ SharedFunctionInfoRef shared = feedback_vector->shared_function_info();
+
+ if (!shared.HasBytecodeArray()) {
+ TRACE("Cannot consider " << shared << " for inlining (no bytecode)");
+ return false;
+ }
+ // Ensure we have a persistent handle to the bytecode in order to avoid
+ // flushing it during the remaining compilation.
+ shared.GetBytecodeArray();
+
+ // Read feedback vector again in case it got flushed before we were able to
+ // prevent flushing above.
+ base::Optional<FeedbackVectorRef> feedback_vector_again =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector_again.has_value()) {
+ TRACE("Cannot consider " << shared << " for inlining (no feedback vector)");
+ return false;
+ }
+ if (!feedback_vector_again->equals(*feedback_vector)) {
+ // The new feedback vector likely contains lots of uninitialized slots, so
+ // it doesn't make much sense to inline this function now.
+ TRACE("Not considering " << shared
+ << " for inlining (feedback vector changed)");
+ return false;
+ }
+
SharedFunctionInfo::Inlineability inlineability = shared.GetInlineability();
if (inlineability != SharedFunctionInfo::kIsInlineable) {
TRACE("Cannot consider "
@@ -36,22 +68,20 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- DCHECK(shared.HasBytecodeArray());
- TRACE("Considering " << shared << " for inlining with " << feedback_vector);
+ TRACE("Considering " << shared << " for inlining with " << *feedback_vector);
return true;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- if (!function.has_feedback_vector(broker->dependencies())) {
- TRACE("Cannot consider " << function
- << " for inlining (no feedback vector)");
- return false;
- }
-
- return CanConsiderForInlining(
- broker, function.shared(),
- function.feedback_vector(broker->dependencies()));
+ FeedbackCellRef feedback_cell =
+ function.raw_feedback_cell(broker->dependencies());
+ bool const result = CanConsiderForInlining(broker, feedback_cell);
+ if (result) {
+ CHECK(
+ function.shared().equals(feedback_cell.shared_function_info().value()));
+ }
+ return result;
}
} // namespace
@@ -65,8 +95,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
HeapObjectMatcher m(callee);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
- out.functions[0] = m.Ref(broker()).AsJSFunction();
- JSFunctionRef function = out.functions[0].value();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ out.functions[0] = function;
if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
out.num_functions = 1;
@@ -98,10 +128,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
- out.shared_info = shared_info;
- if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
}
out.num_functions = 1;
return out;
@@ -109,13 +138,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsJSCreateClosure()) {
DCHECK(!out.functions[0].has_value());
JSCreateClosureNode n(callee);
- CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info = p.shared_info(broker());
- out.shared_info = shared_info;
- if (feedback_cell.value().has_value() &&
- CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
+ CHECK(out.shared_info->equals(n.Parameters().shared_info(broker())));
}
out.num_functions = 1;
return out;
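Editor's note: the new CanConsiderForInlining overload defends against concurrent bytecode flushing with a read / pin / re-read sequence: fetch the feedback vector, take a persistent handle to the bytecode so it can no longer be flushed, then fetch the vector again and bail out if it changed in between. A simplified standalone sketch of that double-read pattern (placeholder types, not the broker or FeedbackCell API):

  #include <atomic>
  #include <optional>

  struct FeedbackVector { int id; };

  std::atomic<FeedbackVector*> cell{nullptr};  // stands in for the FeedbackCell

  std::optional<FeedbackVector*> ReadVector() {
    FeedbackVector* v = cell.load(std::memory_order_acquire);
    if (v == nullptr) return std::nullopt;
    return v;
  }

  void PinBytecode() {
    // Taking a persistent handle to the bytecode keeps it alive for the rest
    // of the compilation (modeled as a no-op here).
  }

  bool CanConsiderForInliningSketch() {
    std::optional<FeedbackVector*> first = ReadVector();
    if (!first.has_value()) return false;   // never called: no feedback yet
    PinBytecode();                          // bytecode can no longer be flushed
    std::optional<FeedbackVector*> second = ReadVector();
    if (!second.has_value()) return false;  // flushed before we pinned it
    // A different vector means the feedback was reset and is mostly
    // uninitialized, so inlining now would not pay off.
    return *first == *second;
  }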
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index a17a43ecd2..deb8345bf7 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
- if (!function.has_feedback_vector(broker()->dependencies())) {
+ if (!function.feedback_vector(broker()->dependencies()).has_value()) {
return base::nullopt;
}
@@ -355,7 +355,7 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.has_feedback_vector(broker()->dependencies()));
+ CHECK(function.feedback_vector(broker()->dependencies()).has_value());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
@@ -709,7 +709,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert argument adaptor frame if required. The callees formal parameter
// count have to match the number of arguments passed
// to the call.
- int parameter_count = shared_info->internal_formal_parameter_count();
+ int parameter_count =
+ shared_info->internal_formal_parameter_count_without_receiver();
DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e03e0d41a3..cdbc4848cc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -230,8 +230,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
broker(),
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
DCHECK(shared.is_compiled());
- int register_count = shared.internal_formal_parameter_count() +
- shared.GetBytecodeArray().register_count();
+ int register_count =
+ shared.internal_formal_parameter_count_without_receiver() +
+ shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
@@ -617,15 +618,11 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (bound_target_function.has_value()) return NoChange();
-
Node* feedback = jsgraph()->UndefinedConstant();
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -970,6 +967,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
break;
}
case PropertyCellType::kUndefined:
+ case PropertyCellType::kInTransition:
UNREACHABLE();
}
}
@@ -1635,8 +1633,7 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
maps->erase(std::remove_if(maps->begin(), maps->end(),
[root_map](const MapRef& map) {
return map.is_abandoned_prototype_map() ||
- (map.FindRootMap().has_value() &&
- !map.FindRootMap()->equals(*root_map));
+ !map.FindRootMap().equals(*root_map);
}),
maps->end());
}
@@ -1747,14 +1744,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- // Check if we have the necessary data for building element accesses.
for (ElementAccessInfo const& access_info : access_infos) {
if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() && !typed_array->serialized()) {
- return NoChange();
- }
}
// Check for the monomorphic case.
@@ -2256,10 +2247,6 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.has_call_code()) {
- return nullptr;
- }
-
if (!function_template_info.call_code().has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
@@ -2449,7 +2436,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
@@ -2483,7 +2469,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
storage = effect =
@@ -2789,10 +2774,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (situation == kHandleOOB_SmiCheckDone) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -2980,10 +2963,9 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
- ElementAccess element_access = {
- kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
// Access the actual element.
if (keyed_mode.access_mode() == AccessMode::kLoad) {
@@ -3003,10 +2985,8 @@ JSNativeContextSpecialization::BuildElementAccess(
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -3289,9 +3269,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, *control);
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// Do a real bounds check against {length}. This is in order to protect
@@ -3302,10 +3280,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero |
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* vtrue = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, etrue, if_true);
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3323,12 +3299,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero),
index, length, *effect, *control);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
-
// Return the character from the {receiver} as single character string.
- Node* value = *effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, *effect, *control);
+ Node* value = *effect = graph()->NewNode(
+ simplified()->StringCharCodeAt(), receiver, index, *effect, *control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}
@@ -3465,10 +3438,7 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
base::Optional<MapRef> initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- if (!initial_map->FindRootMap().has_value()) {
- return base::nullopt;
- }
- DCHECK(initial_map->equals(*initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap()));
return *initial_map;
}
}
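Editor's note: with the speculation poisoning gone, BuildIndexedStringLoad keeps only the ordinary shape: branch on index < length, read the character on the in-bounds path, and produce undefined on the out-of-bounds path. A plain C++ sketch of that control-flow shape, using std::string for the receiver and an empty optional for undefined (not the graph-building API):

  #include <cstddef>
  #include <optional>
  #include <string>

  std::optional<std::string> IndexedStringLoadSketch(const std::string& receiver,
                                                     std::size_t index) {
    if (index < receiver.size()) {              // NumberLessThan(index, length)
      return std::string(1, receiver[index]);   // char code -> single-char string
    }
    return std::nullopt;                        // OOB path yields undefined
  }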
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index e986ef1baf..8d67e41751 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -998,9 +998,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
@@ -1595,7 +1595,8 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1633,7 +1634,8 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
STATIC_ASSERT(JSConstructNode::NewTargetIndex() == 1);
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1663,7 +1665,8 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1750,8 +1753,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- int formal_count = shared->internal_formal_parameter_count();
- if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
+ int formal_count =
+ shared->internal_formal_parameter_count_without_receiver();
+ // TODO(v8:11112): Once the sentinel is always 0, the check against
+ // IsDontAdaptArguments() can be removed.
+ if (!shared->IsDontAdaptArguments() && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
// Underapplication. Massage the arguments to match the expected number of
// arguments.
@@ -1763,7 +1769,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), formal_count + 2, new_target);
node->InsertInput(graph()->zone(), formal_count + 3,
- jsgraph()->Constant(arity));
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
@@ -1786,13 +1792,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), arity + 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity,
@@ -1811,7 +1819,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
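Editor's note: the ReduceJSToNumberInput hunk is another instance of the ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING macro (deleted from js-heap-broker.h above) being spelled out as explicit optional handling: check the optional, return NoChange() when the data is unavailable, otherwise unwrap. A standalone sketch of the pattern with std::optional and placeholder Reduction helpers (not V8 types):

  #include <optional>

  struct Reduction { bool changed; };
  Reduction NoChange() { return {false}; }
  Reduction Replace(double) { return {true}; }

  // Stands in for StringRef::ToNumber(), which can fail to produce a value
  // when the underlying data is not available to the compiler thread.
  std::optional<double> ToNumber(bool data_available) {
    if (!data_available) return std::nullopt;
    return 42.0;
  }

  Reduction ReduceToNumberSketch(bool data_available) {
    std::optional<double> number = ToNumber(data_available);
    if (!number.has_value()) return NoChange();  // explicit early return
    return Replace(number.value());
  }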
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fac24e802d..fec0040b61 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -219,9 +219,10 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo shared = info->closure()->shared();
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + shared.internal_formal_parameter_count(),
- CallDescriptor::kCanUseRoots);
+ return GetJSCallDescriptor(
+ zone, info->is_osr(),
+ shared.internal_formal_parameter_count_with_receiver(),
+ CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 8b33444b29..707c7d98ab 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
- // Use retpoline for this call if indirect.
- kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 6,
- kCallerSavedRegisters = 1u << 7,
+ kFixedTargetRegister = 1u << 5,
+ kCallerSavedRegisters = 1u << 6,
// The kCallerSavedFPRegisters only matters (and set) when the more general
// flag for kCallerSavedRegisters above is also set.
- kCallerSavedFPRegisters = 1u << 8,
+ kCallerSavedFPRegisters = 1u << 7,
// Tail calls for tier up are special (in fact they are different enough
// from normal tail calls to warrant a dedicated opcode; but they also have
// enough similar aspects that reusing the TailCall opcode is pragmatic).
@@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
//
// In other words, behavior is identical to a jmp instruction prior caller
// frame construction.
- kIsTailCallForTierUp = 1u << 9,
+ kIsTailCallForTierUp = 1u << 8,
+
+ // AIX has a function descriptor by default but it can be disabled for a
+ // certain CFunction call (only used for Kind::kCallAddress).
+ kNoFunctionDescriptor = 1u << 9,
// Flags past here are *not* encoded in InstructionCode and are thus not
// accessible from the code generator. See also
// kFlagsBitsEncodedInInstructionCode.
-
- // AIX has a function descriptor by default but it can be disabled for a
- // certain CFunction call (only used for Kind::kCallAddress).
- kNoFunctionDescriptor = 1u << 10,
};
using Flags = base::Flags<Flag>;
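Editor's note: dropping kRetpoline shifts every later CallDescriptor flag down by one bit, and kNoFunctionDescriptor moves ahead of the comment marking the flags that are not encoded in InstructionCode, i.e. it becomes visible to the code generator. A small sketch of why the positions matter; the 10-bit budget below is an assumption for illustration, not the real field width:

  #include <cstdint>

  // Compacted layout from the hunk above; bits 0-4 are unchanged and omitted.
  enum Flag : uint32_t {
    kFixedTargetRegister    = 1u << 5,
    kCallerSavedRegisters   = 1u << 6,
    kCallerSavedFPRegisters = 1u << 7,
    kIsTailCallForTierUp    = 1u << 8,
    kNoFunctionDescriptor   = 1u << 9,
  };

  // Hypothetical: assume only the low 10 flag bits are carried into the
  // instruction encoding; freeing bit 5 is what lets kNoFunctionDescriptor
  // fit inside that range.
  constexpr uint32_t kFlagsBitsEncodedInInstructionCode = 10;
  static_assert(
      kNoFunctionDescriptor < (1u << kFlagsBitsEncodedInInstructionCode),
      "flag must stay visible to the code generator");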
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index e184534ed7..7b660856b7 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -5,12 +5,17 @@
#include "src/compiler/loop-analysis.h"
#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -581,12 +586,24 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
loop_header);
// All uses are outside the loop, do nothing.
break;
- case IrOpcode::kCall:
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
// Call nodes are considered to have unbounded size, i.e. >max_size.
+ // An exception is the call to the stack guard builtin at the beginning
+ // of many loops.
return nullptr;
+ case IrOpcode::kCall: {
+ Node* callee = node->InputAt(0);
+ if (callee->opcode() == IrOpcode::kRelocatableInt32Constant ||
+ callee->opcode() == IrOpcode::kRelocatableInt64Constant) {
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard) {
+ return nullptr;
+ }
+ }
+ V8_FALLTHROUGH;
+ }
default:
for (Node* use : node->uses()) {
if (visited->count(use) == 0) queue.push_back(use);
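Editor's note: the loop-analysis change stops treating every call as "unbounded size": a call whose callee is a relocatable constant pointing at the wasm stack-guard builtin, the call that sits at the top of most wasm loops, no longer disqualifies the loop. A standalone sketch of that classification idea; the opcode names and the constant are placeholders, not V8's IrOpcode or wasm::WasmCode values:

  #include <cstdint>

  enum class Opcode { kCall, kTailCall, kRelocatableConstant, kOther };

  struct CallSite {
    Opcode opcode;
    Opcode callee_opcode;
    int64_t callee_value;  // meaningful only for kRelocatableConstant callees
  };

  constexpr int64_t kWasmStackGuard = 1;  // placeholder builtin id

  // True if the node forces the analysis to treat the loop as too big.
  bool CallMakesLoopUnbounded(const CallSite& site) {
    if (site.opcode == Opcode::kTailCall) return true;
    if (site.opcode != Opcode::kCall) return false;
    if (site.callee_opcode == Opcode::kRelocatableConstant &&
        site.callee_value == kWasmStackGuard) {
      return false;  // the tolerated stack-guard call
    }
    return true;
  }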
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 88679283d9..fedb208b5f 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -121,10 +121,14 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicLoadParametersOf(node->op())
+ .representation()
+ .representation());
+ break;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@@ -154,8 +158,8 @@ class MachineRepresentationInferrer {
}
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord64AtomicStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ representation_vector_[node->id()] = PromoteRepresentation(
+ AtomicStoreParametersOf(node->op()).representation());
break;
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord32AtomicPairStore:
@@ -206,15 +210,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kTaggedPoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord32;
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord64;
- break;
case IrOpcode::kCompressedHeapConstant:
representation_vector_[node->id()] =
MachineRepresentation::kCompressedPointer;
@@ -394,14 +391,6 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord32);
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord64);
- break;
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
if (COMPRESS_POINTERS_BOOL) {
@@ -410,9 +399,6 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
}
break;
- case IrOpcode::kTaggedPoisonOnSpeculation:
- CheckValueInputIsTagged(node, 0);
- break;
case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat64ToFloat32:
@@ -566,7 +552,6 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
- case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -605,9 +590,12 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
- node->opcode() == IrOpcode::kStore &&
- IsAnyTagged(
- StoreRepresentationOf(node->op()).representation())) {
+ ((node->opcode() == IrOpcode::kStore &&
+ IsAnyTagged(StoreRepresentationOf(node->op())
+ .representation())) ||
+ (node->opcode() == IrOpcode::kWord32AtomicStore &&
+ IsAnyTagged(AtomicStoreParametersOf(node->op())
+ .representation())))) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
CheckValueInputIsTagged(node, 2);
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 33d58c854b..775e5ada81 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -947,6 +947,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
return ReduceWord64Comparisons(node);
}
+ case IrOpcode::kFloat32Select:
+ case IrOpcode::kFloat64Select:
+ case IrOpcode::kWord32Select:
+ case IrOpcode::kWord64Select: {
+ Int32Matcher match(node->InputAt(0));
+ if (match.HasResolvedValue()) {
+ if (match.Is(0)) {
+ return Replace(node->InputAt(2));
+ } else {
+ return Replace(node->InputAt(1));
+ }
+ }
+ break;
+ }
default:
break;
}
@@ -2061,7 +2075,6 @@ bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
} // namespace
-
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||
IrOpcode::kFloat64LessThan == node->opcode() ||
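Editor's note: the new reducer case above folds Float32/Float64/Word32/Word64 Select nodes whose condition is a resolved Int32 constant: zero selects the false input, anything else the true input. A minimal standalone model of that rule (plain C++ with std::optional, not the Node/Matcher API):

  #include <cassert>
  #include <cstdint>
  #include <optional>

  struct SelectNode {
    std::optional<int32_t> condition;  // resolved constant, if any
    int true_input;
    int false_input;
  };

  // Mirrors the reduction: a known condition picks one input statically.
  std::optional<int> ReduceSelect(const SelectNode& node) {
    if (!node.condition.has_value()) return std::nullopt;  // nothing to fold
    return *node.condition == 0 ? node.false_input : node.true_input;
  }

  int main() {
    assert(ReduceSelect({0, 10, 20}) == 20);  // condition 0 -> false input
    assert(ReduceSelect({7, 10, 20}) == 10);  // non-zero -> true input
    assert(!ReduceSelect({std::nullopt, 10, 20}).has_value());
    return 0;
  }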
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 411c6d4cb3..d24030e1a7 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -32,6 +32,41 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
+bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return lhs.store_representation() == rhs.store_representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicStoreParameters params) {
+ return base::hash_combine(hash_value(params.store_representation()),
+ params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
+ return os << params.store_representation() << ", " << params.order();
+}
+
+bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return lhs.representation() == rhs.representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicLoadParameters params) {
+ return base::hash_combine(params.representation(), params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
+ return os << params.representation() << ", " << params.order();
+}
+
size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
@@ -121,21 +156,29 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kWord32AtomicLoad == op->opcode() ||
- IrOpcode::kWord64AtomicLoad == op->opcode() ||
- IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
- IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
+AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kWord64AtomicLoad == op->opcode());
+ return OpParameter<AtomicLoadParameters>(op);
+}
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
+AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+ IrOpcode::kWord64AtomicStore == op->opcode());
+ return OpParameter<AtomicStoreParameters>(op);
+}
+
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -182,12 +225,6 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
return OpParameter<StackSlotRepresentation>(op);
}
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
- return OpParameter<MachineRepresentation>(op);
-}
-
MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -650,6 +687,30 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32Zero) \
V(S128Load64Zero)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_TYPE_LIST(V)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#else
+
+#define ATOMIC_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
@@ -665,6 +726,28 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged)
+
+#else
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged) \
+ V(kCompressedPointer) \
+ V(kCompressed)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@@ -831,13 +914,6 @@ struct MachineOperatorGlobalCache {
Operator::kEliminatable, "Load", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -861,7 +937,6 @@ struct MachineOperatorGlobalCache {
0, 0, 1, 0, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
@@ -976,55 +1051,63 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-#define ATOMIC_LOAD(Type) \
- struct Word32AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word32AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word32SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word32SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_LOAD(Type) \
- struct Word64AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word64AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word64SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word64SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
- struct Word32AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word32AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word32SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word32SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_STORE(Type) \
- struct Word64AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word64AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word64SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word64SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
@@ -1084,21 +1167,23 @@ struct MachineOperatorGlobalCache {
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
- struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairLoadOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
+ AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-
- struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
+
+ struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairStoreOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1,
+ 0, AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+ Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
@@ -1157,30 +1242,6 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct TaggedPoisonOnSpeculation : public Operator {
- TaggedPoisonOnSpeculation()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-
- struct Word32PoisonOnSpeculation : public Operator {
- Word32PoisonOnSpeculation()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-
- struct Word64PoisonOnSpeculation : public Operator {
- Word64PoisonOnSpeculation()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-
struct AbortCSAAssertOperator : public Operator {
AbortCSAAssertOperator()
: Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
@@ -1366,16 +1427,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kPoisonedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -1592,23 +1643,47 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord32AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_TYPE_LIST(LOAD)
+ ATOMIC_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord32AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC_REPRESENTATION_LIST(STORE)
+ ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
@@ -1685,24 +1760,49 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord64AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstLoad##Type; \
+ }
+ ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_U64_TYPE_LIST(LOAD)
+ ATOMIC64_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord64AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstStore##kRep; \
+ }
+ ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
+ ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
+
UNREACHABLE();
}
@@ -1777,12 +1877,24 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return &cache_.kWord32AtomicPairLoad;
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairLoad;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return &cache_.kWord32AtomicPairStore;
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairStore;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@@ -1813,18 +1925,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \
@@ -1918,8 +2018,12 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
#undef ATOMIC_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
+#undef ATOMIC_TAGGED_TYPE_LIST
+#undef ATOMIC64_TAGGED_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC_TAGGED_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
+#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
#undef LOAD_TRANSFORM_LIST
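
A minimal caller-side sketch of the new builder entry points, assuming a MachineOperatorBuilder* m (e.g. jsgraph->machine()); the local names are illustrative, and the types come from machine-operator.h and src/codegen/atomic-memory-order.h:

  // seq_cst requests are served from the operator cache.
  AtomicLoadParameters load_params(MachineType::Int32(),
                                   AtomicMemoryOrder::kSeqCst);
  const Operator* load_op = m->Word32AtomicLoad(load_params);

  // Non-seq_cst orders (kAcqRel) are allocated in the graph zone on demand
  // and keep representation, write-barrier kind and order on the operator.
  AtomicStoreParameters store_params(MachineRepresentation::kWord32,
                                     kNoWriteBarrier,
                                     AtomicMemoryOrder::kAcqRel);
  const Operator* store_op = m->Word32AtomicStore(store_params);
  // The operators are then used as usual, roughly
  // graph->NewNode(load_op, base, index, effect, control).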
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0ee3649ad0..7bd73663ab 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
@@ -50,6 +51,32 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
+// order.
+class AtomicLoadParameters final {
+ public:
+ AtomicLoadParameters(LoadRepresentation representation,
+ AtomicMemoryOrder order)
+ : representation_(representation), order_(order) {}
+
+ LoadRepresentation representation() const { return representation_; }
+ AtomicMemoryOrder order() const { return order_; }
+
+ private:
+ LoadRepresentation representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
+bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
+
+size_t hash_value(AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
enum class MemoryAccessKind {
kNormal,
kUnaligned,
@@ -131,6 +158,43 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
+class AtomicStoreParameters final {
+ public:
+ AtomicStoreParameters(MachineRepresentation representation,
+ WriteBarrierKind write_barrier_kind,
+ AtomicMemoryOrder order)
+ : store_representation_(representation, write_barrier_kind),
+ order_(order) {}
+
+ MachineRepresentation representation() const {
+ return store_representation_.representation();
+ }
+ WriteBarrierKind write_barrier_kind() const {
+ return store_representation_.write_barrier_kind();
+ }
+ AtomicMemoryOrder order() const { return order_; }
+
+ StoreRepresentation store_representation() const {
+ return store_representation_;
+ }
+
+ private:
+ StoreRepresentation store_representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
+bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
+
+size_t hash_value(AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;
@@ -173,9 +237,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
Operator const* op) V8_WARN_UNUSED_RESULT;
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
- V8_WARN_UNUSED_RESULT;
-
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
class S128ImmediateParameter {
@@ -852,7 +913,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
- const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
@@ -879,11 +939,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
- // Destroy value by masking when misspeculating.
- const Operator* TaggedPoisonOnSpeculation();
- const Operator* Word32PoisonOnSpeculation();
- const Operator* Word64PoisonOnSpeculation();
-
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@@ -901,13 +956,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* MemBarrier();
// atomic-load [base + index]
- const Operator* Word32AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(AtomicLoadParameters params);
// atomic-load [base + index]
- const Operator* Word64AtomicLoad(LoadRepresentation rep);
+ const Operator* Word64AtomicLoad(AtomicLoadParameters params);
// atomic-store [base + index], value
- const Operator* Word32AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(AtomicStoreParameters params);
// atomic-store [base + index], value
- const Operator* Word64AtomicStore(MachineRepresentation rep);
+ const Operator* Word64AtomicStore(AtomicStoreParameters params);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
@@ -937,9 +992,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
- const Operator* Word32AtomicPairLoad();
+ const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
  // atomic-pair-store [base + index], value_high, value_low
- const Operator* Word32AtomicPairStore();
+ const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
  // atomic-pair-sub [base + index], value_high, value_low
@@ -980,7 +1035,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
- V(Word, PoisonOnSpeculation) \
V(Int, Add) \
V(Int, Sub) \
V(Int, Mul) \
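
On the consuming side (instruction selection, effect-control linearization) the parameters are read back off a node's operator through the accessors declared above. A sketch, with node standing in for an IrOpcode::kWord32AtomicStore node:

  const AtomicStoreParameters& store_params =
      AtomicStoreParametersOf(node->op());
  if (store_params.order() == AtomicMemoryOrder::kSeqCst &&
      store_params.representation() == MachineRepresentation::kWord32 &&
      store_params.write_barrier_kind() == kNoWriteBarrier) {
    // Plain untagged word: emit the seq_cst store sequence for the target.
    // kAcqRel on the store side behaves like a release store and can
    // usually use a cheaper instruction sequence.
  }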
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 9673a51844..27ad71c07a 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
@@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
allocation_folding_(allocation_folding),
- poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
@@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
return Changed(node);
}
@@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
- DCHECK(node->opcode() == IrOpcode::kLoad ||
- node->opcode() == IrOpcode::kPoisonedLoad);
+ DCHECK(node->opcode() == IrOpcode::kLoad);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (type.IsMapWord()) {
- DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
@@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
return write_barrier_kind;
}
-bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 1fbe18abff..9edb880e6f 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer {
MemoryLowering(
JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
@@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer {
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
void EnsureAllocateOperator();
Node* GetWasmInstanceNode();
@@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer {
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
AllocationFolding allocation_folding_;
- PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 860ea1fae1..ba4a5c1f67 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
@@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
- case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicSub:
case IrOpcode::kWord32AtomicXor:
- case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicCompareExchange:
@@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicXor:
- case IrOpcode::kWord64PoisonOnSpeculation:
return false;
case IrOpcode::kCall:
@@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
- JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone),
- memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level,
- allocation_folding, WriteBarrierAssertFailed,
- function_debug_name),
+ memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 3845304fdd..7d8bca44d4 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -30,7 +30,6 @@ using NodeId = uint32_t;
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 1ce4023424..52dc476dc4 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -743,7 +743,6 @@ struct BaseWithIndexAndDisplacementMatcher {
switch (from->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 912bd7b5ce..b956f148cc 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -463,7 +463,6 @@
V(PlainPrimitiveToFloat64) \
V(PlainPrimitiveToNumber) \
V(PlainPrimitiveToWord32) \
- V(PoisonIndex) \
V(RestLength) \
V(RuntimeAbort) \
V(StoreDataViewElement) \
@@ -686,7 +685,6 @@
V(DebugBreak) \
V(Comment) \
V(Load) \
- V(PoisonedLoad) \
V(LoadImmutable) \
V(Store) \
V(StackSlot) \
@@ -746,9 +744,6 @@
V(Word64Select) \
V(Float32Select) \
V(Float64Select) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 82a6e6bb3e..16366bf588 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -10,21 +10,12 @@
#include "src/compiler/zone-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm.turbofan categories.
-constexpr const char kTraceCategory[] = // --
- TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
-
-} // namespace
+constexpr char PipelineStatistics::kTraceCategory[];
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
@@ -62,6 +53,7 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
: outer_zone_(info->zone()),
zone_stats_(zone_stats),
compilation_stats_(compilation_stats),
+ code_kind_(info->code_kind()),
phase_kind_name_(nullptr),
phase_name_(nullptr) {
if (info->has_shared_info()) {
@@ -70,7 +62,6 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
total_stats_.Begin(this);
}
-
PipelineStatistics::~PipelineStatistics() {
if (InPhaseKind()) EndPhaseKind();
CompilationStatistics::BasicStats diff;
@@ -82,7 +73,8 @@ PipelineStatistics::~PipelineStatistics() {
void PipelineStatistics::BeginPhaseKind(const char* phase_kind_name) {
DCHECK(!InPhase());
if (InPhaseKind()) EndPhaseKind();
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_kind_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_kind_name, "kind",
+ CodeKindToString(code_kind_));
phase_kind_name_ = phase_kind_name;
phase_kind_stats_.Begin(this);
}
@@ -92,11 +84,14 @@ void PipelineStatistics::EndPhaseKind() {
CompilationStatistics::BasicStats diff;
phase_kind_stats_.End(this, &diff);
compilation_stats_->RecordPhaseKindStats(phase_kind_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_kind_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_kind_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
void PipelineStatistics::BeginPhase(const char* phase_name) {
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_name, "kind",
+ CodeKindToString(code_kind_));
DCHECK(InPhaseKind());
phase_name_ = phase_name;
phase_stats_.Begin(this);
@@ -107,7 +102,9 @@ void PipelineStatistics::EndPhase() {
CompilationStatistics::BasicStats diff;
phase_stats_.End(this, &diff);
compilation_stats_->RecordPhaseStats(phase_kind_name_, phase_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 8a05d98011..19f7574e2a 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -11,6 +11,8 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/compilation-statistics.h"
+#include "src/objects/code-kind.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -29,6 +31,12 @@ class PipelineStatistics : public Malloced {
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
+ // We log detailed phase information about the pipeline
+ // in both the v8.turbofan and the v8.wasm.turbofan categories.
+ static constexpr char kTraceCategory[] =
+ TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
+
private:
size_t OuterZoneSize() {
return static_cast<size_t>(outer_zone_->allocation_size());
@@ -60,6 +68,7 @@ class PipelineStatistics : public Malloced {
Zone* outer_zone_;
ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
+ CodeKind code_kind_;
std::string function_name_;
// Stats for the entire compilation.
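
Hoisting kTraceCategory into the class (its out-of-line definition stays in the .cc) is what lets pipeline.cc's TraceScheduleAndVerify reuse the same category. Each phase now emits a begin/end pair along these lines, where diff is the phase's CompilationStatistics::BasicStats:

  TRACE_EVENT_BEGIN1(PipelineStatistics::kTraceCategory, phase_name_,
                     "kind", CodeKindToString(code_kind_));
  // ... the phase runs; its time and zone deltas are accumulated ...
  TRACE_EVENT_END2(PipelineStatistics::kTraceCategory, phase_name_,
                   "kind", CodeKindToString(code_kind_),
                   "stats", TRACE_STR_COPY(diff.AsJSON().c_str()));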
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e802cd7268..8d3d93aa2a 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -84,6 +84,7 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
+#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
@@ -95,6 +96,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/wasm-inlining.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -547,8 +549,7 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- info()->GetPoisoningMitigationLevel(), assembler_options(),
- info_->builtin(), max_unoptimized_frame_height(),
+ assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
max_pushed_argument_count(),
FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
}
@@ -947,13 +948,10 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
-#ifdef V8_RUNTIME_CALL_STATS
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify",
- RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
- RuntimeCallStats::kThreadSpecific);
-#else
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify");
-#endif
+ RCS_SCOPE(data->runtime_call_stats(),
+ RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
+ RuntimeCallStats::kThreadSpecific);
+ TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
if (info->trace_turbo_json()) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
@@ -1161,18 +1159,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_inlining) {
compilation_info()->set_inlining();
}
-
- // This is the bottleneck for computing and setting poisoning level in the
- // optimizing compiler.
- PoisoningMitigationLevel load_poisoning =
- PoisoningMitigationLevel::kDontPoison;
- if (FLAG_untrusted_code_mitigations) {
- // For full mitigations, this can be changed to
- // PoisoningMitigationLevel::kPoisonAll.
- load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
- }
- compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
-
if (FLAG_turbo_allocation_folding) {
compilation_info()->set_allocation_folding();
}
@@ -1424,8 +1410,8 @@ struct InliningPhase {
};
#if V8_ENABLE_WEBASSEMBLY
-struct WasmInliningPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+struct JSWasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->has_js_wasm_calls());
@@ -1629,10 +1615,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(
- data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
- data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage, data->observe_node_manager());
+ SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
+ data->source_positions(), data->node_origins(),
+ &data->info()->tick_counter(), linkage,
+ data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1699,6 +1685,25 @@ struct WasmLoopUnrollingPhase {
}
}
};
+
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+
+ void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes) {
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ DeadCodeElimination dead(&graph_reducer, data->graph(),
+ data->mcgraph()->common(), temp_zone);
+ WasmInliner inliner(&graph_reducer, env, data->source_positions(),
+ data->node_origins(), data->mcgraph(), wire_bytes, 0);
+ AddReducer(data, &graph_reducer, &dead);
+ AddReducer(data, &graph_reducer, &inliner);
+
+ graph_reducer.ReduceGraph();
+ }
+};
#endif // V8_ENABLE_WEBASSEMBLY
struct LoopExitEliminationPhase {
@@ -1797,7 +1802,6 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
}
{
@@ -1899,7 +1903,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
- data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
+ data->jsgraph(), temp_zone,
data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
@@ -1989,7 +1993,6 @@ struct ScheduledEffectControlLinearizationPhase {
// - lower simplified memory and select nodes to machine level nodes.
LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
@@ -2205,7 +2208,6 @@ struct InstructionSelectionPhase {
data->assembler_options().enable_root_relative_access
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
- data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
@@ -2607,6 +2609,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
tracing_scope.stream(), isolate);
}
#endif
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code),
+ compilation_info()->GetDebugName().get()));
return SUCCEEDED;
}
return FAILED;
@@ -2750,8 +2755,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2853,8 +2858,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2969,17 +2974,12 @@ int HashGraphForPGO(Graph* graph) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data) {
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
kind);
info.set_builtin(builtin);
- if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
- info.SetPoisoningMitigationLevel(poisoning_level);
- }
-
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
@@ -3195,7 +3195,8 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
// static
void Pipeline::GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -3225,6 +3226,10 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
}
+ if (FLAG_wasm_inlining) {
+ pipeline.Run<WasmInliningPhase>(env, wire_bytes_storage);
+ pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
@@ -3546,18 +3551,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
} else {
- const RegisterConfiguration* config;
- if (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison) {
-#ifdef V8_TARGET_ARCH_IA32
- FATAL("Poisoning is not supported on ia32.");
-#else
- config = RegisterConfiguration::Poisoning();
-#endif // V8_TARGET_ARCH_IA32
- } else {
- config = RegisterConfiguration::Default();
- }
-
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
@@ -3643,7 +3637,6 @@ std::ostream& operator<<(std::ostream& out,
out << "\"codeStartRegisterCheck\": "
<< s.offsets_info->code_start_register_check << ", ";
out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
- out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index ea67b31e06..19fd715885 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -23,11 +23,13 @@ class ProfileDataFromFile;
class RegisterConfiguration;
namespace wasm {
+struct CompilationEnv;
struct FunctionBody;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
struct WasmModule;
+class WireBytesStorage;
} // namespace wasm
namespace compiler {
@@ -54,7 +56,8 @@ class Pipeline : public AllStatic {
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -78,8 +81,7 @@ class Pipeline : public AllStatic {
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index a64521d6f6..456512a867 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -168,7 +168,9 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
Map::GetConstructorFunction(
*map_handle, *broker()->target_native_context().object())
.value();
- map = MakeRef(broker(), constructor.initial_map());
+ // {constructor.initial_map()} is loaded/stored with acquire-release
+ // semantics for constructors.
+ map = MakeRefAssumeMemoryFence(broker(), constructor.initial_map());
DCHECK(map.object()->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
@@ -235,7 +237,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::Any(),
MachineType::AnyTagged(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -263,7 +264,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -291,7 +291,6 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
ConstFieldInfo::None()};
return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
@@ -319,7 +318,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
access_info.GetConstFieldInfo()};
if (field_representation == MachineRepresentation::kTaggedPointer ||
field_representation == MachineRepresentation::kCompressedPointer) {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 7ed217d4e3..383d63dd69 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -18,8 +18,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word, MachineOperatorBuilder::Flags flags,
- MachineOperatorBuilder::AlignmentRequirements alignment_requirements,
- PoisoningMitigationLevel poisoning_level)
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
: isolate_(isolate),
graph_(graph),
schedule_(zone()->New<Schedule>(zone())),
@@ -30,8 +29,7 @@ RawMachineAssembler::RawMachineAssembler(
call_descriptor_(call_descriptor),
target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
- current_block_(schedule()->start()),
- poisoning_level_(poisoning_level) {
+ current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
@@ -472,7 +470,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
return;
case IrOpcode::kIfTrue: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kTrue) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -485,7 +483,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
case IrOpcode::kIfFalse: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kFalse) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -516,11 +514,10 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
}
- BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
- if (info.hint == new_branch_hint) return;
- NodeProperties::ChangeOp(
- responsible_branch,
- common()->Branch(new_branch_hint, info.is_safety_check));
+ BranchHint hint = BranchHintOf(responsible_branch->op());
+ if (hint == new_branch_hint) return;
+ NodeProperties::ChangeOp(responsible_branch,
+ common()->Branch(new_branch_hint));
}
Node* RawMachineAssembler::TargetParameter() {
@@ -544,9 +541,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = MakeNode(
- common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
- &condition);
+ Node* branch = MakeNode(common()->Branch(BranchHint::kNone), 1, &condition);
BasicBlock* true_block = schedule()->NewBasicBlock();
BasicBlock* false_block = schedule()->NewBasicBlock();
schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index a811fa7bf9..f0bb6e0425 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -52,9 +52,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport(),
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kPoisonCriticalOnly);
+ FullUnalignedAccessSupport());
~RawMachineAssembler() = default;
RawMachineAssembler(const RawMachineAssembler&) = delete;
@@ -67,7 +65,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
// Only used for tests: Finalizes the schedule and exports it to be used for
// code generation. Note that this RawMachineAssembler becomes invalid after
@@ -132,19 +129,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return Load(type, base, IntPtrConstant(0), needs_poisoning);
+ Node* Load(MachineType type, Node* base) {
+ return Load(type, base, IntPtrConstant(0));
}
- Node* Load(MachineType type, Node* base, Node* index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ Node* Load(MachineType type, Node* base, Node* index) {
const Operator* op = machine()->Load(type);
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
- if (needs_poisoning == LoadSensitivity::kCritical &&
- poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
- op = machine()->PoisonedLoad(type);
- }
-
Node* load = AddNode(op, base, index);
return load;
}
@@ -174,10 +163,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
bool IsMapOffsetConstantMinusTag(int offset) {
return offset == HeapObject::kMapOffset - kHeapObjectTag;
}
- Node* LoadFromObject(
- MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
@@ -253,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Atomic memory operations.
- Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicLoad(type), base, index);
+ Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
+ DCHECK_NE(rep.representation().representation(),
+ MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicLoad(rep), base, index);
}
- Node* AtomicLoad64(Node* base, Node* index) {
+ Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
if (machine()->Is64()) {
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
- index);
+ return AddNode(machine()->Word64AtomicLoad(rep), base, index);
} else {
- return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+ return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
}
}
@@ -276,22 +262,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#define VALUE_HALVES value, value_high
#endif
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
- DCHECK_NE(rep, MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicStore(params), base, index, value);
}
- Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
+ Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
+ Node* value, Node* value_high) {
if (machine()->Is64()) {
DCHECK_NULL(value_high);
- return AddNode(
- machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
- index, value);
+ return AddNode(machine()->Word64AtomicStore(params), base, index, value);
} else {
- return AddNode(machine()->Word32AtomicPairStore(), base, index,
- VALUE_HALVES);
+ DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
+ params.representation() != MachineRepresentation::kTaggedSigned &&
+ params.representation() != MachineRepresentation::kTagged);
+ return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
+ index, VALUE_HALVES);
}
}
@@ -959,20 +947,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- Node* TaggedPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->TaggedPoisonOnSpeculation(), value);
- }
- return value;
- }
-
- Node* WordPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->WordPoisonOnSpeculation(), value);
- }
- return value;
- }
-
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
Node* CallN(CallDescriptor* call_descriptor, int input_count,
@@ -1136,6 +1110,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common);
Isolate* isolate_;
+
Graph* graph_;
Schedule* schedule_;
SourcePositionTable* source_positions_;
@@ -1146,7 +1121,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* target_parameter_;
NodeVector parameters_;
BasicBlock* current_block_;
- PoisoningMitigationLevel poisoning_level_;
};
class V8_EXPORT_PRIVATE RawMachineLabel final {
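
Callers of the RawMachineAssembler atomics now hand the parameter objects through; a short sketch (the assembler m and the base, index and value nodes are illustrative):

  AtomicLoadParameters load_params(MachineType::Uint32(),
                                   AtomicMemoryOrder::kSeqCst);
  Node* loaded = m.AtomicLoad(load_params, base, index);

  AtomicStoreParameters store_params(MachineRepresentation::kWord32,
                                     kNoWriteBarrier,
                                     AtomicMemoryOrder::kSeqCst);
  m.AtomicStore(store_params, base, index, value);
  // On 32-bit targets the 64-bit variants fall back to the pair operations
  // and forward only the memory order (see AtomicLoad64 / AtomicStore64).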
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1c07a23dde..6416eed376 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1735,11 +1735,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- if (lowering->poisoning_level_ ==
- PoisoningMitigationLevel::kDontPoison &&
- (index_type.IsNone() || length_type.IsNone() ||
- (index_type.Min() >= 0.0 &&
- index_type.Max() < length_type.Min()))) {
+ if (index_type.IsNone() || length_type.IsNone() ||
+ (index_type.Min() >= 0.0 &&
+ index_type.Max() < length_type.Min())) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
// TODO(neis): Move this into TypedOptimization?
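
A concrete instance of the elision condition above (the Range notation is only an illustration of the typer's output):

  // index_type = Range(0, 7), length_type = Range(8, 16):
  //   index_type.Min() >= 0.0               -> 0 >= 0   holds
  //   index_type.Max() < length_type.Min()  -> 7 < 8    holds
  // so the bounds check is statically known to pass and the lowering drops
  // it; previously this additionally required the poisoning level to be
  // kDontPoison.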
@@ -3181,11 +3179,6 @@ class RepresentationSelector {
}
case IrOpcode::kCheckBounds:
return VisitCheckBounds<T>(node, lowering);
- case IrOpcode::kPoisonIndex: {
- VisitUnop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- return;
- }
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@@ -3835,7 +3828,7 @@ class RepresentationSelector {
case IrOpcode::kDateNow:
VisitInputs<T>(node);
- return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kFrameState:
return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
@@ -4225,18 +4218,19 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(
- JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
- Linkage* linkage, ObserveNodeManager* observe_node_manager)
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter,
+ Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
linkage_(linkage),
observe_node_manager_(observe_node_manager) {}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 54017b34f7..f60bc1a7e3 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -31,7 +31,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
TickCounter* tick_counter, Linkage* linkage,
ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
@@ -83,8 +82,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- PoisoningMitigationLevel poisoning_level_;
-
TickCounter* const tick_counter_;
Linkage* const linkage_;
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9c4f8f083a..9461194b55 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -73,22 +73,6 @@ size_t hash_value(FieldAccess const& access) {
access.is_store_in_literal);
}
-size_t hash_value(LoadSensitivity load_sensitivity) {
- return static_cast<size_t>(load_sensitivity);
-}
-
-std::ostream& operator<<(std::ostream& os, LoadSensitivity load_sensitivity) {
- switch (load_sensitivity) {
- case LoadSensitivity::kCritical:
- return os << "Critical";
- case LoadSensitivity::kSafe:
- return os << "Safe";
- case LoadSensitivity::kUnsafe:
- return os << "Unsafe";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
#ifdef OBJECT_PRINT
@@ -107,9 +91,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
if (access.is_store_in_literal) {
os << " (store in literal)";
}
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
os << "]";
return os;
}
@@ -145,9 +126,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
return os;
}
@@ -719,129 +697,128 @@ bool operator==(CheckMinusZeroParameters const& lhs,
return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1, 0) \
- V(NumberEqual, Operator::kCommutative, 2, 0) \
- V(NumberLessThan, Operator::kNoProperties, 2, 0) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(NumberAdd, Operator::kCommutative, 2, 0) \
- V(NumberSubtract, Operator::kNoProperties, 2, 0) \
- V(NumberMultiply, Operator::kCommutative, 2, 0) \
- V(NumberDivide, Operator::kNoProperties, 2, 0) \
- V(NumberModulus, Operator::kNoProperties, 2, 0) \
- V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
- V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
- V(NumberImul, Operator::kCommutative, 2, 0) \
- V(NumberAbs, Operator::kNoProperties, 1, 0) \
- V(NumberClz32, Operator::kNoProperties, 1, 0) \
- V(NumberCeil, Operator::kNoProperties, 1, 0) \
- V(NumberFloor, Operator::kNoProperties, 1, 0) \
- V(NumberFround, Operator::kNoProperties, 1, 0) \
- V(NumberAcos, Operator::kNoProperties, 1, 0) \
- V(NumberAcosh, Operator::kNoProperties, 1, 0) \
- V(NumberAsin, Operator::kNoProperties, 1, 0) \
- V(NumberAsinh, Operator::kNoProperties, 1, 0) \
- V(NumberAtan, Operator::kNoProperties, 1, 0) \
- V(NumberAtan2, Operator::kNoProperties, 2, 0) \
- V(NumberAtanh, Operator::kNoProperties, 1, 0) \
- V(NumberCbrt, Operator::kNoProperties, 1, 0) \
- V(NumberCos, Operator::kNoProperties, 1, 0) \
- V(NumberCosh, Operator::kNoProperties, 1, 0) \
- V(NumberExp, Operator::kNoProperties, 1, 0) \
- V(NumberExpm1, Operator::kNoProperties, 1, 0) \
- V(NumberLog, Operator::kNoProperties, 1, 0) \
- V(NumberLog1p, Operator::kNoProperties, 1, 0) \
- V(NumberLog10, Operator::kNoProperties, 1, 0) \
- V(NumberLog2, Operator::kNoProperties, 1, 0) \
- V(NumberMax, Operator::kNoProperties, 2, 0) \
- V(NumberMin, Operator::kNoProperties, 2, 0) \
- V(NumberPow, Operator::kNoProperties, 2, 0) \
- V(NumberRound, Operator::kNoProperties, 1, 0) \
- V(NumberSign, Operator::kNoProperties, 1, 0) \
- V(NumberSin, Operator::kNoProperties, 1, 0) \
- V(NumberSinh, Operator::kNoProperties, 1, 0) \
- V(NumberSqrt, Operator::kNoProperties, 1, 0) \
- V(NumberTan, Operator::kNoProperties, 1, 0) \
- V(NumberTanh, Operator::kNoProperties, 1, 0) \
- V(NumberTrunc, Operator::kNoProperties, 1, 0) \
- V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
- V(NumberToInt32, Operator::kNoProperties, 1, 0) \
- V(NumberToString, Operator::kNoProperties, 1, 0) \
- V(NumberToUint32, Operator::kNoProperties, 1, 0) \
- V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
- V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
- V(BigIntNegate, Operator::kNoProperties, 1, 0) \
- V(StringConcat, Operator::kNoProperties, 3, 0) \
- V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
- V(StringIndexOf, Operator::kNoProperties, 3, 0) \
- V(StringLength, Operator::kNoProperties, 1, 0) \
- V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
- V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
- V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
- V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
- V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
- V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
- V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
- V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
- V(ObjectIsString, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
- V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
- V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
- V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
- V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
- V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
- V(SameValue, Operator::kCommutative, 2, 0) \
- V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
- V(NumberSameValue, Operator::kCommutative, 2, 0) \
- V(ReferenceEqual, Operator::kCommutative, 2, 0) \
- V(StringEqual, Operator::kCommutative, 2, 0) \
- V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(ToBoolean, Operator::kNoProperties, 1, 0) \
- V(NewConsString, Operator::kNoProperties, 3, 0) \
- V(PoisonIndex, Operator::kNoProperties, 1, 0)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
+ V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
+ V(NumberSameValue, Operator::kCommutative, 2, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
V(BigIntAdd, Operator::kNoProperties, 2, 1) \
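
Aside: PURE_OP_LIST above is an X-macro. Each V(...) row names an operator together with its properties and its value/control input counts, and simplified-operator.cc expands the list once per use site (operator caches, accessor definitions, and so on). A minimal, self-contained sketch of that expansion pattern follows; the list entries, DemoOperator struct, and macro names are illustrative only, not V8 code.

#include <cstdio>

#define DEMO_PURE_OP_LIST(V) \
  V(BooleanNot, 1, 0)        \
  V(NumberEqual, 2, 0)       \
  V(StringLength, 1, 0)

struct DemoOperator {
  const char* name;
  int value_input_count;
  int control_input_count;
};

// Expand the list once into a table of operator descriptors.
#define DEMO_DEFINE(Name, value_inputs, control_inputs) \
  {#Name, value_inputs, control_inputs},
static constexpr DemoOperator kDemoOperators[] = {
    DEMO_PURE_OP_LIST(DEMO_DEFINE)};
#undef DEMO_DEFINE

int main() {
  for (const DemoOperator& op : kDemoOperators) {
    std::printf("%s takes %d value input(s)\n", op.name, op.value_input_count);
  }
  return 0;
}
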
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d7a5901448..0602b795a9 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -46,10 +46,6 @@ size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
-size_t hash_value(LoadSensitivity);
-
-std::ostream& operator<<(std::ostream&, LoadSensitivity);
-
struct ConstFieldInfo {
// the map that introduced the const field, if any. An access is considered
// mutable iff the handle is null.
@@ -82,7 +78,6 @@ struct FieldAccess {
Type type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
@@ -96,14 +91,12 @@ struct FieldAccess {
type(Type::None()),
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe),
const_field_info(ConstFieldInfo::None()),
is_store_in_literal(false) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
bool is_store_in_literal = false
#ifdef V8_HEAP_SANDBOX
@@ -118,7 +111,6 @@ struct FieldAccess {
type(type),
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
is_store_in_literal(is_store_in_literal)
#ifdef V8_HEAP_SANDBOX
@@ -162,25 +154,21 @@ struct ElementAccess {
Type type; // type of the element.
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ElementAccess()
: base_is_tagged(kTaggedBase),
header_size(0),
type(Type::None()),
machine_type(MachineType::None()),
- write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe) {}
+ write_barrier_kind(kFullWriteBarrier) {}
ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type type,
- MachineType machine_type, WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe)
+ MachineType machine_type, WriteBarrierKind write_barrier_kind)
: base_is_tagged(base_is_tagged),
header_size(header_size),
type(type),
machine_type(machine_type),
- write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity) {}
+ write_barrier_kind(write_barrier_kind) {}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -926,7 +914,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* PoisonIndex();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index ce9b6fdb18..5025233c88 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -814,9 +814,9 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
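
Aside: the typed-optimization.cc hunk above swaps the ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING macro for an explicit base::Optional check: bail out with NoChange() when the string cannot be converted, otherwise materialize the constant. A rough standalone sketch of that control flow, using std::optional and a made-up ToNumber helper rather than StringRef and the broker:

#include <iostream>
#include <optional>
#include <string>

// Stand-in for StringRef::ToNumber(): may fail to produce a value.
std::optional<double> ToNumber(const std::string& s) {
  try {
    return std::stod(s);
  } catch (...) {
    return std::nullopt;  // data missing / not convertible
  }
}

int main() {
  std::optional<double> number = ToNumber("42.5");
  if (!number.has_value()) return 1;       // analogous to returning NoChange()
  std::cout << number.value() << "\n";     // analogous to Replace(Constant(...))
  return 0;
}
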
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 529f1cc7bb..a96d1ea981 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -882,9 +882,10 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
InductionVariable* induction_var) {
Node* node = induction_var->phi();
DCHECK_EQ(node->opcode(), IrOpcode::kInductionVariablePhi);
+ Node* arith = node->InputAt(1);
Type type = NodeProperties::GetType(node);
Type initial_type = Operand(node, 0);
- Node* arith = node->InputAt(1);
+ Type arith_type = Operand(node, 1);
Type increment_type = Operand(node, 2);
// Intersect {type} with useful bounds.
@@ -910,26 +911,30 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
type = Type::Intersect(type, bound_type, typer_->zone());
}
- // Apply ordinary typing to the "increment" operation.
- // clang-format off
- switch (arith->opcode()) {
+ if (arith_type.IsNone()) {
+ type = Type::None();
+ } else {
+ // Apply ordinary typing to the "increment" operation.
+ // clang-format off
+ switch (arith->opcode()) {
#define CASE(x) \
- case IrOpcode::k##x: \
- type = Type##x(type, increment_type); \
- break;
- CASE(JSAdd)
- CASE(JSSubtract)
- CASE(NumberAdd)
- CASE(NumberSubtract)
- CASE(SpeculativeNumberAdd)
- CASE(SpeculativeNumberSubtract)
- CASE(SpeculativeSafeIntegerAdd)
- CASE(SpeculativeSafeIntegerSubtract)
+ case IrOpcode::k##x: \
+ type = Type##x(type, increment_type); \
+ break;
+ CASE(JSAdd)
+ CASE(JSSubtract)
+ CASE(NumberAdd)
+ CASE(NumberSubtract)
+ CASE(SpeculativeNumberAdd)
+ CASE(SpeculativeNumberSubtract)
+ CASE(SpeculativeSafeIntegerAdd)
+ CASE(SpeculativeSafeIntegerSubtract)
#undef CASE
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ // clang-format on
}
- // clang-format on
type = Type::Union(initial_type, type, typer_->zone());
@@ -2065,10 +2070,6 @@ Type Typer::Visitor::TypeStringLength(Node* node) {
Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
-Type Typer::Visitor::TypePoisonIndex(Node* node) {
- return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone());
-}
-
Type Typer::Visitor::TypeCheckBounds(Node* node) {
return typer_->operation_typer_.CheckBounds(Operand(node, 0),
Operand(node, 1));
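
Aside: the induction-variable hunk above adds a guard on the arithmetic operand's type. If that type is None, the phi is typed None directly; only otherwise is the increment typing applied, which avoids running the JSAdd/NumberAdd/... typers on an infeasible operand. A toy sketch of the guard, modelling Type::None() as std::nullopt over a simple interval lattice (all names here are illustrative):

#include <iostream>
#include <optional>
#include <utility>

// nullopt plays the role of Type::None(); a pair is a numeric range.
using RangeType = std::optional<std::pair<double, double>>;

RangeType TypeAdd(RangeType a, RangeType b) {
  if (!a || !b) return std::nullopt;  // None is absorbing
  return std::make_pair(a->first + b->first, a->second + b->second);
}

int main() {
  RangeType phi_type = std::make_pair(0.0, 10.0);
  RangeType increment = std::make_pair(1.0, 1.0);
  RangeType arith_type = std::nullopt;  // the operand the new check inspects

  // Mirror of the patched logic: only run the increment typing when the
  // arithmetic operand has a feasible type; otherwise the result is None.
  RangeType result;
  if (!arith_type) {
    result = std::nullopt;
  } else {
    result = TypeAdd(phi_type, increment);
  }
  std::cout << (result ? "range" : "None") << "\n";
  return 0;
}
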
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index f33edaa6c0..a0f2aa569d 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -1422,10 +1422,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger);
CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger);
break;
- case IrOpcode::kPoisonIndex:
- CheckValueInputIs(node, 0, Type::Unsigned32());
- CheckTypeIs(node, Type::Unsigned32());
- break;
case IrOpcode::kCheckClosure:
// Any -> Function
CheckValueInputIs(node, 0, Type::Any());
@@ -1641,7 +1637,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1817,9 +1812,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
- case IrOpcode::kTaggedPoisonOnSpeculation:
- case IrOpcode::kWord32PoisonOnSpeculation:
- case IrOpcode::kWord64PoisonOnSpeculation:
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f91c21fd1d..f6f6c3844f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -44,6 +44,7 @@
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -196,14 +197,7 @@ class WasmGraphAssembler : public GraphAssembler {
return Call(call_descriptor, call_target, args...);
}
- void EnsureEnd() {
- if (graph()->end() == nullptr) {
- graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
- }
- }
-
void MergeControlToEnd(Node* node) {
- EnsureEnd();
NodeProperties::MergeControlToEnd(graph(), mcgraph()->common(), node);
}
@@ -212,7 +206,6 @@ class WasmGraphAssembler : public GraphAssembler {
if (FLAG_debug_code) {
auto ok = MakeLabel();
GotoIfNot(condition, &ok);
- EnsureEnd();
Unreachable();
Bind(&ok);
}
@@ -472,7 +465,6 @@ WasmGraphBuilder::WasmGraphBuilder(
mcgraph_(mcgraph),
env_(env),
has_simd_(ContainsSimd(sig)),
- untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
source_position_table_(source_position_table),
isolate_(isolate) {
@@ -501,6 +493,8 @@ void WasmGraphBuilder::Start(unsigned params) {
gasm_->LoadFunctionDataFromJSFunction(
Param(Linkage::kJSCallClosureParamIndex, "%closure")))
: Param(wasm::kWasmInstanceParameterIndex);
+
+ graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
}
Node* WasmGraphBuilder::Param(int index, const char* debug_name) {
@@ -2901,13 +2895,13 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(
- const wasm::FunctionSig* sig, base::Vector<Node*> args,
- base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline, Node* frame_state) {
- CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
- kWasmFunction, frame_state != nullptr);
+Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
+ base::Vector<Node*> rets,
+ wasm::WasmCodePosition position,
+ Node* instance_node, Node* frame_state) {
+ CallDescriptor* call_descriptor = GetWasmCallDescriptor(
+ mcgraph()->zone(), sig, kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
@@ -2935,10 +2929,9 @@ Node* WasmGraphBuilder::BuildWasmCall(
Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node,
- UseRetpoline use_retpoline) {
+ Node* instance_node) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2982,15 +2975,13 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Node* target_node = gasm_->LoadFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, ref_node);
case kReturnCall:
DCHECK(rets.empty());
- return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, ref_node);
}
}
@@ -3010,7 +3001,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline);
+ return BuildWasmCall(sig, args, rets, position, nullptr);
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
@@ -3095,16 +3086,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
- // Mask the key to prevent SSCA.
- if (untrusted_code_mitigations_) {
- // mask = ((key - size) & ~key) >> 31
- Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
- Node* masked_diff =
- gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
- Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
- key = gasm_->Word32And(key, mask);
- }
-
const wasm::ValueType table_type = env_->module->tables[table_index].type;
// Check that the table entry is not null and that the type of the function is
// **identical with** the function type declared at the call site (no
@@ -3140,16 +3121,12 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
intptr_scaled_key);
args[0] = target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, target_instance);
case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, target_instance);
}
}
@@ -3244,14 +3221,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
args[0] = end_label.PhiAt(0);
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
Node* call = continuation == kCallContinues
- ? BuildWasmCall(sig, args, rets, position, instance_node,
- use_retpoline)
- : BuildWasmReturnCall(sig, args, position, instance_node,
- use_retpoline);
+ ? BuildWasmCall(sig, args, rets, position, instance_node)
+ : BuildWasmReturnCall(sig, args, position, instance_node);
return call;
}
@@ -3287,7 +3259,7 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmReturnCall(sig, args, position, nullptr, kNoRetpoline);
+ return BuildWasmReturnCall(sig, args, position, nullptr);
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
@@ -3416,15 +3388,6 @@ void WasmGraphBuilder::InitInstanceCache(
// Load the memory size.
instance_cache->mem_size =
LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
-
- if (untrusted_code_mitigations_) {
- // Load the memory mask.
- instance_cache->mem_mask =
- LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr());
- } else {
- // Explicitly set to nullptr to ensure a SEGV when we try to use it.
- instance_cache->mem_mask = nullptr;
- }
}
void WasmGraphBuilder::PrepareInstanceCacheForLoop(
@@ -3435,10 +3398,6 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop(
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation());
- }
-
#undef INTRODUCE_PHI
}
@@ -3453,10 +3412,6 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
- }
-
#undef INTRODUCE_PHI
}
@@ -3467,10 +3422,6 @@ void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
- if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
- merge, to->mem_mask, from->mem_mask);
- }
}
Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
@@ -3839,13 +3790,6 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// Introduce the actual bounds check.
Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-
- if (untrusted_code_mitigations_) {
- // In the fallthrough case, condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
return {index, kDynamicallyChecked};
}
@@ -4345,13 +4289,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
-
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
index, effect(), bounds_check.if_true);
SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge);
@@ -4396,13 +4333,6 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->Word32And(index, mem_mask);
- }
-
index = BuildChangeUint32ToUintPtr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
@@ -5240,16 +5170,26 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const Operator* (MachineOperatorBuilder::*)(MachineType);
using OperatorByRep =
const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+ using OperatorByAtomicLoadRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
+ using OperatorByAtomicStoreRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
const Type type;
const MachineType machine_type;
const OperatorByType operator_by_type = nullptr;
const OperatorByRep operator_by_rep = nullptr;
+ const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
+ const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
+ : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
+ : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5358,11 +5298,21 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (info.type != AtomicOpInfo::kSpecial) {
- const Operator* op =
- info.operator_by_type
- ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
- : (mcgraph()->machine()->*info.operator_by_rep)(
- info.machine_type.representation());
+ const Operator* op;
+ if (info.operator_by_type) {
+ op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
+ } else if (info.operator_by_rep) {
+ op = (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+ } else if (info.operator_by_atomic_load_params) {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
+ AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+ } else {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
+ AtomicStoreParameters(info.machine_type.representation(),
+ WriteBarrierKind::kNoWriteBarrier,
+ AtomicMemoryOrder::kSeqCst));
+ }
Node* input_nodes[6] = {MemBuffer(capped_offset), index};
int num_actual_inputs = info.type;
@@ -5610,13 +5560,16 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
wasm::WasmCodePosition position) {
TrapIfFalse(wasm::kTrapArrayTooLarge,
gasm_->Uint32LessThanOrEqual(
- length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
+ length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
- Node* a =
- gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ // Do NOT mark this as Operator::kEliminatable, because that would cause the
+ // Call node to have no control inputs, which means it could get scheduled
+ // before the check/trap above.
+ Node* a = gasm_->CallBuiltin(
+ stub, Operator::kNoDeopt | Operator::kNoThrow, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
// TODO(manoskouk): If the loop is ever removed here, we have to update
// ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
@@ -5628,8 +5581,6 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
- // Loops need the graph's end to have been set up.
- gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
@@ -6005,24 +5956,33 @@ Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
return gasm_->LoadWasmArrayLength(array_object);
}
-// TODO(7748): Change {CallBuiltin} to {BuildCCall}. Add an option to copy in a
-// loop for small array sizes. To find the length limit, run
-// test/mjsunit/wasm/array-copy-benchmark.js.
+// TODO(7748): Add an option to copy in a loop for small array sizes. To find
+// the length limit, run test/mjsunit/wasm/array-copy-benchmark.js.
void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
- Node* src_array, Node* src_index, Node* length,
+ CheckForNull dst_null_check, Node* src_array,
+ Node* src_index, CheckForNull src_null_check,
+ Node* length,
wasm::WasmCodePosition position) {
- // TODO(7748): Skip null checks when possible.
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(dst_array, RefNull()),
- position);
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(src_array, RefNull()),
- position);
+ if (dst_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(dst_array, RefNull()), position);
+ }
+ if (src_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(src_array, RefNull()), position);
+ }
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
- Operator::Properties copy_properties =
- Operator::kIdempotent | Operator::kNoThrow | Operator::kNoDeopt;
- // The builtin needs the int parameters first.
- gasm_->CallBuiltin(Builtin::kWasmArrayCopy, copy_properties, dst_index,
- src_index, length, dst_array, src_array);
+
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::Uint32()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
+ src_index, length);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
@@ -6659,8 +6619,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The (cached) call target is the jump table slot for that function.
args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets),
- wasm::kNoCodePosition, nullptr, kNoRetpoline,
- frame_state);
+ wasm::kNoCodePosition, nullptr, frame_state);
}
}
@@ -6929,8 +6888,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
@@ -6957,8 +6917,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = wasm_count; i < expected_arity; ++i) {
args[pos++] = undefined_node;
}
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6981,7 +6942,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] =
gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7162,8 +7124,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int pos = 0;
args[pos++] = gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = Int32Constant(wasm_count); // argument count
- args[pos++] = UndefinedValue(); // receiver
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
+ args[pos++] = UndefinedValue(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
@@ -7457,7 +7420,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
- if (shared->internal_formal_parameter_count() ==
+ if (shared->internal_formal_parameter_count_without_receiver() ==
expected_sig->parameter_count()) {
return std::make_pair(WasmImportCallKind::kJSFunctionArityMatch,
callable);
@@ -7623,8 +7586,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
// Schedule and compile to machine code.
CallDescriptor* incoming =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper);
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
@@ -7665,8 +7627,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
// Run the compiler pipeline to generate machine code.
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmCapiFunction);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmCapiFunction);
if (mcgraph->machine()->Is32()) {
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
@@ -7676,13 +7637,18 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
- return native_module->PublishCode(std::move(wasm_code));
+ wasm::WasmCode* published_code;
+ {
+ wasm::CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
+ return published_code;
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
@@ -7716,8 +7682,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
// Generate the call descriptor.
CallDescriptor* incoming =
- GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper);
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
@@ -7851,9 +7816,10 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
- allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins, func_index);
+ wasm::VoidResult graph_construction_result =
+ wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
+ &builder, detected, func_body, loop_infos,
+ node_origins, func_index, wasm::kInstrumentEndpoints);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7886,8 +7852,9 @@ base::Vector<const char> GetDebugName(Zone* zone, int index) {
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
- int func_index, Counters* counters, wasm::WasmFeatures* detected) {
+ wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
+ wasm::WasmFeatures* detected) {
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileTopTier", "func_index", func_index, "body_size",
func_body.end - func_body.start);
@@ -7939,9 +7906,10 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
}
- Pipeline::GenerateCodeForWasmFunction(
- &info, mcgraph, call_descriptor, source_positions, node_origins,
- func_body, env->module, func_index, &loop_infos);
+ Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
+ call_descriptor, source_positions,
+ node_origins, func_body, env->module,
+ func_index, &loop_infos);
if (counters) {
int zone_bytes =
@@ -7997,10 +7965,9 @@ class LinkageLocationAllocator {
} // namespace
// General code uses the above configuration data.
-CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
- bool need_frame_state) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
+ WasmCallKind call_kind,
+ bool need_frame_state) {
// The extra here is to accommodate the instance object as first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8078,10 +8045,9 @@ CallDescriptor* GetWasmCallDescriptor(
descriptor_kind = CallDescriptor::kCallWasmCapiFunction;
}
- CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline
- : need_frame_state ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags = need_frame_state
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
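
Aside: in the wasm-compiler.cc changes above, AtomicOpInfo gains two extra pointer-to-member-function fields so atomic loads and stores can be constructed with AtomicLoadParameters/AtomicStoreParameters, and the old ternary becomes an if/else chain over whichever pointer is populated. The sketch below shows only the shape of that dispatch; Builder, MachineTypeStub, and RepresentationStub are stand-ins, not V8 types.

#include <iostream>

struct Operator {
  const char* name;
};

struct MachineTypeStub {};
enum class RepresentationStub { kWord32 };

// Stand-in for MachineOperatorBuilder: two factory overloads that differ in
// parameter type, selected through pointer-to-member-function fields.
class Builder {
 public:
  const Operator* ByType(MachineTypeStub) {
    static Operator op{"selected by machine type"};
    return &op;
  }
  const Operator* ByRep(RepresentationStub) {
    static Operator op{"selected by representation"};
    return &op;
  }
};

struct OpInfo {
  using ByTypeFn = const Operator* (Builder::*)(MachineTypeStub);
  using ByRepFn = const Operator* (Builder::*)(RepresentationStub);
  ByTypeFn by_type = nullptr;
  ByRepFn by_rep = nullptr;
};

int main() {
  Builder builder;
  OpInfo info;
  info.by_rep = &Builder::ByRep;  // exactly one pointer is populated

  // Same if/else shape as the patch: call whichever member pointer is set.
  const Operator* op = nullptr;
  if (info.by_type) {
    op = (builder.*info.by_type)(MachineTypeStub{});
  } else if (info.by_rep) {
    op = (builder.*info.by_rep)(RepresentationStub::kWord32);
  }
  std::cout << op->name << "\n";
  return 0;
}
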
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 71e3111c8c..328152b363 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -53,13 +53,15 @@ using TFNode = compiler::Node;
using TFGraph = compiler::MachineGraph;
class WasmCode;
class WasmFeatures;
+class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
} // namespace wasm
namespace compiler {
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv*, const wasm::FunctionBody&, int func_index, Counters*,
+ wasm::CompilationEnv*, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody&, int func_index, Counters*,
wasm::WasmFeatures* detected);
// Calls to Wasm imports are handled in several different ways, depending on the
@@ -176,7 +178,6 @@ class JSWasmCallData {
struct WasmInstanceCacheNodes {
Node* mem_start;
Node* mem_size;
- Node* mem_mask;
};
struct WasmLoopInfo {
@@ -207,10 +208,6 @@ class WasmGraphBuilder {
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum UseRetpoline : bool { // --
- kRetpoline = true,
- kNoRetpoline = false
- };
enum CheckForNull : bool { // --
kWithNullCheck = true,
kWithoutNullCheck = false
@@ -474,9 +471,9 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position);
- void ArrayCopy(Node* dst_array, Node* dst_index, Node* src_array,
- Node* src_index, Node* length,
- wasm::WasmCodePosition position);
+ void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
+ Node* src_array, Node* src_index, CheckForNull src_null_check,
+ Node* length, wasm::WasmCodePosition position);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -576,12 +573,11 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline,
- Node* frame_state = nullptr);
+ Node* instance_node, Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node);
Node* BuildImportCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position, int func_index,
@@ -765,7 +761,6 @@ class WasmGraphBuilder {
bool use_js_isolate_and_params() const { return isolate_ != nullptr; }
bool has_simd_ = false;
bool needs_stack_check_ = false;
- const bool untrusted_code_mitigations_ = true;
const wasm::FunctionSig* const sig_;
@@ -791,8 +786,6 @@ V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
- WasmGraphBuilder::UseRetpoline use_retpoline =
- WasmGraphBuilder::kNoRetpoline,
WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
new file mode 100644
index 0000000000..6753769953
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -0,0 +1,195 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-inlining.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction WasmInliner::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kCall) {
+ return ReduceCall(node);
+ } else {
+ return NoChange();
+ }
+}
+
+// TODO(12166): Abstract over a heuristics provider.
+Reduction WasmInliner::ReduceCall(Node* call) {
+ Node* callee = NodeProperties::GetValueInput(call, 0);
+ IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
+ ? IrOpcode::kRelocatableInt32Constant
+ : IrOpcode::kRelocatableInt64Constant;
+ if (callee->opcode() != reloc_opcode) return NoChange();
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (static_cast<uint32_t>(info.value()) != inlinee_index_) return NoChange();
+
+ CHECK_LT(inlinee_index_, module()->functions.size());
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ base::Vector<const byte> function_bytes =
+ wire_bytes_->GetCode(function->code);
+ const wasm::FunctionBody inlinee_body(function->sig, function->code.offset(),
+ function_bytes.begin(),
+ function_bytes.end());
+ wasm::WasmFeatures detected;
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig, spt_);
+ std::vector<WasmLoopInfo> infos;
+
+ wasm::DecodeResult result;
+ Node* inlinee_start;
+ Node* inlinee_end;
+ {
+ Graph::SubgraphScope scope(graph());
+ result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
+ module(), &builder, &detected, inlinee_body,
+ &infos, node_origins_, inlinee_index_,
+ wasm::kDoNotInstrumentEndpoints);
+ inlinee_start = graph()->start();
+ inlinee_end = graph()->end();
+ }
+
+ if (result.failed()) return NoChange();
+ return InlineCall(call, inlinee_start, inlinee_end);
+}
+
+// TODO(12166): Handle exceptions and tail calls.
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK_EQ(call->opcode(), IrOpcode::kCall);
+
+ /* 1) Rewire callee formal parameters to the call-site real parameters. Rewire
+ * effect and control dependencies of callee's start node with the respective
+ * inputs of the call node.
+ */
+ Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
+
+ for (Edge edge : callee_start->use_edges()) {
+ Node* use = edge.from();
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ // Index 0 is the callee node.
+ int index = 1 + ParameterIndexOf(use->op());
+ Replace(use, NodeProperties::GetValueInput(call, index));
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
+
+ /* 2) Rewire uses of the call node to the return values of the callee. Since
+ * there might be multiple return nodes in the callee, we have to create Merge
+ * and Phi nodes for them.
+ */
+ NodeVector return_nodes(zone());
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ return_nodes.push_back(input);
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ break;
+ case IrOpcode::kTailCall:
+ // TODO(12166): A tail call in the inlined function has to be
+ // transformed into a regular call in the caller function.
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (return_nodes.size() > 0) {
+ int const return_count = static_cast<int>(return_nodes.size());
+ NodeVector controls(zone());
+ NodeVector effects(zone());
+ for (Node* const return_node : return_nodes) {
+ controls.push_back(NodeProperties::GetControlInput(return_node));
+ effects.push_back(NodeProperties::GetEffectInput(return_node));
+ }
+ Node* control_output = graph()->NewNode(common()->Merge(return_count),
+ return_count, &controls.front());
+ effects.push_back(control_output);
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(return_count),
+ static_cast<int>(effects.size()), &effects.front());
+
+ // The first input of a return node is discarded. This is because Wasm
+ // functions always return an additional 0 constant as a first return value.
+ DCHECK(
+ Int32Matcher(NodeProperties::GetValueInput(return_nodes[0], 0)).Is(0));
+ int const return_arity = return_nodes[0]->op()->ValueInputCount() - 1;
+ NodeVector values(zone());
+ for (int i = 0; i < return_arity; i++) {
+ NodeVector ith_values(zone());
+ for (Node* const return_node : return_nodes) {
+ Node* value = NodeProperties::GetValueInput(return_node, i + 1);
+ ith_values.push_back(value);
+ }
+ ith_values.push_back(control_output);
+ // Find the correct machine representation for the return values from the
+ // inlinee signature.
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ MachineRepresentation repr =
+ function->sig->GetReturn(i).machine_representation();
+ Node* ith_value_output = graph()->NewNode(
+ common()->Phi(repr, return_count),
+ static_cast<int>(ith_values.size()), &ith_values.front());
+ values.push_back(ith_value_output);
+ }
+
+ if (return_arity == 0) {
+ // Void function, no value uses.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ } else if (return_arity == 1) {
+ // One return value. Just replace value uses of the call node with it.
+ ReplaceWithValue(call, values[0], effect_output, control_output);
+ } else {
+ // Multiple returns. We have to find the projections of the call node and
+ // replace them with the returned values.
+ for (Edge use_edge : call->use_edges()) {
+ if (NodeProperties::IsValueEdge(use_edge)) {
+ Node* use = use_edge.from();
+ DCHECK_EQ(use->opcode(), IrOpcode::kProjection);
+ ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
+ }
+ }
+ // All value inputs are replaced by the above loop, so it is ok to use
+ // Dead() as a dummy for value replacement.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ }
+ return Replace(mcgraph()->Dead());
+ } else {
+ // The callee can never return. The call node and all its uses are dead.
+ ReplaceWithValue(call, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ return Changed(call);
+ }
+}
+
+const wasm::WasmModule* WasmInliner::module() const { return env_->module; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
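
Aside: InlineCall above merges multiple return sites by building, for each return index i, one Phi whose inputs are the i-th value from every kReturn node plus the Merge node as the trailing control input. A toy sketch of that grouping with plain vectors; Node here is a stand-in struct, not V8's Node.

#include <iostream>
#include <vector>

struct Node {
  int id;
};

int main() {
  // Two return sites, each yielding two values (after dropping the leading
  // constant-0 that wasm returns carry as their first input).
  std::vector<std::vector<Node>> return_values = {{{10}, {11}}, {{20}, {21}}};
  Node merge{99};  // stand-in for the common()->Merge(return_count) node

  const int return_arity = static_cast<int>(return_values[0].size());
  for (int i = 0; i < return_arity; i++) {
    std::vector<Node> phi_inputs;
    for (const auto& ret : return_values) phi_inputs.push_back(ret[i]);
    phi_inputs.push_back(merge);  // control input goes last, as for Phi nodes
    std::cout << "phi " << i << " gets " << phi_inputs.size() << " inputs\n";
  }
  return 0;
}
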
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
new file mode 100644
index 0000000000..8b31b6b291
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_INLINING_H_
+#define V8_COMPILER_WASM_INLINING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+struct CompilationEnv;
+struct WasmModule;
+class WireBytesStorage;
+} // namespace wasm
+
+class BytecodeOffset;
+class OptimizedCompilationInfo;
+
+namespace compiler {
+
+class NodeOriginTable;
+class SourcePositionTable;
+
+// The WasmInliner provides the core graph inlining machinery for WebAssembly
+// graphs. Note that this class only deals with the mechanics of how to inline
+// one graph into another; the heuristics that decide what and how much to
+// inline are beyond its scope. As a current placeholder, only the function at
+// the given index {inlinee_index} is inlined.
+class WasmInliner final : public AdvancedReducer {
+ public:
+ WasmInliner(Editor* editor, wasm::CompilationEnv* env,
+ SourcePositionTable* spt, NodeOriginTable* node_origins,
+ MachineGraph* mcgraph, const wasm::WireBytesStorage* wire_bytes,
+ uint32_t inlinee_index)
+ : AdvancedReducer(editor),
+ env_(env),
+ spt_(spt),
+ node_origins_(node_origins),
+ mcgraph_(mcgraph),
+ wire_bytes_(wire_bytes),
+ inlinee_index_(inlinee_index) {}
+
+ const char* reducer_name() const override { return "WasmInliner"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Zone* zone() const { return mcgraph_->zone(); }
+ CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+ Graph* graph() const { return mcgraph_->graph(); }
+ MachineGraph* mcgraph() const { return mcgraph_; }
+ const wasm::WasmModule* module() const;
+
+ Reduction ReduceCall(Node* call);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end);
+
+ wasm::CompilationEnv* const env_;
+ SourcePositionTable* const spt_;
+ NodeOriginTable* const node_origins_;
+ MachineGraph* const mcgraph_;
+ const wasm::WireBytesStorage* const wire_bytes_;
+ const uint32_t inlinee_index_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_INLINING_H_