path: root/deps/v8/src/compiler
Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 11
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 21
-rw-r--r--  deps/v8/src/compiler/allocation-builder-inl.h | 22
-rw-r--r--  deps/v8/src/compiler/allocation-builder.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 267
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 15
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 15
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 106
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 275
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 13
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 13
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 139
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 143
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 19
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 546
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 62
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 62
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 269
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 7
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 214
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 9
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc | 39
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 25
-rw-r--r--  deps/v8/src/compiler/backend/mid-tier-register-allocator.cc | 196
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 192
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 24
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 40
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 114
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 204
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 26
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 42
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 166
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 171
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 13
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 13
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 167
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc | 2775
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h | 447
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc | 1579
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc | 3034
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 540
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 25
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 25
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 139
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 350
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 26
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 28
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 170
-rw-r--r--  deps/v8/src/compiler/basic-block-instrumentor.cc | 38
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 11
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.h | 6
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 273
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 42
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 14
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 18
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 13
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 126
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 26
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 35
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 209
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 20
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 8
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 2
-rw-r--r--  deps/v8/src/compiler/frame-states.cc | 55
-rw-r--r--  deps/v8/src/compiler/frame-states.h | 48
-rw-r--r--  deps/v8/src/compiler/frame.h | 8
-rw-r--r--  deps/v8/src/compiler/functional-list.h | 2
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 51
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 19
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 10
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 262
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 199
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 7
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 10
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 364
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 29
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 1437
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 45
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 6
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 81
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 13
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 159
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 22
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 134
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 55
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 74
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 33
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 63
-rw-r--r--  deps/v8/src/compiler/linkage.h | 11
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 5
-rw-r--r--  deps/v8/src/compiler/loop-analysis.cc | 87
-rw-r--r--  deps/v8/src/compiler/loop-analysis.h | 82
-rw-r--r--  deps/v8/src/compiler/loop-peeling.cc | 116
-rw-r--r--  deps/v8/src/compiler/loop-peeling.h | 4
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 4
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 75
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 1286
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 17
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 2
-rw-r--r--  deps/v8/src/compiler/node-observer.cc | 61
-rw-r--r--  deps/v8/src/compiler/node-observer.h | 130
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 3
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 6
-rw-r--r--  deps/v8/src/compiler/node.cc | 4
-rw-r--r--  deps/v8/src/compiler/node.h | 19
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 19
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 1
-rw-r--r--  deps/v8/src/compiler/operator.h | 2
-rw-r--r--  deps/v8/src/compiler/osr.cc | 4
-rw-r--r--  deps/v8/src/compiler/persistent-map.h | 4
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 211
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 2
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h | 2
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 89
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 2
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 2
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 2
-rw-r--r--  deps/v8/src/compiler/scheduled-machine-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 85
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h | 4
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 50
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 424
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 18
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 31
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 12
-rw-r--r--  deps/v8/src/compiler/state-values-utils.h | 11
-rw-r--r--  deps/v8/src/compiler/typer.cc | 24
-rw-r--r--  deps/v8/src/compiler/types.cc | 11
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 49
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 2064
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 99
139 files changed, 16049 insertions(+), 5960 deletions(-)
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index afc8551ae0..6175ef3e06 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,7 +1,6 @@
bmeurer@chromium.org
danno@chromium.org
sigurds@chromium.org
-tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
mslekova@chromium.org
@@ -18,6 +17,16 @@ per-file wasm-*=jkummerow@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
+per-file machine-operator.*=ahaas@chromium.org
+per-file machine-operator.*=bbudge@chromium.org
+per-file machine-operator.*=gdeepti@chromium.org
+per-file machine-operator.*=zhin@chromium.org
+
+per-file opcodes.*=ahaas@chromium.org
+per-file opcodes.*=bbudge@chromium.org
+per-file opcodes.*=gdeepti@chromium.org
+per-file opcodes.*=zhin@chromium.org
+
per-file simd-scalar-lowering.*=bbudge@chromium.org
per-file simd-scalar-lowering.*=gdeepti@chromium.org
per-file simd-scalar-lowering.*=zhin@chromium.org
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index ddf742e708..06806feb42 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -394,11 +394,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- if (!FLAG_unbox_double_fields) {
- unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(
- map_ref, descriptor));
- }
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ descriptor));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
@@ -433,7 +431,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
- } else if (broker()->is_turboprop() && !map->is_prototype_map()) {
+ } else if (broker()->is_turboprop() && !map->is_prototype_map() &&
+ !IsAnyStore(access_mode)) {
// The constness feedback is too unstable for the aggresive compilation
// of turboprop.
constness = PropertyConstness::kMutable;
@@ -861,12 +860,10 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- if (!FLAG_unbox_double_fields) {
- transition_map_ref.SerializeOwnDescriptor(number);
- unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(
- transition_map_ref, number));
- }
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index ff1404baa7..022d6aa4d4 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -27,11 +27,21 @@ void AllocationBuilder::AllocateContext(int variadic_part_length, MapRef map) {
jsgraph()->Constant(variadic_part_length));
}
+// static
+bool AllocationBuilder::CanAllocateArray(int length, MapRef map,
+ AllocationType allocation) {
+ DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
+ map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int const size = (map.instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ return size <= Heap::MaxRegularHeapObjectSize(allocation);
+}
+
// Compound allocation of a FixedArray.
void AllocationBuilder::AllocateArray(int length, MapRef map,
AllocationType allocation) {
- DCHECK(map.instance_type() == FIXED_ARRAY_TYPE ||
- map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ DCHECK(CanAllocateArray(length, map, allocation));
int size = (map.instance_type() == FIXED_ARRAY_TYPE)
? FixedArray::SizeFor(length)
: FixedDoubleArray::SizeFor(length);
@@ -40,8 +50,16 @@ void AllocationBuilder::AllocateArray(int length, MapRef map,
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
+// static
+bool AllocationBuilder::CanAllocateSloppyArgumentElements(
+ int length, MapRef map, AllocationType allocation) {
+ int const size = SloppyArgumentsElements::SizeFor(length);
+ return size <= Heap::MaxRegularHeapObjectSize(allocation);
+}
+
void AllocationBuilder::AllocateSloppyArgumentElements(
int length, MapRef map, AllocationType allocation) {
+ DCHECK(CanAllocateSloppyArgumentElements(length, map, allocation));
int size = SloppyArgumentsElements::SizeFor(length);
Allocate(size, allocation, Type::OtherInternal());
Store(AccessBuilder::ForMap(), map);
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index c9a2570493..289a06b1ad 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -52,10 +52,16 @@ class AllocationBuilder final {
inline void AllocateContext(int variadic_part_length, MapRef map);
// Compound allocation of a FixedArray.
+ inline static bool CanAllocateArray(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
inline void AllocateArray(int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
// Compound allocation of a SloppyArgumentsElements
+ static inline bool CanAllocateSloppyArgumentElements(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
inline void AllocateSloppyArgumentElements(
int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 74215cac30..d243c07790 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -314,8 +314,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
@@ -326,8 +325,7 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
InstructionCode opcode,
ArmOperandConverter const& i,
Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned,
- static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
+ DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
switch (AddressingModeField::decode(opcode)) {
case kMode_Offset_RI:
codegen->tasm()->mov(address, i.InputImmediate(1));
@@ -517,8 +515,9 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- QwNeonRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1); \
+ UseScratchRegisterScope temps(tasm()); \
+ Simd128Register tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
__ and_(shift, i.InputRegister(1), Operand(mask)); \
__ vdup(sz, tmp, shift); \
@@ -536,8 +535,9 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
if (instr->InputAt(1)->IsImmediate()) { \
__ asm_imm(dt, dst, src, i.InputInt##width(1)); \
} else { \
- QwNeonRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1); \
+ UseScratchRegisterScope temps(tasm()); \
+ Simd128Register tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire(); \
constexpr int mask = (1 << width) - 1; \
__ and_(shift, i.InputRegister(1), Operand(mask)); \
__ vdup(sz, tmp, shift); \
@@ -558,30 +558,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ cmp(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -786,13 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -901,7 +871,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_parameters);
}
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1678,8 +1648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
@@ -1716,8 +1685,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
UseScratchRegisterScope temps(tasm());
Register address = temps.Acquire();
@@ -1795,35 +1763,36 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ VFPCanonicalizeNaN(result, value);
break;
}
- case kArmPush:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ vpush(i.InputFloatRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
- break;
- case MachineRepresentation::kFloat64:
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ vpush(i.InputSimd128Register(0));
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ push(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ case kArmPush: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
+ if (slots > pushed_slots) {
+ __ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
}
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ vpush(i.InputFloatRegister(1));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ vpush(i.InputDoubleRegister(1));
+ break;
+ case MachineRepresentation::kSimd128:
+ __ vpush(i.InputSimd128Register(1));
+ break;
+ default:
+ __ push(i.InputRegister(1));
+ break;
+ }
+ frame_access_state()->IncreaseSPDelta(slots);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ }
case kArmPoke: {
int const slot = MiscField::decode(instr->opcode());
__ str(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize));
@@ -2098,6 +2067,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrintn(dst.high(), src.high());
break;
}
+ case kArmF64x2ConvertLowI32x4S: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_s32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_s32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
+ case kArmF64x2ConvertLowI32x4U: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_u32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_u32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
+ case kArmF64x2PromoteLowF32x4: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f64_f32(dst.low(), SwVfpRegister::from_code(src.code() * 4));
+ __ vcvt_f64_f32(dst.high(), SwVfpRegister::from_code(src.code() * 4 + 1));
+ break;
+ }
case kArmI64x2SplatI32Pair: {
Simd128Register dst = i.OutputSimd128Register();
__ vdup(Neon32, dst, i.InputRegister(0));
@@ -2123,11 +2113,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Mul: {
+ UseScratchRegisterScope temps(tasm());
QwNeonRegister dst = i.OutputSimd128Register();
QwNeonRegister left = i.InputSimd128Register(0);
QwNeonRegister right = i.InputSimd128Register(1);
QwNeonRegister tmp1 = i.TempSimd128Register(0);
- QwNeonRegister tmp2 = i.TempSimd128Register(1);
+ QwNeonRegister tmp2 = temps.AcquireQ();
// This algorithm uses vector operations to perform 64-bit integer
// multiplication by splitting it into a high and low 32-bit integers.
@@ -2167,6 +2158,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// dst: [ (a2*b3 + a3*b2)<<32 + (a2*b2) | (a0*b1 + a1*b0)<<32 + (a0*b0) ]
break;
}
+ case kArmI64x2Abs: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register tmp = temps.AcquireQ();
+ __ vshr(NeonS64, tmp, src, 63);
+ __ veor(dst, src, tmp);
+ __ vsub(Neon64, dst, dst, tmp);
+ break;
+ }
case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register();
__ vmov(dst, uint64_t{0});
@@ -2190,16 +2191,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2BitMask: {
- UseScratchRegisterScope temps(tasm());
- Register dst = i.OutputRegister();
- Simd128Register src = i.InputSimd128Register(0);
- QwNeonRegister tmp1 = temps.AcquireQ();
- Register tmp = temps.Acquire();
-
- __ vshr(NeonU64, tmp1, src, 63);
- __ vmov(NeonU32, dst, tmp1.low(), 0);
- __ vmov(NeonU32, tmp, tmp1.high(), 0);
- __ add(dst, dst, Operand(tmp, LSL, 1));
+ __ I64x2BitMask(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
case kArmI64x2SConvertI32x4Low: {
@@ -2381,6 +2373,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vbsl(dst, rhs, lhs);
break;
}
+ case kArmF32x4DemoteF64x2Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_f32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_f32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
case kArmI32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2450,13 +2450,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI64x2Eq: {
+ __ I64x2Eq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmI64x2Ne: {
Simd128Register dst = i.OutputSimd128Register();
UseScratchRegisterScope temps(tasm());
- Simd128Register scratch = temps.AcquireQ();
+ Simd128Register tmp = temps.AcquireQ();
__ vceq(Neon32, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
- __ vrev64(Neon32, scratch, dst);
- __ vand(dst, dst, scratch);
+ __ vrev64(Neon32, tmp, dst);
+ __ vand(dst, dst, tmp);
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmI64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmI64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI32x4Eq: {
@@ -2525,19 +2541,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI32x4BitMask: {
Register dst = i.OutputRegister();
+ UseScratchRegisterScope temps(tasm());
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS32, tmp2, src, 31);
+ __ vshr(NeonS32, tmp, src, 31);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x0000'0002'0000'0001}));
__ vmov(mask.high(), Double(uint64_t{0x0000'0008'0000'0004}));
- __ vand(tmp2, mask, tmp2);
- __ vpadd(Neon32, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon32, tmp2.low(), tmp2.low(), kDoubleRegZero);
- __ VmovLow(dst, tmp2.low());
+ __ vand(tmp, mask, tmp);
+ __ vpadd(Neon32, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon32, tmp.low(), tmp.low(), kDoubleRegZero);
+ __ VmovLow(dst, tmp.low());
break;
}
case kArmI32x4DotI16x8S: {
@@ -2553,6 +2570,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpadd(Neon32, dst.high(), scratch.low(), scratch.high());
break;
}
+ case kArmI32x4TruncSatF64x2SZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_s32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_s32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
+ case kArmI32x4TruncSatF64x2UZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vcvt_u32_f64(SwVfpRegister::from_code(dst.code() * 4), src.low());
+ __ vcvt_u32_f64(SwVfpRegister::from_code(dst.code() * 4 + 1), src.high());
+ __ vmov(dst.high(), 0);
+ break;
+ }
case kArmI16x8Splat: {
__ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2714,21 +2747,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI16x8BitMask: {
+ UseScratchRegisterScope temps(tasm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS16, tmp2, src, 15);
+ __ vshr(NeonS16, tmp, src, 15);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x0008'0004'0002'0001}));
__ vmov(mask.high(), Double(uint64_t{0x0080'0040'0020'0010}));
- __ vand(tmp2, mask, tmp2);
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vmov(NeonU16, dst, tmp2.low(), 0);
+ __ vand(tmp, mask, tmp);
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vmov(NeonU16, dst, tmp.low(), 0);
break;
}
case kArmI16x8Q15MulRSatS: {
@@ -2873,23 +2907,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmI8x16BitMask: {
+ UseScratchRegisterScope temps(tasm());
Register dst = i.OutputRegister();
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register tmp2 = i.TempSimd128Register(0);
- Simd128Register mask = i.TempSimd128Register(1);
+ Simd128Register tmp = temps.AcquireQ();
+ Simd128Register mask = i.TempSimd128Register(0);
- __ vshr(NeonS8, tmp2, src, 7);
+ __ vshr(NeonS8, tmp, src, 7);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
__ vmov(mask.low(), Double(uint64_t{0x8040'2010'0804'0201}));
__ vmov(mask.high(), Double(uint64_t{0x8040'2010'0804'0201}));
- __ vand(tmp2, mask, tmp2);
- __ vext(mask, tmp2, tmp2, 8);
- __ vzip(Neon8, mask, tmp2);
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
- __ vmov(NeonU16, dst, tmp2.low(), 0);
+ __ vand(tmp, mask, tmp);
+ __ vext(mask, tmp, tmp, 8);
+ __ vzip(Neon8, mask, tmp);
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ __ vmov(NeonU16, dst, tmp.low(), 0);
break;
}
case kArmSignSelect: {
@@ -3241,9 +3276,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmV32x4AnyTrue:
- case kArmV16x8AnyTrue:
- case kArmV8x16AnyTrue: {
+ case kArmV128AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3254,6 +3287,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
+ case kArmV64x2AllTrue: {
+ __ V64x2AllTrue(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kArmV32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
@@ -3675,7 +3712,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -3842,7 +3879,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -3926,7 +3963,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r3;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -3934,9 +3970,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index b5a77a1a10..416f8a564a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -154,6 +154,9 @@ namespace compiler {
V(ArmF64x2Floor) \
V(ArmF64x2Trunc) \
V(ArmF64x2NearestInt) \
+ V(ArmF64x2ConvertLowI32x4S) \
+ V(ArmF64x2ConvertLowI32x4U) \
+ V(ArmF64x2PromoteLowF32x4) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
@@ -177,8 +180,10 @@ namespace compiler {
V(ArmF32x4Le) \
V(ArmF32x4Pmin) \
V(ArmF32x4Pmax) \
+ V(ArmF32x4DemoteF64x2Zero) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
+ V(ArmI64x2Abs) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
@@ -188,6 +193,9 @@ namespace compiler {
V(ArmI64x2ShrU) \
V(ArmI64x2BitMask) \
V(ArmI64x2Eq) \
+ V(ArmI64x2Ne) \
+ V(ArmI64x2GtS) \
+ V(ArmI64x2GeS) \
V(ArmI64x2SConvertI32x4Low) \
V(ArmI64x2SConvertI32x4High) \
V(ArmI64x2UConvertI32x4Low) \
@@ -222,6 +230,8 @@ namespace compiler {
V(ArmI32x4Abs) \
V(ArmI32x4BitMask) \
V(ArmI32x4DotI16x8S) \
+ V(ArmI32x4TruncSatF64x2SZero) \
+ V(ArmI32x4TruncSatF64x2UZero) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLaneS) \
V(ArmI16x8ReplaceLane) \
@@ -327,11 +337,10 @@ namespace compiler {
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
- V(ArmV32x4AnyTrue) \
+ V(ArmV64x2AllTrue) \
V(ArmV32x4AllTrue) \
- V(ArmV16x8AnyTrue) \
V(ArmV16x8AllTrue) \
- V(ArmV8x16AnyTrue) \
+ V(ArmV128AnyTrue) \
V(ArmV8x16AllTrue) \
V(ArmS128Load8Splat) \
V(ArmS128Load16Splat) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 8b52a18482..b82369e763 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -134,6 +134,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Floor:
case kArmF64x2Trunc:
case kArmF64x2NearestInt:
+ case kArmF64x2ConvertLowI32x4S:
+ case kArmF64x2ConvertLowI32x4U:
+ case kArmF64x2PromoteLowF32x4:
case kArmF32x4Splat:
case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane:
@@ -157,8 +160,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Le:
case kArmF32x4Pmin:
case kArmF32x4Pmax:
+ case kArmF32x4DemoteF64x2Zero:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
+ case kArmI64x2Abs:
case kArmI64x2Neg:
case kArmI64x2Shl:
case kArmI64x2ShrS:
@@ -168,6 +173,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI64x2ShrU:
case kArmI64x2BitMask:
case kArmI64x2Eq:
+ case kArmI64x2Ne:
+ case kArmI64x2GtS:
+ case kArmI64x2GeS:
case kArmI64x2SConvertI32x4Low:
case kArmI64x2SConvertI32x4High:
case kArmI64x2UConvertI32x4Low:
@@ -202,6 +210,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Abs:
case kArmI32x4BitMask:
case kArmI32x4DotI16x8S:
+ case kArmI32x4TruncSatF64x2SZero:
+ case kArmI32x4TruncSatF64x2UZero:
case kArmI16x8Splat:
case kArmI16x8ExtractLaneS:
case kArmI16x8ReplaceLane:
@@ -307,11 +317,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x8Reverse:
case kArmS8x4Reverse:
case kArmS8x2Reverse:
- case kArmV32x4AnyTrue:
+ case kArmV64x2AllTrue:
case kArmV32x4AllTrue:
- case kArmV16x8AnyTrue:
case kArmV16x8AllTrue:
- case kArmV8x16AnyTrue:
+ case kArmV128AnyTrue:
case kArmV8x16AllTrue:
return kNoOpcodeFlags;
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index bd1e7c4b4f..3f15323297 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -108,10 +108,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseImmediate(node->InputAt(1)));
}
} else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ VisitRRR(selector, opcode, node);
}
}
@@ -511,41 +508,9 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
-namespace {
-// Helper struct for load lane and store lane to indicate which opcode to use
-// and what memory size to be encoded in the opcode, and the new lane index.
-struct LoadStoreLaneParams {
- bool low_op;
- NeonSize sz;
- uint8_t laneidx;
- LoadStoreLaneParams(uint8_t laneidx, NeonSize sz, int lanes)
- : low_op(laneidx < lanes), sz(sz), laneidx(laneidx % lanes) {}
-};
-
-// The register mapping on ARM (1 Q to 2 D), means that loading/storing high
-// lanes of a Q register is equivalent to loading/storing the high D reg, modulo
-// number of lanes in a D reg. This function decides, based on the laneidx and
-// load/store size, whether the low or high D reg is accessed, and what the new
-// lane index is.
-LoadStoreLaneParams GetLoadStoreLaneParams(MachineRepresentation rep,
- uint8_t laneidx) {
- if (rep == MachineRepresentation::kWord8) {
- return LoadStoreLaneParams(laneidx, Neon8, 8);
- } else if (rep == MachineRepresentation::kWord16) {
- return LoadStoreLaneParams(laneidx, Neon16, 4);
- } else if (rep == MachineRepresentation::kWord32) {
- return LoadStoreLaneParams(laneidx, Neon32, 2);
- } else if (rep == MachineRepresentation::kWord64) {
- return LoadStoreLaneParams(laneidx, Neon64, 1);
- } else {
- UNREACHABLE();
- }
-}
-} // namespace
-
void InstructionSelector::VisitStoreLane(Node* node) {
StoreLaneParameters params = StoreLaneParametersOf(node->op());
- LoadStoreLaneParams f = GetLoadStoreLaneParams(params.rep, params.laneidx);
+ LoadStoreLaneParams f(params.rep, params.laneidx);
InstructionCode opcode =
f.low_op ? kArmS128StoreLaneLow : kArmS128StoreLaneHigh;
opcode |= MiscField::encode(f.sz);
@@ -563,8 +528,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
void InstructionSelector::VisitLoadLane(Node* node) {
LoadLaneParameters params = LoadLaneParametersOf(node->op());
- LoadStoreLaneParams f =
- GetLoadStoreLaneParams(params.rep.representation(), params.laneidx);
+ LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
InstructionCode opcode =
f.low_op ? kArmS128LoadLaneLow : kArmS128LoadLaneHigh;
opcode |= MiscField::encode(f.sz);
@@ -673,7 +637,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
InstructionOperand output = g.DefineAsRegister(node);
@@ -1741,10 +1705,14 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kArmPush, g.NoOutput(), decrement, g.UseRegister(input.node));
}
}
}
@@ -1776,8 +1744,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
// Shared routine for multiple compare operations.
@@ -2605,6 +2571,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Neg, kArmF32x4Neg) \
V(F32x4RecipApprox, kArmF32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox) \
+ V(I64x2Abs, kArmI64x2Abs) \
V(I64x2SConvertI32x4Low, kArmI64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High, kArmI64x2SConvertI32x4High) \
V(I64x2UConvertI32x4Low, kArmI64x2UConvertI32x4Low) \
@@ -2627,11 +2594,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16Abs, kArmI8x16Abs) \
V(I8x16Popcnt, kArmVcnt) \
V(S128Not, kArmS128Not) \
- V(V32x4AnyTrue, kArmV32x4AnyTrue) \
+ V(V64x2AllTrue, kArmV64x2AllTrue) \
V(V32x4AllTrue, kArmV32x4AllTrue) \
- V(V16x8AnyTrue, kArmV16x8AnyTrue) \
V(V16x8AllTrue, kArmV16x8AllTrue) \
- V(V8x16AnyTrue, kArmV8x16AnyTrue) \
+ V(V128AnyTrue, kArmV128AnyTrue) \
V(V8x16AllTrue, kArmV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -2679,6 +2645,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4MaxS, kArmI32x4MaxS) \
V(I32x4Eq, kArmI32x4Eq) \
V(I64x2Eq, kArmI64x2Eq) \
+ V(I64x2Ne, kArmI64x2Ne) \
+ V(I64x2GtS, kArmI64x2GtS) \
+ V(I64x2GeS, kArmI64x2GeS) \
V(I32x4Ne, kArmI32x4Ne) \
V(I32x4GtS, kArmI32x4GtS) \
V(I32x4GeS, kArmI32x4GeS) \
@@ -2844,8 +2813,7 @@ void InstructionSelector::VisitI64x2Neg(Node* node) {
void InstructionSelector::VisitI64x2Mul(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kArmI64x2Mul, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -3053,8 +3021,7 @@ namespace {
template <ArchOpcode opcode>
void VisitBitMask(InstructionSelector* selector, Node* node) {
ArmOperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempSimd128Register()};
+ InstructionOperand temps[] = {g.TempSimd128Register()};
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
@@ -3182,6 +3149,45 @@ void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+// TODO(v8:9780)
+// These double precision conversion instructions need a low Q register (q0-q7)
+// because the codegen accesses the S registers they overlap with.
+void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2ConvertLowI32x4S, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2ConvertLowI32x4U, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmI32x4TruncSatF64x2SZero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmI32x4TruncSatF64x2UZero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF32x4DemoteF64x2Zero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF32x4DemoteF64x2Zero, g.DefineAsFixed(node, q0),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmF64x2PromoteLowF32x4, g.DefineAsRegister(node),
+ g.UseFixed(node->InputAt(0), q0));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 76613d1182..5b9c2e4d4f 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -375,11 +375,78 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+class WasmOutOfLineTrap : public OutOfLineCode {
+ public:
+ WasmOutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), gen_(gen), instr_(instr) {}
+ void Generate() override {
+ Arm64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ protected:
+ CodeGenerator* gen_;
+
+ void GenerateWithTrapId(TrapId trap_id) { GenerateCallToTrap(trap_id); }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(),
+ 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ }
+ }
+
+ Instruction* instr_;
+};
+
+class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
+ public:
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, Instruction* instr)
+ : WasmOutOfLineTrap(gen, instr), pc_(pc) {}
+
+ void Generate() override {
+ gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
+ GenerateWithTrapId(TrapId::kTrapMemOutOfBounds);
+ }
+
+ private:
+ int pc_;
+};
+
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr, int pc) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(AccessModeField::decode(opcode));
+ if (access_mode == kMemoryAccessProtected) {
+ zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
+ }
+}
+
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
@@ -390,8 +457,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
AddressingMode address_mode = AddressingModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
UseScratchRegisterScope temps(codegen->tasm());
@@ -560,30 +626,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Cmp(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ Ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -720,13 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -845,7 +881,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ Bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1140,14 +1176,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Saddlp: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Saddlp(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f));
break;
}
case kArm64Uaddlp: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Uaddlp(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f));
@@ -1159,7 +1195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister32(1));
} else {
DCHECK(instr->InputAt(0)->IsSimd128Register());
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidth(dst_f);
__ Smull(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1168,7 +1204,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Smull2: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Smull2(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1181,7 +1217,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister32(1));
} else {
DCHECK(instr->InputAt(0)->IsSimd128Register());
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidth(dst_f);
__ Umull(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1190,7 +1226,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umull2: {
- VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
__ Umull2(i.OutputSimd128Register().Format(dst_f),
i.InputSimd128Register(0).Format(src_f),
@@ -1477,7 +1513,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cnt: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
i.InputSimd128Register(0).Format(f));
break;
@@ -1729,39 +1765,49 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kArm64Ldrb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
@@ -1778,27 +1824,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64LdrS:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
break;
case kArm64StrS:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
break;
case kArm64StrD:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrQ:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
break;
case kArm64StrQ:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
case kArm64DmbIsh:
@@ -1960,33 +2013,67 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Sxtl: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
__ Sxtl(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Sxtl2: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
__ Sxtl2(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Uxtl: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
__ Uxtl(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
case kArm64Uxtl2: {
- VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
__ Uxtl2(i.OutputSimd128Register().Format(wide),
i.InputSimd128Register(0).Format(narrow));
break;
}
+ case kArm64F64x2ConvertLowI32x4S: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Sxtl(dst, i.InputSimd128Register(0).V2S());
+ __ Scvtf(dst, dst);
+ break;
+ }
+ case kArm64F64x2ConvertLowI32x4U: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Uxtl(dst, i.InputSimd128Register(0).V2S());
+ __ Ucvtf(dst, dst);
+ break;
+ }
+ case kArm64I32x4TruncSatF64x2SZero: {
+ VRegister dst = i.OutputSimd128Register();
+ __ Fcvtzs(dst.V2D(), i.InputSimd128Register(0).V2D());
+ __ Sqxtn(dst.V2S(), dst.V2D());
+ break;
+ }
+ case kArm64I32x4TruncSatF64x2UZero: {
+ VRegister dst = i.OutputSimd128Register();
+ __ Fcvtzu(dst.V2D(), i.InputSimd128Register(0).V2D());
+ __ Uqxtn(dst.V2S(), dst.V2D());
+ break;
+ }
+ case kArm64F32x4DemoteF64x2Zero: {
+ __ Fcvtn(i.OutputSimd128Register().V2S(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ }
+ case kArm64F64x2PromoteLowF32x4: {
+ __ Fcvtl(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2S());
+ break;
+ }
case kArm64F64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
break;
@@ -2146,6 +2233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
break;
}
+ SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
@@ -2217,20 +2305,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
+ case kArm64I64x2Ne: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
+ i.InputSimd128Register(1).V2D());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
+ SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
}
case kArm64I64x2BitMask: {
- UseScratchRegisterScope scope(tasm());
- Register dst = i.OutputRegister32();
- VRegister src = i.InputSimd128Register(0);
- VRegister tmp1 = scope.AcquireV(kFormat2D);
- Register tmp2 = scope.AcquireX();
- __ Ushr(tmp1.V2D(), src.V2D(), 63);
- __ Mov(dst.X(), tmp1.D(), 0);
- __ Mov(tmp2.X(), tmp1.D(), 1);
- __ Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
+ __ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
case kArm64I32x4Splat: {
@@ -2535,7 +2624,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64SignSelect: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cmlt(i.OutputSimd128Register().Format(f),
i.InputSimd128Register(2).Format(f), 0);
__ Bsl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
@@ -2670,59 +2759,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
case kArm64LoadSplat: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
break;
}
case kArm64LoadLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
int laneidx = i.InputInt8(1);
__ ld1(i.OutputSimd128Register().Format(f), laneidx, i.MemoryOperand(2));
break;
}
case kArm64StoreLane: {
- VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
int laneidx = i.InputInt8(1);
__ st1(i.InputSimd128Register(0).Format(f), laneidx, i.MemoryOperand(2));
break;
}
case kArm64S128Load8x8S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64S128Load8x8U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64S128Load16x4S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64S128Load16x4U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64S128Load32x2S: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
case kArm64S128Load32x2U: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64S128Load32Zero: {
- __ Ldr(i.OutputSimd128Register().S(), i.MemoryOperand(0));
- break;
- }
- case kArm64S128Load64Zero: {
- __ Ldr(i.OutputSimd128Register().D(), i.MemoryOperand(0));
+ case kArm64V64x2AllTrue: {
+ __ V64x2AllTrue(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
@@ -2838,50 +2932,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
- class OutOfLineTrap final : public OutOfLineCode {
- public:
- OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
- : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
- void Generate() final {
- Arm64OperandConverter i(gen_, instr_);
- TrapId trap_id =
- static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
- GenerateCallToTrap(trap_id);
- }
-
- private:
- void GenerateCallToTrap(TrapId trap_id) {
- if (trap_id == TrapId::kInvalid) {
- // We cannot test calls to the runtime in cctest/test-run-wasm.
- // Therefore we emit a call to C here instead of a call to the runtime.
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(), 0);
- __ LeaveFrame(StackFrame::WASM);
- auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count =
- static_cast<int>(call_descriptor->StackParameterCount());
- pop_count += (pop_count & 1); // align
- __ Drop(pop_count);
- __ Ret();
- } else {
- gen_->AssembleSourcePosition(instr_);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched when the code
- // is added to the native module and copied into wasm code space.
- __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
- ReferenceMap* reference_map =
- gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- // The trap code should never return.
- __ Brk(0);
- }
- }
- }
- Instruction* instr_;
- CodeGenerator* gen_;
- };
- auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ auto ool = zone()->New<WasmOutOfLineTrap>(this, instr);
Label* tlabel = ool->entry();
Condition cc = FlagsConditionToCondition(condition);
__ B(cc, tlabel);
@@ -3051,7 +3102,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ Brk(0);
}
@@ -3180,7 +3231,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = x3;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -3188,9 +3238,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
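Note on the new SIMD cases above: kArm64I64x2Ne is emitted as Cmeq followed by Mvn because AArch64 NEON has no vector compare-not-equal, and kArm64I32x4TruncSatF64x2SZero pairs Fcvtzs with a saturating narrow (Sqxtn) so the upper two output lanes stay zero. A scalar C++ model of the resulting lane semantics, for illustration only (this is not the V8 code):

    #include <array>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Per-lane model of kArm64I64x2Ne: compare-equal, then invert (Cmeq + Mvn).
    std::array<uint64_t, 2> I64x2Ne(const std::array<uint64_t, 2>& a,
                                    const std::array<uint64_t, 2>& b) {
      std::array<uint64_t, 2> r;
      for (int i = 0; i < 2; ++i) {
        uint64_t eq = (a[i] == b[i]) ? ~uint64_t{0} : uint64_t{0};
        r[i] = ~eq;
      }
      return r;
    }

    // Per-lane model of kArm64I32x4TruncSatF64x2SZero: truncate toward zero,
    // saturate to the int32 range (NaN becomes 0), upper two lanes stay zero.
    std::array<int32_t, 4> I32x4TruncSatF64x2SZero(
        const std::array<double, 2>& a) {
      std::array<int32_t, 4> r = {0, 0, 0, 0};
      for (int i = 0; i < 2; ++i) {
        double v = std::trunc(a[i]);
        if (std::isnan(v)) {
          r[i] = 0;
        } else if (v <= static_cast<double>(std::numeric_limits<int32_t>::min())) {
          r[i] = std::numeric_limits<int32_t>::min();
        } else if (v >= static_cast<double>(std::numeric_limits<int32_t>::max())) {
          r[i] = std::numeric_limits<int32_t>::max();
        } else {
          r[i] = static_cast<int32_t>(v);
        }
      }
      return r;
    }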
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index e56d0323fe..ee2c20372e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -198,6 +198,9 @@ namespace compiler {
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -223,9 +226,11 @@ namespace compiler {
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
+ V(Arm64I64x2Abs) \
V(Arm64I64x2Neg) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
@@ -233,6 +238,9 @@ namespace compiler {
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
+ V(Arm64I64x2Ne) \
+ V(Arm64I64x2GtS) \
+ V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4Splat) \
@@ -263,6 +271,8 @@ namespace compiler {
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
@@ -368,6 +378,7 @@ namespace compiler {
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
+ V(Arm64V64x2AllTrue) \
V(Arm64V32x4AllTrue) \
V(Arm64V16x8AllTrue) \
V(Arm64V8x16AllTrue) \
@@ -380,8 +391,6 @@ namespace compiler {
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
- V(Arm64S128Load32Zero) \
- V(Arm64S128Load64Zero) \
V(Arm64Word64AtomicLoadUint8) \
V(Arm64Word64AtomicLoadUint16) \
V(Arm64Word64AtomicLoadUint32) \
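The V(...) entries added above are consumed with the usual X-macro pattern: the same list is expanded once to declare the per-architecture opcode enumerators and again wherever a per-opcode case or name string is needed. A minimal sketch of that pattern (MY_OPCODE_LIST and MyOpcode are illustrative names, not V8's):

    #include <cstdio>

    #define MY_OPCODE_LIST(V) \
      V(Arm64I64x2Ne)         \
      V(Arm64I64x2GtS)        \
      V(Arm64I64x2GeS)

    enum class MyOpcode {
    #define DECLARE_OPCODE(Name) k##Name,
      MY_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    const char* MyOpcodeName(MyOpcode op) {
      switch (op) {
    #define OPCODE_CASE(Name) \
      case MyOpcode::k##Name: \
        return #Name;
        MY_OPCODE_LIST(OPCODE_CASE)
    #undef OPCODE_CASE
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", MyOpcodeName(MyOpcode::kArm64I64x2GtS)); }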
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 0e70a424f5..a384a84479 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -163,6 +163,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
case kArm64F64x2Pmax:
+ case kArm64F64x2ConvertLowI32x4S:
+ case kArm64F64x2ConvertLowI32x4U:
+ case kArm64F64x2PromoteLowF32x4:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -188,9 +191,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
+ case kArm64F32x4DemoteF64x2Zero:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
+ case kArm64I64x2Abs:
case kArm64I64x2Neg:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
@@ -198,6 +203,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2Sub:
case kArm64I64x2Mul:
case kArm64I64x2Eq:
+ case kArm64I64x2Ne:
+ case kArm64I64x2GtS:
+ case kArm64I64x2GeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
case kArm64I32x4Splat:
@@ -232,6 +240,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4Abs:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
+ case kArm64I32x4TruncSatF64x2SZero:
+ case kArm64I32x4TruncSatF64x2UZero:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
@@ -337,6 +347,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
case kArm64V128AnyTrue:
+ case kArm64V64x2AllTrue:
case kArm64V32x4AllTrue:
case kArm64V16x8AllTrue:
case kArm64V8x16AllTrue:
@@ -368,8 +379,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Load16x4U:
case kArm64S128Load32x2S:
case kArm64S128Load32x2U:
- case kArm64S128Load32Zero:
- case kArm64S128Load64Zero:
return kIsLoadOperation;
case kArm64Claim:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 0f432f3bc1..091272ac4e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -672,7 +672,10 @@ void InstructionSelector::VisitLoadLane(Node* node) {
params.rep == MachineType::Int32() || params.rep == MachineType::Int64());
InstructionCode opcode = kArm64LoadLane;
- opcode |= MiscField::encode(params.rep.MemSize() * kBitsPerByte);
+ opcode |= LaneSizeField::encode(params.rep.MemSize() * kBitsPerByte);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Arm64OperandGenerator g(this);
InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
@@ -686,7 +689,11 @@ void InstructionSelector::VisitStoreLane(Node* node) {
DCHECK_GE(MachineRepresentation::kWord64, params.rep);
InstructionCode opcode = kArm64StoreLane;
- opcode |= MiscField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte);
+ opcode |=
+ LaneSizeField::encode(ElementSizeInBytes(params.rep) * kBitsPerByte);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Arm64OperandGenerator g(this);
InstructionOperand addr = EmitAddBeforeLoadOrStore(this, node, &opcode);
@@ -707,22 +714,22 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
switch (params.transformation) {
case LoadTransformation::kS128Load8Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(8);
+ opcode |= LaneSizeField::encode(8);
require_add = true;
break;
case LoadTransformation::kS128Load16Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(16);
+ opcode |= LaneSizeField::encode(16);
require_add = true;
break;
case LoadTransformation::kS128Load32Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(32);
+ opcode |= LaneSizeField::encode(32);
require_add = true;
break;
case LoadTransformation::kS128Load64Splat:
opcode = kArm64LoadSplat;
- opcode |= MiscField::encode(64);
+ opcode |= LaneSizeField::encode(64);
require_add = true;
break;
case LoadTransformation::kS128Load8x8S:
@@ -744,10 +751,10 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
opcode = kArm64S128Load32x2U;
break;
case LoadTransformation::kS128Load32Zero:
- opcode = kArm64S128Load32Zero;
+ opcode = kArm64LdrS;
break;
case LoadTransformation::kS128Load64Zero:
- opcode = kArm64S128Load64Zero;
+ opcode = kArm64LdrD;
break;
default:
UNIMPLEMENTED();
@@ -774,6 +781,9 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
} else {
opcode |= AddressingModeField::encode(kMode_MRR);
}
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
Emit(opcode, 1, outputs, 2, inputs);
}
@@ -844,7 +854,10 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
+ }
+ if (node->opcode() == IrOpcode::kProtectedLoad) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
EmitLoad(this, node, opcode, immediate_mode, rep);
@@ -852,10 +865,7 @@ void InstructionSelector::VisitLoad(Node* node) {
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
Arm64OperandGenerator g(this);
@@ -987,14 +997,15 @@ void InstructionSelector::VisitStore(Node* node) {
opcode |= AddressingModeField::encode(kMode_MRR);
}
+ if (node->opcode() == IrOpcode::kProtectedStore) {
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+
Emit(opcode, 0, nullptr, input_count, inputs);
}
}
-void InstructionSelector::VisitProtectedStore(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitProtectedStore(Node* node) { VisitStore(node); }
void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
UNREACHABLE();
@@ -1735,7 +1746,7 @@ namespace {
void VisitExtMul(InstructionSelector* selector, ArchOpcode opcode, Node* node,
int dst_lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(dst_lane_size);
+ code |= LaneSizeField::encode(dst_lane_size);
VisitRRR(selector, code, node);
}
} // namespace
@@ -1792,7 +1803,7 @@ namespace {
void VisitExtAddPairwise(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int dst_lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(dst_lane_size);
+ code |= LaneSizeField::encode(dst_lane_size);
VisitRR(selector, code, node);
}
} // namespace
@@ -2091,21 +2102,24 @@ void InstructionSelector::EmitPrepareArguments(
// Poke the arguments into the stack.
while (slot >= 0) {
PushParameter input0 = (*arguments)[slot];
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
+ if (input0.node == nullptr) {
+ slot--;
+ continue;
+ }
PushParameter input1 = slot > 0 ? (*arguments)[slot - 1] : PushParameter();
// Emit a poke-pair if consecutive parameters have the same type.
// TODO(arm): Support consecutive Simd128 parameters.
- if (input0.node != nullptr && input1.node != nullptr &&
+ if (input1.node != nullptr &&
input0.location.GetType() == input1.location.GetType()) {
Emit(kArm64PokePair, g.NoOutput(), g.UseRegister(input0.node),
g.UseRegister(input1.node), g.TempImmediate(slot));
slot -= 2;
- } else if (input0.node != nullptr) {
+ } else {
Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input0.node),
g.TempImmediate(slot));
slot--;
- } else {
- // Skip any alignment holes in pushed nodes.
- slot--;
}
}
}
@@ -2139,8 +2153,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
// Shared routine for multiple compare operations.
@@ -3397,36 +3409,42 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
- V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
- V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
- V(I64x2Neg, kArm64I64x2Neg) \
- V(I64x2BitMask, kArm64I64x2BitMask) \
- V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
- V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
- V(I32x4BitMask, kArm64I32x4BitMask) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
- V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
- V(I8x16BitMask, kArm64I8x16BitMask) \
- V(S128Not, kArm64S128Not) \
- V(V32x4AnyTrue, kArm64V128AnyTrue) \
- V(V32x4AllTrue, kArm64V32x4AllTrue) \
- V(V16x8AnyTrue, kArm64V128AnyTrue) \
- V(V16x8AllTrue, kArm64V16x8AllTrue) \
- V(V8x16AnyTrue, kArm64V128AnyTrue) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kArm64F64x2Abs) \
+ V(F64x2Neg, kArm64F64x2Neg) \
+ V(F64x2Sqrt, kArm64F64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
+ V(F32x4Abs, kArm64F32x4Abs) \
+ V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4Sqrt, kArm64F32x4Sqrt) \
+ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kArm64I64x2Abs) \
+ V(I64x2Neg, kArm64I64x2Neg) \
+ V(I64x2BitMask, kArm64I64x2BitMask) \
+ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
+ V(I32x4Neg, kArm64I32x4Neg) \
+ V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
+ V(I32x4Abs, kArm64I32x4Abs) \
+ V(I32x4BitMask, kArm64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kArm64I16x8Neg) \
+ V(I16x8Abs, kArm64I16x8Abs) \
+ V(I16x8BitMask, kArm64I16x8BitMask) \
+ V(I8x16Neg, kArm64I8x16Neg) \
+ V(I8x16Abs, kArm64I8x16Abs) \
+ V(I8x16BitMask, kArm64I8x16BitMask) \
+ V(S128Not, kArm64S128Not) \
+ V(V128AnyTrue, kArm64V128AnyTrue) \
+ V(V64x2AllTrue, kArm64V64x2AllTrue) \
+ V(V32x4AllTrue, kArm64V32x4AllTrue) \
+ V(V16x8AllTrue, kArm64V16x8AllTrue) \
V(V8x16AllTrue, kArm64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -3468,6 +3486,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add, kArm64I64x2Add) \
V(I64x2Sub, kArm64I64x2Sub) \
V(I64x2Eq, kArm64I64x2Eq) \
+ V(I64x2Ne, kArm64I64x2Ne) \
+ V(I64x2GtS, kArm64I64x2GtS) \
+ V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
@@ -3603,7 +3624,7 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#define VISIT_SIGN_SELECT(NAME, SIZE) \
void InstructionSelector::Visit##NAME(Node* node) { \
InstructionCode opcode = kArm64SignSelect; \
- opcode |= MiscField::encode(SIZE); \
+ opcode |= LaneSizeField::encode(SIZE); \
VisitRRRR(this, opcode, node); \
}
@@ -3886,7 +3907,7 @@ namespace {
void VisitSignExtendLong(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int lane_size) {
InstructionCode code = opcode;
- code |= MiscField::encode(lane_size);
+ code |= LaneSizeField::encode(lane_size);
VisitRR(selector, code, node);
}
} // namespace
@@ -3941,7 +3962,7 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
void InstructionSelector::VisitI8x16Popcnt(Node* node) {
InstructionCode code = kArm64Cnt;
- code |= MiscField::encode(8);
+ code |= LaneSizeField::encode(8);
VisitRR(this, code, node);
}
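The rewritten EmitPrepareArguments loop above walks the argument slots from high to low, skips nullptr holes (padding slots and the extra slots of multi-slot values), and only then tries to pair two consecutive same-typed parameters into a poke-pair. A simplified standalone model of that loop; Param, EmitPoke, and EmitPokePair are illustrative stand-ins, not V8's types:

    #include <cstdio>
    #include <vector>

    struct Param {
      const char* node;  // nullptr marks a hole (padding or extra slot)
      int type;          // stand-in for the parameter's machine type
    };

    void EmitPokePair(const Param& a, const Param& b, int slot) {
      std::printf("PokePair %s,%s @%d\n", a.node, b.node, slot);
    }

    void EmitPoke(const Param& p, int slot) {
      std::printf("Poke %s @%d\n", p.node, slot);
    }

    void PrepareArguments(const std::vector<Param>& args) {
      int slot = static_cast<int>(args.size()) - 1;
      while (slot >= 0) {
        Param input0 = args[slot];
        if (input0.node == nullptr) {  // skip holes before pairing
          slot--;
          continue;
        }
        Param input1 = slot > 0 ? args[slot - 1] : Param{nullptr, 0};
        if (input1.node != nullptr && input0.type == input1.type) {
          EmitPokePair(input0, input1, slot);  // two consecutive, same type
          slot -= 2;
        } else {
          EmitPoke(input0, slot);
          slot--;
        }
      }
    }

    int main() {
      // Slots 3..0 hold c, b, <hole>, a  ->  PokePair c,b @3; Poke a @0.
      PrepareArguments({{"a", 1}, {nullptr, 0}, {"b", 2}, {"c", 2}});
    }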
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index c94ca74f73..bc5aa579d6 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -187,7 +187,7 @@ class InstructionOperandConverter {
// Deoptimization exit.
class DeoptimizationExit : public ZoneObject {
public:
- explicit DeoptimizationExit(SourcePosition pos, BailoutId bailout_id,
+ explicit DeoptimizationExit(SourcePosition pos, BytecodeOffset bailout_id,
int translation_id, int pc_offset,
DeoptimizeKind kind, DeoptimizeReason reason)
: deoptimization_id_(kNoDeoptIndex),
@@ -215,7 +215,7 @@ class DeoptimizationExit : public ZoneObject {
Label* label() { return &label_; }
// The label after the deoptimization check, which will resume execution.
Label* continue_label() { return &continue_label_; }
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
DeoptimizeKind kind() const { return kind_; }
@@ -238,7 +238,7 @@ class DeoptimizationExit : public ZoneObject {
const SourcePosition pos_;
Label label_;
Label continue_label_;
- const BailoutId bailout_id_;
+ const BytecodeOffset bailout_id_;
const int translation_id_;
const int pc_offset_;
const DeoptimizeKind kind_;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 83f8fbc4e8..e9a39f74a9 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -606,9 +606,8 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
.IsNext(instructions()->InstructionBlockAt(block)->ao_number());
}
-void CodeGenerator::RecordSafepoint(ReferenceMap* references,
- Safepoint::DeoptMode deopt_mode) {
- Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), deopt_mode);
+void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
+ Safepoint safepoint = safepoints()->DefineSafepoint(tasm());
int frame_header_offset = frame()->GetFixedSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
@@ -856,16 +855,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
+ Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = exit->continue_label();
+ branch.false_label = &continue_label;
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(exit->continue_label());
+ tasm()->bind(&continue_label);
if (mode == kFlags_deoptimize_and_poison) {
AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
}
+ tasm()->bind(exit->continue_label());
break;
}
case kFlags_set: {
@@ -985,8 +986,8 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
Handle<DeoptimizationData> data =
DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
- Handle<ByteArray> translation_array =
- translations_.CreateByteArray(isolate()->factory());
+ Handle<TranslationArray> translation_array =
+ translations_.ToTranslationArray(isolate()->factory());
data->SetTranslationByteArray(*translation_array);
data->SetInlinedFunctionCount(
@@ -1022,7 +1023,7 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
} else {
- BailoutId osr_offset = BailoutId::None();
+ BytecodeOffset osr_offset = BytecodeOffset::None();
data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
data->SetOsrPcOffset(Smi::FromInt(-1));
}
@@ -1049,9 +1050,7 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
void CodeGenerator::RecordCallPosition(Instruction* instr) {
const bool needs_frame_state =
instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
- RecordSafepoint(instr->reference_map(), needs_frame_state
- ? Safepoint::kLazyDeopt
- : Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
InstructionOperandConverter i(this, instr);
@@ -1094,66 +1093,49 @@ DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
void CodeGenerator::TranslateStateValueDescriptor(
StateValueDescriptor* desc, StateValueList* nested,
- Translation* translation, InstructionOperandIterator* iter) {
- // Note:
- // If translation is null, we just skip the relevant instruction operands.
+ InstructionOperandIterator* iter) {
if (desc->IsNested()) {
- if (translation != nullptr) {
- translation->BeginCapturedObject(static_cast<int>(nested->size()));
- }
+ translations_.BeginCapturedObject(static_cast<int>(nested->size()));
for (auto field : *nested) {
- TranslateStateValueDescriptor(field.desc, field.nested, translation,
- iter);
+ TranslateStateValueDescriptor(field.desc, field.nested, iter);
}
} else if (desc->IsArgumentsElements()) {
- if (translation != nullptr) {
- translation->ArgumentsElements(desc->arguments_type());
- }
+ translations_.ArgumentsElements(desc->arguments_type());
} else if (desc->IsArgumentsLength()) {
- if (translation != nullptr) {
- translation->ArgumentsLength();
- }
+ translations_.ArgumentsLength();
} else if (desc->IsDuplicate()) {
- if (translation != nullptr) {
- translation->DuplicateObject(static_cast<int>(desc->id()));
- }
+ translations_.DuplicateObject(static_cast<int>(desc->id()));
} else if (desc->IsPlain()) {
InstructionOperand* op = iter->Advance();
- if (translation != nullptr) {
- AddTranslationForOperand(translation, iter->instruction(), op,
- desc->type());
- }
+ AddTranslationForOperand(iter->instruction(), op, desc->type());
} else {
DCHECK(desc->IsOptimizedOut());
- if (translation != nullptr) {
if (optimized_out_literal_id_ == -1) {
optimized_out_literal_id_ = DefineDeoptimizationLiteral(
DeoptimizationLiteral(isolate()->factory()->optimized_out()));
}
- translation->StoreLiteral(optimized_out_literal_id_);
- }
+ translations_.StoreLiteral(optimized_out_literal_id_);
}
}
void CodeGenerator::TranslateFrameStateDescriptorOperands(
- FrameStateDescriptor* desc, InstructionOperandIterator* iter,
- Translation* translation) {
+ FrameStateDescriptor* desc, InstructionOperandIterator* iter) {
size_t index = 0;
StateValueList* values = desc->GetStateValueDescriptors();
for (StateValueList::iterator it = values->begin(); it != values->end();
++it, ++index) {
- TranslateStateValueDescriptor((*it).desc, (*it).nested, translation, iter);
+ TranslateStateValueDescriptor((*it).desc, (*it).nested, iter);
}
DCHECK_EQ(desc->GetSize(), index);
}
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
- Translation* translation, OutputFrameStateCombine state_combine) {
+ OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
if (descriptor->outer_state() != nullptr) {
BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
- translation, state_combine);
+ state_combine);
}
Handle<SharedFunctionInfo> shared_info;
@@ -1164,49 +1146,57 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info = info()->shared_info();
}
- const BailoutId bailout_id = descriptor->bailout_id();
+ const BytecodeOffset bailout_id = descriptor->bailout_id();
const int shared_info_id =
DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
const unsigned int height =
static_cast<unsigned int>(descriptor->GetHeight());
switch (descriptor->type()) {
- case FrameStateType::kInterpretedFunction: {
+ case FrameStateType::kUnoptimizedFunction: {
int return_offset = 0;
int return_count = 0;
if (!state_combine.IsOutputIgnored()) {
return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
return_count = static_cast<int>(iter->instruction()->OutputCount());
}
- translation->BeginInterpretedFrame(bailout_id, shared_info_id, height,
- return_offset, return_count);
+ translations_.BeginInterpretedFrame(bailout_id, shared_info_id, height,
+ return_offset, return_count);
break;
}
case FrameStateType::kArgumentsAdaptor:
- translation->BeginArgumentsAdaptorFrame(shared_info_id, height);
+ translations_.BeginArgumentsAdaptorFrame(shared_info_id, height);
break;
case FrameStateType::kConstructStub:
DCHECK(bailout_id.IsValidForConstructStub());
- translation->BeginConstructStubFrame(bailout_id, shared_info_id, height);
+ translations_.BeginConstructStubFrame(bailout_id, shared_info_id, height);
break;
case FrameStateType::kBuiltinContinuation: {
- translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
- height);
+ translations_.BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
+ height);
+ break;
+ }
+ case FrameStateType::kJSToWasmBuiltinContinuation: {
+ const JSToWasmFrameStateDescriptor* js_to_wasm_descriptor =
+ static_cast<const JSToWasmFrameStateDescriptor*>(descriptor);
+ translations_.BeginJSToWasmBuiltinContinuationFrame(
+ bailout_id, shared_info_id, height,
+ js_to_wasm_descriptor->return_type());
break;
}
case FrameStateType::kJavaScriptBuiltinContinuation: {
- translation->BeginJavaScriptBuiltinContinuationFrame(
+ translations_.BeginJavaScriptBuiltinContinuationFrame(
bailout_id, shared_info_id, height);
break;
}
case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
- translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ translations_.BeginJavaScriptBuiltinContinuationWithCatchFrame(
bailout_id, shared_info_id, height);
break;
}
}
- TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
+ TranslateFrameStateDescriptorOperands(descriptor, iter);
}
DeoptimizationExit* CodeGenerator::BuildTranslation(
@@ -1217,23 +1207,21 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
- Translation translation(&translations_,
- static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()),
- update_feedback_count, zone());
+ const int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
+ const int translation_index = translations_.BeginTranslation(
+ static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()), update_feedback_count);
if (entry.feedback().IsValid()) {
DeoptimizationLiteral literal =
DeoptimizationLiteral(entry.feedback().vector);
int literal_id = DefineDeoptimizationLiteral(literal);
- translation.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
+ translations_.AddUpdateFeedback(literal_id, entry.feedback().slot.ToInt());
}
InstructionOperandIterator iter(instr, frame_state_offset);
- BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
- state_combine);
+ BuildTranslationForFrameStateDescriptor(descriptor, &iter, state_combine);
DeoptimizationExit* const exit = zone()->New<DeoptimizationExit>(
- current_source_position_, descriptor->bailout_id(), translation.index(),
+ current_source_position_, descriptor->bailout_id(), translation_index,
pc_offset, entry.kind(), entry.reason());
if (!Deoptimizer::kSupportsFixedDeoptExitSizes) {
@@ -1253,21 +1241,20 @@ DeoptimizationExit* CodeGenerator::BuildTranslation(
return exit;
}
-void CodeGenerator::AddTranslationForOperand(Translation* translation,
- Instruction* instr,
+void CodeGenerator::AddTranslationForOperand(Instruction* instr,
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
if (type.representation() == MachineRepresentation::kBit) {
- translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreBoolStackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Int8() || type == MachineType::Int16() ||
type == MachineType::Int32()) {
- translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreInt32StackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
- translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreUint32StackSlot(LocationOperand::cast(op)->index());
} else if (type == MachineType::Int64()) {
- translation->StoreInt64StackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreInt64StackSlot(LocationOperand::cast(op)->index());
} else {
#if defined(V8_COMPRESS_POINTERS)
CHECK(MachineRepresentation::kTagged == type.representation() ||
@@ -1275,27 +1262,27 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
#else
CHECK(MachineRepresentation::kTagged == type.representation());
#endif
- translation->StoreStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsFPStackSlot()) {
if (type.representation() == MachineRepresentation::kFloat64) {
- translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
- translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
+ translations_.StoreFloatStackSlot(LocationOperand::cast(op)->index());
}
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kBit) {
- translation->StoreBoolRegister(converter.ToRegister(op));
+ translations_.StoreBoolRegister(converter.ToRegister(op));
} else if (type == MachineType::Int8() || type == MachineType::Int16() ||
type == MachineType::Int32()) {
- translation->StoreInt32Register(converter.ToRegister(op));
+ translations_.StoreInt32Register(converter.ToRegister(op));
} else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
type == MachineType::Uint32()) {
- translation->StoreUint32Register(converter.ToRegister(op));
+ translations_.StoreUint32Register(converter.ToRegister(op));
} else if (type == MachineType::Int64()) {
- translation->StoreInt64Register(converter.ToRegister(op));
+ translations_.StoreInt64Register(converter.ToRegister(op));
} else {
#if defined(V8_COMPRESS_POINTERS)
CHECK(MachineRepresentation::kTagged == type.representation() ||
@@ -1303,15 +1290,15 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
#else
CHECK(MachineRepresentation::kTagged == type.representation());
#endif
- translation->StoreRegister(converter.ToRegister(op));
+ translations_.StoreRegister(converter.ToRegister(op));
}
} else if (op->IsFPRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kFloat64) {
- translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ translations_.StoreDoubleRegister(converter.ToDoubleRegister(op));
} else {
CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
- translation->StoreFloatRegister(converter.ToFloatRegister(op));
+ translations_.StoreFloatRegister(converter.ToFloatRegister(op));
}
} else {
CHECK(op->IsImmediate());
@@ -1390,10 +1377,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
UNREACHABLE();
}
if (literal.object().equals(info()->closure())) {
- translation->StoreJSFrameFunction();
+ translations_.StoreJSFrameFunction();
} else {
int literal_id = DefineDeoptimizationLiteral(literal);
- translation->StoreLiteral(literal_id);
+ translations_.StoreLiteral(literal_id);
}
}
}
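The refactoring above replaces the per-exit Translation helper with direct calls on the translations_ builder, but the shape of the walk is unchanged: outer frame states are still appended to the flat translation before inner ones. A reduced sketch of that recursion; FrameState and TranslationBuilder here are illustrative stand-ins, not the V8 classes:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct FrameState {
      std::string name;
      const FrameState* outer;  // nullptr for the outermost frame
    };

    struct TranslationBuilder {
      std::vector<std::string> entries;
      void BeginFrame(const std::string& name) { entries.push_back(name); }
    };

    void BuildTranslation(const FrameState* desc, TranslationBuilder* builder) {
      if (desc->outer != nullptr) {
        BuildTranslation(desc->outer, builder);  // outer-most state first
      }
      builder->BeginFrame(desc->name);
    }

    int main() {
      FrameState caller{"caller frame", nullptr};
      FrameState inlined{"inlined frame", &caller};
      TranslationBuilder builder;
      BuildTranslation(&inlined, &builder);
      for (const auto& entry : builder.entries) {
        std::printf("%s\n", entry.c_str());  // prints "caller frame" first
      }
    }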
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 9829a070ec..7cead5dbde 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -160,8 +160,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void AssembleSourcePosition(SourcePosition source_position);
// Record a safepoint with the given pointer map.
- void RecordSafepoint(ReferenceMap* references,
- Safepoint::DeoptMode deopt_mode);
+ void RecordSafepoint(ReferenceMap* references);
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
@@ -298,10 +297,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Generates code to manipulate the stack in preparation for a tail call.
void AssemblePrepareTailCall();
- // Generates code to pop current frame if it is an arguments adaptor frame.
- void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
- Register scratch2, Register scratch3);
-
enum PushTypeFlag {
kImmediatePush = 0x1,
kRegisterPush = 0x2,
@@ -398,16 +393,14 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
- Translation* translation, OutputFrameStateCombine state_combine);
+ OutputFrameStateCombine state_combine);
void TranslateStateValueDescriptor(StateValueDescriptor* desc,
StateValueList* nested,
- Translation* translation,
InstructionOperandIterator* iter);
void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
- InstructionOperandIterator* iter,
- Translation* translation);
- void AddTranslationForOperand(Translation* translation, Instruction* instr,
- InstructionOperand* op, MachineType type);
+ InstructionOperandIterator* iter);
+ void AddTranslationForOperand(Instruction* instr, InstructionOperand* op,
+ MachineType type);
void MarkLazyDeoptSite();
void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
@@ -448,7 +441,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_ = 0;
- TranslationBuffer translations_;
+ TranslationArrayBuilder translations_;
int handler_table_offset_ = 0;
int last_lazy_deopt_pc_ = 0;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 45a2c59597..77a4d92b96 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -574,43 +574,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register, Register,
- Register) {
- // There are not enough temp registers left on ia32 for a call instruction
- // so we pick some scratch registers and save/restore them manually here.
- int scratch_count = 3;
- Register scratch1 = esi;
- Register scratch2 = ecx;
- Register scratch3 = edx;
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- __ push(scratch1);
- __ push(scratch2);
- __ push(scratch3);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3,
- scratch_count);
- __ pop(scratch3);
- __ pop(scratch2);
- __ pop(scratch1);
-
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -794,12 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -925,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1824,69 +1782,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kIA32PushFloat32:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ Move(kScratchDoubleReg, i.InputFloat32(0));
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else {
- __ Movss(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kFloatSize);
- __ Movss(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- }
- break;
- case kIA32PushFloat64:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ Move(kScratchDoubleReg, i.InputDouble(0));
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- } else {
- __ Movsd(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kDoubleSize);
- __ Movsd(Operand(esp, 0), kScratchDoubleReg);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- }
- break;
- case kIA32PushSimd128:
- if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kSimd128Size);
- __ Movups(Operand(esp, 0), i.InputSimd128Register(0));
- } else {
- __ Movups(kScratchDoubleReg, i.InputOperand(0));
- __ AllocateStackSpace(kSimd128Size);
- __ Movups(Operand(esp, 0), kScratchDoubleReg);
- }
- frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize);
- break;
- case kIA32Push:
- if (HasAddressingMode(instr)) {
- size_t index = 0;
+ case kIA32Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ // Whenever codegen uses push, we need to check if stack_decrement
+ // contains any extra padding and adjust the stack before the push.
+ if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputImmediate(1));
+ } else if (HasAddressingMode(instr)) {
+ // Only single slot pushes from memory are supported.
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ AllocateStackSpace(kFloatSize);
- __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
} else {
- __ push(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
+ InstructionOperand* input = instr->InputAt(1);
+ if (input->IsRegister()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputRegister(1));
+ } else if (input->IsFloatRegister()) {
+ DCHECK_GE(stack_decrement, kFloatSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movss(Operand(esp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsDoubleRegister()) {
+ DCHECK_GE(stack_decrement, kDoubleSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsSimd128Register()) {
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ __ AllocateStackSpace(stack_decrement);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(Operand(esp, 0), i.InputSimd128Register(1));
+ } else if (input->IsStackSlot() || input->IsFloatStackSlot()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ push(i.InputOperand(1));
+ } else if (input->IsDoubleStackSlot()) {
+ DCHECK_GE(stack_decrement, kDoubleSize);
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(esp, 0), kScratchDoubleReg);
+ } else {
+ DCHECK(input->IsSimd128StackSlot());
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movups(Operand(esp, 0), kScratchDoubleReg);
+ }
}
+ frame_access_state()->IncreaseSPDelta(slots);
break;
+ }
case kIA32Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
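In the new kIA32Push case above, input 0 carries the full stack decrement for the slot. A one-word push instruction already moves esp by kSystemPointerSize, so only the remaining padding is allocated before it; wider values (double, Simd128) are stored with Movsd/Movups after allocating the whole decrement. A tiny sketch of that arithmetic (the helper name is illustrative):

    #include <cassert>

    constexpr int kSystemPointerSize = 4;  // ia32

    // Bytes to allocate before a one-word `push` that fills the rest of the slot.
    int PaddingBeforePush(int stack_decrement) {
      assert(stack_decrement >= kSystemPointerSize);
      return stack_decrement - kSystemPointerSize;
    }

    int main() {
      assert(PaddingBeforePush(8) == 4);  // padded (8-byte aligned) word slot
      assert(PaddingBeforePush(4) == 0);  // unpadded word slot: push alone suffices
      return 0;
    }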
@@ -2092,6 +2039,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
break;
}
+ case kIA32F64x2PromoteLowF32x4: {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32F32x4DemoteF64x2Zero: {
+ __ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32I32x4TruncSatF64x2SZero: {
+ __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
+ break;
+ }
+ case kIA32I32x4TruncSatF64x2UZero: {
+ __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
+ break;
+ }
+ case kIA32F64x2ConvertLowI32x4S: {
+ __ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32F64x2ConvertLowI32x4U: {
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), i.TempRegister(0));
+ break;
+ }
case kIA32I64x2ExtMulLowI32x4S: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
@@ -2177,6 +2153,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pinsrd(i.OutputSimd128Register(), i.InputOperand(3), lane * 2 + 1);
break;
}
+ case kIA32I64x2Abs: {
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ break;
+ }
case kIA32I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
@@ -2254,7 +2235,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I64x2Eq: {
__ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputOperand(1));
+ break;
+ }
+ case kIA32I64x2Ne: {
+ __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ __ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kIA32I64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kIA32I64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I64x2SConvertI32x4Low: {
@@ -2262,15 +2260,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2SConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpunpckhqdq(dst, src, src);
- } else {
- __ pshufd(dst, src, 0xEE);
- }
- __ Pmovsxdq(dst, dst);
+ __ I64x2SConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I64x2UConvertI32x4Low: {
@@ -2278,17 +2269,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2UConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpunpckhdq(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pshufd(dst, src, 0xEE);
- __ pmovzxdq(dst, dst);
- }
+ __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kIA32I8x16SignSelect: {
@@ -2315,64 +2297,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // kScratchDoubleReg = i16x8.splat(1)
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlw(kScratchDoubleReg, byte{15});
- // pmaddwd multiplies signed words in kScratchDoubleReg and src, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- __ Pmaddwd(dst, src, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.TempRegister(0));
break;
}
case kIA32I32x4ExtAddPairwiseI16x8U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
-
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, uint8_t{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- __ Pand(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- __ Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- __ Paddd(dst, src, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kIA32I16x8ExtAddPairwiseI8x16S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_NE(dst, src);
- // dst = i8x16.splat(1)
- __ Move(dst, uint32_t{0x01010101});
- __ Pshufd(dst, dst, byte{0});
- __ Pmaddubsw(dst, dst, src);
- break;
+ __ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
case kIA32I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- // dst = i8x16.splat(1)
- __ Move(kScratchDoubleReg, uint32_t{0x01010101});
- __ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- __ Pmaddubsw(dst, i.InputSimd128Register(0), kScratchDoubleReg);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0),
+ i.TempRegister(0));
break;
}
case kIA32I16x8Q15MulRSatS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- // k = i16x8.splat(0x8000)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllw(kScratchDoubleReg, kScratchDoubleReg, byte{15});
-
- __ Pmulhrsw(dst, src0, src1);
- __ Pcmpeqw(kScratchDoubleReg, dst);
- __ Pxor(dst, kScratchDoubleReg);
+ __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32I32x4SignSelect: {
@@ -2448,36 +2398,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kSSEF32x4Abs: {
+ case kIA32F32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(i.InputSimd128Register(0), dst);
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrld(kScratchDoubleReg, 1);
- __ andps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Abs: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
- __ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
- i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ Andps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Psrld(dst, dst, 1);
+ __ Andps(dst, src);
+ }
break;
}
- case kSSEF32x4Neg: {
+ case kIA32F32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pslld(kScratchDoubleReg, 31);
- __ xorps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Neg: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
- __ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
- i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ Xorps(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Pslld(dst, dst, 31);
+ __ Xorps(dst, src);
+ }
break;
}
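// Illustrative scalar model (not V8 code) of the two mask tricks above:
// pcmpeqd materializes all-ones, psrld by 1 yields the 0x7FFFFFFF abs mask,
// pslld by 31 yields the 0x80000000 sign mask, and andps/xorps then clear or
// flip the sign bit of every float lane.
#include <cstdint>
#include <cstring>

float F32Abs(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0x7FFFFFFFu;  // clear the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

float F32Neg(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= 0x80000000u;  // flip the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}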
case kIA32F32x4Sqrt: {
@@ -2683,9 +2629,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertI16x8High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovsxwd(dst, dst);
+ __ I32x4SConvertI16x8High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I32x4Neg: {
@@ -2893,9 +2838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4UConvertI16x8High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovzxwd(dst, dst);
+ __ I32x4UConvertI16x8High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
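// Illustrative scalar model (not V8 code) of the "convert high" operations now
// folded into macro-assembler helpers: the upper four i16 lanes are widened to
// i32 with sign- or zero-extension (Palignr + Pmovsxwd/Pmovzxwd in the removed
// code).
#include <cstdint>

void SConvertI16x8High(const int16_t in[8], int32_t out[4]) {
  for (int j = 0; j < 4; ++j) out[j] = in[j + 4];
}

void UConvertI16x8High(const uint16_t in[8], uint32_t out[4]) {
  for (int j = 0; j < 4; ++j) out[j] = in[j + 4];
}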
case kIA32I32x4ShrU: {
@@ -2975,7 +2919,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I32x4DotI16x8S: {
__ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputOperand(1));
break;
}
case kIA32I16x8Splat: {
@@ -2996,9 +2940,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8SConvertI8x16High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovsxbw(dst, dst);
+ __ I16x8SConvertI8x16High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kIA32I16x8Neg: {
@@ -3180,9 +3123,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8UConvertI8x16High: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Palignr(dst, i.InputOperand(0), 8);
- __ Pmovzxbw(dst, dst);
+ __ I16x8UConvertI8x16High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg);
break;
}
case kIA32I16x8ShrU: {
@@ -3379,12 +3321,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t laneidx = i.InputUint8(index + 1);
- if (laneidx == 0) {
- __ Movss(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_GE(3, laneidx);
- __ Extractps(operand, i.InputSimd128Register(index), 1);
- }
+ __ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
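// Illustrative scalar model (not V8 code) of v128.store32_lane: exactly one
// 32-bit lane is written to memory. The removed code special-cased lane 0
// (movss) and used extractps otherwise; S128Store32Lane now hides that choice.
#include <cstdint>
#include <cstring>

void Store32Lane(uint8_t* mem, const uint32_t lanes[4], int laneidx) {
  // laneidx is expected to be in [0, 3].
  std::memcpy(mem, &lanes[laneidx], sizeof(uint32_t));
}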
case kSSEI8x16SConvertI16x8: {
@@ -3473,50 +3410,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI8x16Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16Add: {
+ __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16AddSatS: {
+ __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16Sub: {
+ __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16SubSatS: {
+ __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI8x16Mul: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3596,41 +3509,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpor(dst, dst, tmp);
break;
}
- case kSSEI8x16MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16MinS: {
+ __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI8x16MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16MaxS: {
+ __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I8x16Eq: {
+ __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI8x16Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
@@ -3647,15 +3540,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI8x16GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16GtS: {
+ __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI8x16GeS: {
@@ -3689,26 +3576,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpackuswb(dst, dst, i.InputOperand(1));
break;
}
- case kSSEI8x16AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16AddSatU: {
+ __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SubSatU: {
+ __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16ShrU: {
@@ -3743,27 +3618,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kSSEI8x16MinU: {
- XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pminub(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI8x16MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxub(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I8x16MinU: {
+ __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI8x16MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16MaxU: {
+ __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI8x16GtU: {
@@ -3816,6 +3678,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kIA32I8x16Popcnt: {
+ __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg, i.TempSimd128Register(0),
+ i.TempRegister(1));
+ break;
+ }
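// Illustrative scalar model (not V8 code) of i8x16.popcnt: every byte lane is
// replaced by its bit count. SSE/AVX lowerings typically use a PSHUFB nibble
// lookup for this, which is why the instruction above takes an extra SIMD temp
// and a scratch GP register.
#include <cstdint>

uint8_t PopcntByte(uint8_t b) {
  static constexpr uint8_t kNibblePopcnt[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                1, 2, 2, 3, 2, 3, 3, 4};
  return kNibblePopcnt[b & 0x0F] + kNibblePopcnt[b >> 4];
}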
case kIA32S128Const: {
XMMRegister dst = i.OutputSimd128Register();
Register tmp = i.TempRegister(0);
@@ -3837,17 +3705,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pcmpeqd(dst, dst);
break;
}
- case kSSES128Not: {
+ case kIA32S128Not: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
- break;
- }
- case kAVXS128Not: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(dst, kScratchDoubleReg);
+ } else {
+ __ Pcmpeqd(dst, dst);
+ __ Pxor(dst, src);
+ }
break;
}
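// Illustrative scalar model (not V8 code) of s128.not: XOR with all-ones. When
// dst != src the all-ones constant can be built directly in dst, which is why
// the new kIA32S128Not path above only needs the scratch register when
// dst == src.
#include <cstdint>

uint64_t NotLane(uint64_t lane) { return lane ^ ~uint64_t{0}; }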
case kSSES128And: {
@@ -4338,6 +4205,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
+ case kIA32V64x2AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
+ break;
case kIA32V32x4AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
@@ -4348,6 +4218,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
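// Illustrative scalar model (not V8 code) of the AllTrue reductions assembled
// by ASSEMBLE_SIMD_ALL_TRUE: compare every lane against zero at the given lane
// width (Pcmpeqq for the new kIA32V64x2AllTrue), then report 1 iff no lane was
// zero.
#include <cstdint>

int V64x2AllTrue(const uint64_t lanes[2]) {
  for (int j = 0; j < 2; ++j) {
    if (lanes[j] == 0) return 0;  // pcmpeqq would set this lane in the mask.
  }
  return 1;  // ptest sees an all-zero mask: every lane was non-zero.
}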
+ case kIA32Prefetch:
+ __ prefetch(i.MemoryOperand(), 1);
+ break;
+ case kIA32PrefetchNta:
+ __ prefetch(i.MemoryOperand(), 0);
+ break;
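// Rough analogy (an assumption, not the V8 assembler API): the two prefetch
// opcodes behave like the GCC/Clang builtin with different locality hints,
// with kIA32PrefetchNta requesting the non-temporal variant.
void PrefetchTemporal(const void* p) { __builtin_prefetch(p, /*rw=*/0, /*locality=*/3); }
void PrefetchNonTemporal(const void* p) { __builtin_prefetch(p, /*rw=*/0, /*locality=*/0); }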
case kIA32Word32AtomicPairLoad: {
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ movq(tmp, i.MemoryOperand());
@@ -4646,7 +4522,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -4953,7 +4829,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ wasm_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -5014,7 +4890,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = ecx;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -5022,9 +4897,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 632eeace20..40f7b6e403 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -114,9 +114,6 @@ namespace compiler {
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
- V(IA32PushFloat32) \
- V(IA32PushFloat64) \
- V(IA32PushSimd128) \
V(IA32Poke) \
V(IA32Peek) \
V(IA32F64x2Splat) \
@@ -138,8 +135,12 @@ namespace compiler {
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
V(IA32F64x2Round) \
+ V(IA32F64x2ConvertLowI32x4S) \
+ V(IA32F64x2ConvertLowI32x4U) \
+ V(IA32F64x2PromoteLowF32x4) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
+ V(IA32I64x2Abs) \
V(IA32I64x2Neg) \
V(IA32I64x2Shl) \
V(IA32I64x2ShrS) \
@@ -149,6 +150,9 @@ namespace compiler {
V(IA32I64x2ShrU) \
V(IA32I64x2BitMask) \
V(IA32I64x2Eq) \
+ V(IA32I64x2Ne) \
+ V(IA32I64x2GtS) \
+ V(IA32I64x2GeS) \
V(IA32I64x2SignSelect) \
V(IA32I64x2ExtMulLowI32x4S) \
V(IA32I64x2ExtMulHighI32x4S) \
@@ -163,10 +167,8 @@ namespace compiler {
V(IA32Insertps) \
V(IA32F32x4SConvertI32x4) \
V(IA32F32x4UConvertI32x4) \
- V(SSEF32x4Abs) \
- V(AVXF32x4Abs) \
- V(SSEF32x4Neg) \
- V(AVXF32x4Neg) \
+ V(IA32F32x4Abs) \
+ V(IA32F32x4Neg) \
V(IA32F32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
@@ -190,6 +192,7 @@ namespace compiler {
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
+ V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4SConvertF32x4) \
@@ -241,6 +244,8 @@ namespace compiler {
V(IA32I32x4ExtMulHighI16x8U) \
V(IA32I32x4ExtAddPairwiseI16x8S) \
V(IA32I32x4ExtAddPairwiseI16x8U) \
+ V(IA32I32x4TruncSatF64x2SZero) \
+ V(IA32I32x4TruncSatF64x2UZero) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneS) \
V(IA32I16x8SConvertI8x16Low) \
@@ -315,39 +320,27 @@ namespace compiler {
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
- V(SSEI8x16Add) \
- V(AVXI8x16Add) \
- V(SSEI8x16AddSatS) \
- V(AVXI8x16AddSatS) \
- V(SSEI8x16Sub) \
- V(AVXI8x16Sub) \
- V(SSEI8x16SubSatS) \
- V(AVXI8x16SubSatS) \
+ V(IA32I8x16Add) \
+ V(IA32I8x16AddSatS) \
+ V(IA32I8x16Sub) \
+ V(IA32I8x16SubSatS) \
V(SSEI8x16Mul) \
V(AVXI8x16Mul) \
- V(SSEI8x16MinS) \
- V(AVXI8x16MinS) \
- V(SSEI8x16MaxS) \
- V(AVXI8x16MaxS) \
- V(SSEI8x16Eq) \
- V(AVXI8x16Eq) \
+ V(IA32I8x16MinS) \
+ V(IA32I8x16MaxS) \
+ V(IA32I8x16Eq) \
V(SSEI8x16Ne) \
V(AVXI8x16Ne) \
- V(SSEI8x16GtS) \
- V(AVXI8x16GtS) \
+ V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
V(SSEI8x16UConvertI16x8) \
V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSatU) \
- V(AVXI8x16AddSatU) \
- V(SSEI8x16SubSatU) \
- V(AVXI8x16SubSatU) \
+ V(IA32I8x16AddSatU) \
+ V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
- V(SSEI8x16MinU) \
- V(AVXI8x16MinU) \
- V(SSEI8x16MaxU) \
- V(AVXI8x16MaxU) \
+ V(IA32I8x16MinU) \
+ V(IA32I8x16MaxU) \
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
@@ -356,11 +349,11 @@ namespace compiler {
V(IA32I8x16Abs) \
V(IA32I8x16BitMask) \
V(IA32I8x16SignSelect) \
+ V(IA32I8x16Popcnt) \
V(IA32S128Const) \
V(IA32S128Zero) \
V(IA32S128AllOnes) \
- V(SSES128Not) \
- V(AVXS128Not) \
+ V(IA32S128Not) \
V(SSES128And) \
V(AVXS128And) \
V(SSES128Or) \
@@ -417,9 +410,12 @@ namespace compiler {
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
V(IA32S128AnyTrue) \
+ V(IA32V64x2AllTrue) \
V(IA32V32x4AllTrue) \
V(IA32V16x8AllTrue) \
V(IA32V8x16AllTrue) \
+ V(IA32Prefetch) \
+ V(IA32PrefetchNta) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f82f299c5c..21b650cb61 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -120,8 +120,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Pmin:
case kIA32F64x2Pmax:
case kIA32F64x2Round:
+ case kIA32F64x2ConvertLowI32x4S:
+ case kIA32F64x2ConvertLowI32x4U:
+ case kIA32F64x2PromoteLowF32x4:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
+ case kIA32I64x2Abs:
case kIA32I64x2Neg:
case kIA32I64x2Shl:
case kIA32I64x2ShrS:
@@ -131,6 +135,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I64x2ShrU:
case kIA32I64x2BitMask:
case kIA32I64x2Eq:
+ case kIA32I64x2Ne:
+ case kIA32I64x2GtS:
+ case kIA32I64x2GeS:
case kIA32I64x2SignSelect:
case kIA32I64x2ExtMulLowI32x4S:
case kIA32I64x2ExtMulHighI32x4S:
@@ -145,10 +152,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Insertps:
case kIA32F32x4SConvertI32x4:
case kIA32F32x4UConvertI32x4:
- case kSSEF32x4Abs:
- case kAVXF32x4Abs:
- case kSSEF32x4Neg:
- case kAVXF32x4Neg:
+ case kIA32F32x4Abs:
+ case kIA32F32x4Neg:
case kIA32F32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
@@ -172,6 +177,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
+ case kIA32F32x4DemoteF64x2Zero:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kIA32I32x4SConvertF32x4:
@@ -223,6 +229,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4ExtMulHighI16x8U:
case kIA32I32x4ExtAddPairwiseI16x8S:
case kIA32I32x4ExtAddPairwiseI16x8U:
+ case kIA32I32x4TruncSatF64x2SZero:
+ case kIA32I32x4TruncSatF64x2UZero:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneS:
case kIA32I16x8SConvertI8x16Low:
@@ -297,39 +305,27 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
- case kSSEI8x16Add:
- case kAVXI8x16Add:
- case kSSEI8x16AddSatS:
- case kAVXI8x16AddSatS:
- case kSSEI8x16Sub:
- case kAVXI8x16Sub:
- case kSSEI8x16SubSatS:
- case kAVXI8x16SubSatS:
+ case kIA32I8x16Add:
+ case kIA32I8x16AddSatS:
+ case kIA32I8x16Sub:
+ case kIA32I8x16SubSatS:
case kSSEI8x16Mul:
case kAVXI8x16Mul:
- case kSSEI8x16MinS:
- case kAVXI8x16MinS:
- case kSSEI8x16MaxS:
- case kAVXI8x16MaxS:
- case kSSEI8x16Eq:
- case kAVXI8x16Eq:
+ case kIA32I8x16MinS:
+ case kIA32I8x16MaxS:
+ case kIA32I8x16Eq:
case kSSEI8x16Ne:
case kAVXI8x16Ne:
- case kSSEI8x16GtS:
- case kAVXI8x16GtS:
+ case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
case kSSEI8x16UConvertI16x8:
case kAVXI8x16UConvertI16x8:
- case kSSEI8x16AddSatU:
- case kAVXI8x16AddSatU:
- case kSSEI8x16SubSatU:
- case kAVXI8x16SubSatU:
+ case kIA32I8x16AddSatU:
+ case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
- case kSSEI8x16MinU:
- case kAVXI8x16MinU:
- case kSSEI8x16MaxU:
- case kAVXI8x16MaxU:
+ case kIA32I8x16MinU:
+ case kIA32I8x16MaxU:
case kSSEI8x16GtU:
case kAVXI8x16GtU:
case kSSEI8x16GeU:
@@ -338,11 +334,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16Abs:
case kIA32I8x16BitMask:
case kIA32I8x16SignSelect:
+ case kIA32I8x16Popcnt:
case kIA32S128Const:
case kIA32S128Zero:
case kIA32S128AllOnes:
- case kSSES128Not:
- case kAVXS128Not:
+ case kIA32S128Not:
case kSSES128And:
case kAVXS128And:
case kSSES128Or:
@@ -389,6 +385,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSES8x2Reverse:
case kAVXS8x2Reverse:
case kIA32S128AnyTrue:
+ case kIA32V64x2AllTrue:
case kIA32V32x4AllTrue:
case kIA32V16x8AllTrue:
case kIA32V8x16AllTrue:
@@ -431,12 +428,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kIsLoadOperation;
case kIA32Push:
- case kIA32PushFloat32:
- case kIA32PushFloat64:
- case kIA32PushSimd128:
case kIA32Poke:
case kIA32MFence:
case kIA32LFence:
+ case kIA32Prefetch:
+ case kIA32PrefetchNta:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 0f266cd824..662b40ddf4 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -432,7 +432,8 @@ void InstructionSelector::VisitLoadLane(Node* node) {
}
IA32OperandGenerator g(this);
- InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand outputs[] = {IsSupported(AVX) ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node)};
// Input 0 is value node, 1 is lane idx, and GetEffectiveAddressMemoryOperand
// uses up to 3 inputs. This ordering is consistent with other operations that
// use the same opcode.
@@ -560,7 +561,7 @@ void InstructionSelector::VisitLoad(Node* node) {
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= MiscField::encode(kMemoryAccessPoisoned);
+ code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
@@ -701,6 +702,36 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitPrefetchTemporal(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionCode opcode = kIA32Prefetch;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ // The maximum number of inputs that can be generated by the function above is
+ // 3, but wasm cases only generate 2 inputs. This check will need to be
+ // modified for any non-wasm uses of prefetch.
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
+void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionCode opcode = kIA32PrefetchNta;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ // The maximum number of inputs that can be generated by the function above is
+ // 3, but wasm cases only generate 2 inputs. This check will need to be
+ // modified for any non-wasm uses of prefetch.
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
namespace {
// Shared routine for multiple binary operations.
@@ -1363,35 +1394,33 @@ void InstructionSelector::EmitPrepareArguments(
} else {
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
- // Skip any alignment holes in pushed nodes.
+ stack_decrement += kSystemPointerSize;
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
if (input.node == nullptr) continue;
- if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
+ } else if (IsSupported(ATOM) ||
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
+ // TODO(bbudge): IA32Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kIA32Push, node, input.node,
+ effect_level)) {
InstructionOperand outputs[1];
- InstructionOperand inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionCode opcode = kIA32Push;
+ inputs[input_count++] = decrement;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
+ InstructionCode opcode = kIA32Push | AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- InstructionOperand value =
- g.CanBeImmediate(input.node)
- ? g.UseImmediate(input.node)
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node))
- ? g.UseRegister(input.node)
- : g.Use(input.node);
- if (input.location.GetType() == MachineType::Float32()) {
- Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.location.GetType() == MachineType::Float64()) {
- Emit(kIA32PushFloat64, g.NoOutput(), value);
- } else if (input.location.GetType() == MachineType::Simd128()) {
- Emit(kIA32PushSimd128, g.NoOutput(), value);
- } else {
- Emit(kIA32Push, g.NoOutput(), value);
- }
+ Emit(kIA32Push, g.NoOutput(), decrement, g.UseAny(input.node));
}
}
}
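// Standalone illustration (hypothetical values, not the V8 API) of how the
// accumulated stack_decrement above folds skipped slots - padding and the
// extra slots of multi-slot values - into the next real push and then resets.
#include <cstdio>
#include <vector>

int main() {
  constexpr int kSystemPointerSize = 4;  // ia32
  // true marks a hole in the (reversed) parameter slots.
  std::vector<bool> is_hole = {false, true, false, false};
  int stack_decrement = 0;
  for (bool hole : is_hole) {
    stack_decrement += kSystemPointerSize;
    if (hole) continue;  // no push emitted; keep accumulating
    std::printf("emit kIA32Push with stack decrement %d\n", stack_decrement);
    stack_decrement = 0;
  }
  return 0;
}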
@@ -1424,8 +1453,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
-
namespace {
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
@@ -2223,20 +2250,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8GtU) \
V(I16x8GeU) \
V(I8x16SConvertI16x8) \
- V(I8x16Add) \
- V(I8x16AddSatS) \
- V(I8x16Sub) \
- V(I8x16SubSatS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
V(I8x16Ne) \
- V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSatU) \
- V(I8x16SubSatU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
V(I8x16GtU) \
V(I8x16GeU) \
V(S128And) \
@@ -2252,9 +2267,21 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
+ V(I64x2Ne) \
V(I32x4DotI16x8S) \
V(I16x8RoundingAverageU) \
- V(I16x8Q15MulRSatS) \
+ V(I8x16Add) \
+ V(I8x16AddSatS) \
+ V(I8x16Sub) \
+ V(I8x16SubSatS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16GtS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
V(I8x16RoundingAverageU)
// These opcodes require all inputs to be registers because the codegen is
@@ -2271,9 +2298,15 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8ExtMulLowI8x16S) \
V(I16x8ExtMulHighI8x16S) \
V(I16x8ExtMulLowI8x16U) \
- V(I16x8ExtMulHighI8x16U)
+ V(I16x8ExtMulHighI8x16U) \
+ V(I16x8Q15MulRSatS)
#define SIMD_UNOP_LIST(V) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2PromoteLowF32x4) \
+ V(F32x4DemoteF64x2Zero) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4RecipApprox) \
@@ -2298,19 +2331,11 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8Abs) \
V(I8x16Neg) \
V(I8x16Abs) \
- V(I8x16BitMask)
-
-#define SIMD_UNOP_PREFIX_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
+ V(I8x16BitMask) \
V(S128Not)
-#define SIMD_ANYTRUE_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue)
-
#define SIMD_ALLTRUE_LIST(V) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2607,36 +2632,12 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
-// TODO(v8:9198): SSE instructions that read 16 bytes from memory require the
-// operand to be 16-byte aligned. AVX instructions relax this requirement, but
-// might have reduced performance if the memory crosses cache line. But since we
-// have limited xmm registers, this might be okay to alleviate register
-// pressure.
-#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
- } else { \
- Emit(kSSE##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0))); \
- } \
- }
-SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
-#undef VISIT_SIMD_UNOP_PREFIX
-#undef SIMD_UNOP_PREFIX_LIST
-
-// The implementation of AnyTrue is the same for all shapes.
-#define VISIT_SIMD_ANYTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister()}; \
- Emit(kIA32S128AnyTrue, g.DefineAsRegister(node), \
- g.UseRegister(node->InputAt(0)), arraysize(temps), temps); \
- }
-SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
-#undef VISIT_SIMD_ANYTRUE
-#undef SIMD_ANYTRUE_LIST
+void InstructionSelector::VisitV128AnyTrue(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kIA32S128AnyTrue, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+}
#define VISIT_SIMD_ALLTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3111,22 +3112,120 @@ void InstructionSelector::VisitI64x2SignSelect(Node* node) {
VisitSignSelect(this, node, kIA32I64x2SignSelect);
}
+namespace {
+void VisitExtAddPairwise(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, bool need_temp) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand dst = (selector->IsSupported(AVX))
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ if (need_temp) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, dst, operand0, arraysize(temps), temps);
+ } else {
+ selector->Emit(opcode, dst, operand0);
+ }
+}
+} // namespace
+
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4ExtAddPairwiseI16x8S);
+ VisitExtAddPairwise(this, node, kIA32I32x4ExtAddPairwiseI16x8S, true);
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4ExtAddPairwiseI16x8U);
+ VisitExtAddPairwise(this, node, kIA32I32x4ExtAddPairwiseI16x8U, false);
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kIA32I16x8ExtAddPairwiseI8x16S, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)));
+ VisitExtAddPairwise(this, node, kIA32I16x8ExtAddPairwiseI8x16S, true);
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
- VisitRRSimd(this, node, kIA32I16x8ExtAddPairwiseI8x16U);
+ VisitExtAddPairwise(this, node, kIA32I16x8ExtAddPairwiseI8x16U, true);
+}
+
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ IA32OperandGenerator g(this);
+  // Both the AVX and the SSE path can define the result as an arbitrary
+  // register here, so no distinction is needed.
+  InstructionOperand dst = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+ Emit(kIA32I8x16Popcnt, dst, g.UseUniqueRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32F64x2ConvertLowI32x4U, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ if (IsSupported(AVX)) {
+ // Requires dst != src.
+ Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
+ } else {
+ Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
+ }
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitI64x2GtS(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2GtS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kIA32I64x2GtS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kIA32I64x2GtS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2GeS(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kIA32I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2Abs(Node* node) {
+ IA32OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kIA32I64x2Abs, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kIA32I64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
}
// static
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 44f6d5bcbf..89cd7be864 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -23,6 +23,8 @@
#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/backend/s390/instruction-codes-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/compiler/backend/riscv64/instruction-codes-riscv64.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -67,7 +69,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
/* Tail call opcodes are grouped together to make IsTailCall fast */ \
/* and Arch call opcodes are grouped together to make */ \
/* IsCallWithDescriptorFlags fast */ \
- V(ArchTailCallCodeObjectFromJSFunction) \
V(ArchTailCallCodeObject) \
V(ArchTailCallAddress) \
V(ArchTailCallWasm) \
@@ -281,6 +282,10 @@ using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+// LaneSizeField and AccessModeField are helper types to encode/decode a lane
+// size, an access mode, or both inside the overlapping MiscField.
+using LaneSizeField = base::BitField<int, 22, 8>;
+using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
using MiscField = base::BitField<int, 22, 10>;
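// Minimal stand-in (an assumption about the shape of base::BitField, not the
// real implementation) showing how LaneSizeField<22, 8> and
// AccessModeField<30, 2> pack into the same ten bits spanned by
// MiscField<22, 10>, so the two encodings can coexist or the whole field can
// be treated as one opaque value.
#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using LaneSize = BitFieldSketch<int, 22, 8>;
using AccessMode = BitFieldSketch<int, 30, 2>;

int main() {
  uint32_t code = LaneSize::encode(16) | AccessMode::encode(1);
  std::printf("lane size %d, access mode %d\n", LaneSize::decode(code),
              AccessMode::decode(code));
  return 0;
}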
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index 28195052df..99c36c923d 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -305,7 +305,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index a9b2010b7e..6571db1801 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -671,7 +671,7 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
// Returns the number of instruction operands added to inputs.
size_t InstructionSelector::AddInputsToFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+ FrameStateDescriptor* descriptor, FrameState state, OperandGenerator* g,
StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
@@ -682,15 +682,15 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
if (descriptor->outer_state()) {
entries += AddInputsToFrameStateDescriptor(
- descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
- g, deduplicator, inputs, kind, zone);
+ descriptor->outer_state(), state.outer_frame_state(), g, deduplicator,
+ inputs, kind, zone);
}
- Node* parameters = state->InputAt(kFrameStateParametersInput);
- Node* locals = state->InputAt(kFrameStateLocalsInput);
- Node* stack = state->InputAt(kFrameStateStackInput);
- Node* context = state->InputAt(kFrameStateContextInput);
- Node* function = state->InputAt(kFrameStateFunctionInput);
+ Node* parameters = state.parameters();
+ Node* locals = state.locals();
+ Node* stack = state.stack();
+ Node* context = state.context();
+ Node* function = state.function();
DCHECK_EQ(descriptor->parameters_count(),
StateValuesAccess(parameters).size());
@@ -803,7 +803,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
DeoptFrameStateOffsetField::encode(static_cast<int>(input_count));
AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
cont->reason(), cont->feedback(),
- cont->frame_state());
+ FrameState{cont->frame_state()});
} else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
} else if (cont->IsTrap()) {
@@ -828,7 +828,7 @@ Instruction* InstructionSelector::EmitWithContinuation(
void InstructionSelector::AppendDeoptimizeArguments(
InstructionOperandVector* args, DeoptimizeKind kind,
DeoptimizeReason reason, FeedbackSource const& feedback,
- Node* frame_state) {
+ FrameState frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
DCHECK_NE(DeoptimizeKind::kLazy, kind);
@@ -951,18 +951,12 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
- // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
- // JS-linkage callers with a register code target. The problem is that the
- // code target register may be clobbered before the final jmp by
- // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
- // entirely remove support for tail-calls from JS-linkage callers.
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
- : is_tail_call ? g.UseUniqueRegister(callee)
- : g.UseRegister(callee));
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
@@ -1015,20 +1009,21 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
- Node* frame_state =
- call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+ FrameState frame_state{
+ call->InputAt(static_cast<int>(buffer->descriptor->InputCount()))};
// If it was a syntactic tail call we need to drop the current frame and
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
if (is_tail_call) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
+ frame_state = FrameState{NodeProperties::GetFrameStateInput(frame_state)};
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
buffer->frame_state_descriptor->type() ==
FrameStateType::kArgumentsAdaptor) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
+ frame_state =
+ FrameState{NodeProperties::GetFrameStateInput(frame_state)};
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
}
@@ -1169,8 +1164,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (!source_positions_) return true;
SourcePosition source_position = source_positions_->GetSourcePosition(node);
if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
- sequence()->SetSourcePosition(instructions_[instruction_start],
- source_position);
+ sequence()->SetSourcePosition(instructions_.back(), source_position);
}
return true;
};
@@ -1178,8 +1172,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
- if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+ if (!FinishEmittedInstructions(block->control_input(), current_block_end)) {
return;
+ }
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
@@ -1288,7 +1283,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
- Node* value = input->InputAt(0);
+ FrameState value{input->InputAt(0)};
VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
break;
}
@@ -1945,6 +1940,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Trunc(node);
case IrOpcode::kF64x2NearestInt:
return MarkAsSimd128(node), VisitF64x2NearestInt(node);
+ case IrOpcode::kF64x2ConvertLowI32x4S:
+ return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4S(node);
+ case IrOpcode::kF64x2ConvertLowI32x4U:
+ return MarkAsSimd128(node), VisitF64x2ConvertLowI32x4U(node);
+ case IrOpcode::kF64x2PromoteLowF32x4:
+ return MarkAsSimd128(node), VisitF64x2PromoteLowF32x4(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -2003,6 +2004,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Trunc(node);
case IrOpcode::kF32x4NearestInt:
return MarkAsSimd128(node), VisitF32x4NearestInt(node);
+ case IrOpcode::kF32x4DemoteF64x2Zero:
+ return MarkAsSimd128(node), VisitF32x4DemoteF64x2Zero(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
@@ -2013,6 +2016,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
case IrOpcode::kI64x2ReplaceLaneI32Pair:
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
+ case IrOpcode::kI64x2Abs:
+ return MarkAsSimd128(node), VisitI64x2Abs(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
case IrOpcode::kI64x2SConvertI32x4Low:
@@ -2037,6 +2042,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2Mul(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
+ case IrOpcode::kI64x2Ne:
+ return MarkAsSimd128(node), VisitI64x2Ne(node);
+ case IrOpcode::kI64x2GtS:
+ return MarkAsSimd128(node), VisitI64x2GtS(node);
+ case IrOpcode::kI64x2GeS:
+ return MarkAsSimd128(node), VisitI64x2GeS(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
case IrOpcode::kI64x2ExtMulLowI32x4S:
@@ -2123,6 +2134,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
+ case IrOpcode::kI32x4TruncSatF64x2SZero:
+ return MarkAsSimd128(node), VisitI32x4TruncSatF64x2SZero(node);
+ case IrOpcode::kI32x4TruncSatF64x2UZero:
+ return MarkAsSimd128(node), VisitI32x4TruncSatF64x2UZero(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2293,16 +2308,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
- case IrOpcode::kV32x4AnyTrue:
- return MarkAsWord32(node), VisitV32x4AnyTrue(node);
+ case IrOpcode::kV128AnyTrue:
+ return MarkAsWord32(node), VisitV128AnyTrue(node);
+ case IrOpcode::kV64x2AllTrue:
+ return MarkAsWord32(node), VisitV64x2AllTrue(node);
case IrOpcode::kV32x4AllTrue:
return MarkAsWord32(node), VisitV32x4AllTrue(node);
- case IrOpcode::kV16x8AnyTrue:
- return MarkAsWord32(node), VisitV16x8AnyTrue(node);
case IrOpcode::kV16x8AllTrue:
return MarkAsWord32(node), VisitV16x8AllTrue(node);
- case IrOpcode::kV8x16AnyTrue:
- return MarkAsWord32(node), VisitV8x16AnyTrue(node);
case IrOpcode::kV8x16AllTrue:
return MarkAsWord32(node), VisitV8x16AllTrue(node);
default:
@@ -2689,7 +2702,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2714,7 +2727,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
- // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
+ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
+ // !V8_TARGET_ARCH_RISCV64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2740,73 +2754,13 @@ void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_MIPS
-void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_MIPS
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM
-// TODO(v8:10972) Prototype i64x2 widen i32x4.
-void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM64 || !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
- // && !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_ARM64
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
// TODO(v8:11168): Prototyping prefetch.
void InstructionSelector::VisitPrefetchTemporal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM64
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
-// TODO(v8:11002) Prototype i8x16.popcnt.
-void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32
-// TODO(v8:11086) Prototype extended pairwise add.
-void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
- UNIMPLEMENTED();
-}
-void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10975): Prototyping load lane and store lane.
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS64
+#endif  // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
!V8_TARGET_ARCH_ARM
@@ -2938,8 +2892,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (call_descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(call_descriptor->InputCount())));
+ frame_state_descriptor = GetFrameStateDescriptor(FrameState{
+ node->InputAt(static_cast<int>(call_descriptor->InputCount()))});
}
CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
@@ -3040,32 +2994,20 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
InstructionOperandVector temps(zone());
- if (caller->IsJSFunctionCall()) {
- switch (call_descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObjectFromJSFunction;
- break;
- default:
- UNREACHABLE();
- }
- int temps_count = GetTempsCountForTailCallFromJSFunction();
- for (int i = 0; i < temps_count; i++) {
- temps.push_back(g.TempRegister());
- }
- } else {
- switch (call_descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallAddress:
- opcode = kArchTailCallAddress;
- break;
- case CallDescriptor::kCallWasmFunction:
- opcode = kArchTailCallWasm;
- break;
- default:
- UNREACHABLE();
- }
+ switch (call_descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallAddress:
+ DCHECK(!caller->IsJSFunctionCall());
+ opcode = kArchTailCallAddress;
+ break;
+ case CallDescriptor::kCallWasmFunction:
+ DCHECK(!caller->IsJSFunctionCall());
+ opcode = kArchTailCallWasm;
+ break;
+ default:
+ UNREACHABLE();
}
opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
@@ -3202,7 +3144,7 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* frame_state) {
+ FrameState frame_state) {
InstructionOperandVector args(instruction_zone());
AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
Emit(kArchDeoptimize, 0, nullptr, args.size(), &args.front(), 0, nullptr);
@@ -3318,18 +3260,28 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node,
namespace {
-FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
+FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
+ FrameState state) {
DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
- DCHECK_EQ(kFrameStateInputCount, state->InputCount());
+ DCHECK_EQ(FrameState::kFrameStateInputCount, state->InputCount());
const FrameStateInfo& state_info = FrameStateInfoOf(state->op());
int parameters = state_info.parameter_count();
int locals = state_info.local_count();
- int stack = state_info.type() == FrameStateType::kInterpretedFunction ? 1 : 0;
+ int stack = state_info.type() == FrameStateType::kUnoptimizedFunction ? 1 : 0;
FrameStateDescriptor* outer_state = nullptr;
- Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
- if (outer_node->opcode() == IrOpcode::kFrameState) {
- outer_state = GetFrameStateDescriptorInternal(zone, outer_node);
+ if (state.has_outer_frame_state()) {
+ outer_state =
+ GetFrameStateDescriptorInternal(zone, state.outer_frame_state());
+ }
+
+ if (state_info.type() == FrameStateType::kJSToWasmBuiltinContinuation) {
+ auto function_info = static_cast<const JSToWasmFrameStateFunctionInfo*>(
+ state_info.function_info());
+ return zone->New<JSToWasmFrameStateDescriptor>(
+ zone, state_info.type(), state_info.bailout_id(),
+ state_info.state_combine(), parameters, locals, stack,
+ state_info.shared_info(), outer_state, function_info->signature());
}
return zone->New<FrameStateDescriptor>(
@@ -3341,7 +3293,7 @@ FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
} // namespace
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
- Node* state) {
+ FrameState state) {
auto* desc = GetFrameStateDescriptorInternal(instruction_zone(), state);
*max_unoptimized_frame_height_ =
std::max(*max_unoptimized_frame_height_,
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 18bc4ccfcb..4a65b5193e 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -491,7 +491,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void AppendDeoptimizeArguments(InstructionOperandVector* args,
DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* frame_state);
+ FrameState frame_state);
void EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand const& index_operand);
@@ -561,13 +561,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
CallBufferFlags flags, bool is_tail_call,
int stack_slot_delta = 0);
bool IsTailCallAddressImmediate();
- int GetTempsCountForTailCallFromJSFunction();
void UpdateMaxPushedArgumentCount(size_t count);
- FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ FrameStateDescriptor* GetFrameStateDescriptor(FrameState node);
size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
- Node* state, OperandGenerator* g,
+ FrameState state, OperandGenerator* g,
StateObjectDeduplicator* deduplicator,
InstructionOperandVector* inputs,
FrameStateInputKind kind, Zone* zone);
@@ -628,7 +627,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* frame_state);
+ FeedbackSource const& feedback, FrameState frame_state);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index e1e54c9d9f..a14ae2a702 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -14,8 +14,10 @@
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -999,31 +1001,32 @@ namespace {
size_t GetConservativeFrameSizeInBytes(FrameStateType type,
size_t parameters_count,
size_t locals_count,
- BailoutId bailout_id) {
+ BytecodeOffset bailout_id) {
switch (type) {
- case FrameStateType::kInterpretedFunction: {
- auto info = InterpretedFrameInfo::Conservative(
+ case FrameStateType::kUnoptimizedFunction: {
+ auto info = UnoptimizedFrameInfo::Conservative(
static_cast<int>(parameters_count), static_cast<int>(locals_count));
return info.frame_size_in_bytes();
}
- case FrameStateType::kArgumentsAdaptor: {
- auto info = ArgumentsAdaptorFrameInfo::Conservative(
- static_cast<int>(parameters_count));
- return info.frame_size_in_bytes();
- }
+ case FrameStateType::kArgumentsAdaptor:
+ // The arguments adaptor frame state is only used in the deoptimizer and
+ // does not occupy any extra space in the stack. Check out the design doc:
+ // https://docs.google.com/document/d/150wGaUREaZI6YWqOQFD5l2mWQXaPbbZjcAIJLOFrzMs/edit
+ return 0;
case FrameStateType::kConstructStub: {
auto info = ConstructStubFrameInfo::Conservative(
static_cast<int>(parameters_count));
return info.frame_size_in_bytes();
}
case FrameStateType::kBuiltinContinuation:
+ case FrameStateType::kJSToWasmBuiltinContinuation:
case FrameStateType::kJavaScriptBuiltinContinuation:
case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
const RegisterConfiguration* config = RegisterConfiguration::Default();
auto info = BuiltinContinuationFrameInfo::Conservative(
static_cast<int>(parameters_count),
Builtins::CallInterfaceDescriptorFor(
- Builtins::GetBuiltinFromBailoutId(bailout_id)),
+ Builtins::GetBuiltinFromBytecodeOffset(bailout_id)),
config);
return info.frame_size_in_bytes();
}
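For context on the kArgumentsAdaptor change above: the conservative frame-size computation now charges nothing for adaptor frames because they exist only as a deoptimizer artifact. The sketch below is a simplified standalone version of that dispatch; the slot counts and sizes are invented placeholders, not V8's actual frame layouts.

// Simplified model of GetConservativeFrameSizeInBytes after the change.
#include <cstddef>

enum class FrameKind {
  kUnoptimizedFunction,
  kArgumentsAdaptor,
  kConstructStub,
  kBuiltinContinuation
};

size_t ConservativeFrameSizeInBytes(FrameKind kind, size_t parameters,
                                    size_t locals) {
  constexpr size_t kSlotSize = 8;    // placeholder pointer size
  constexpr size_t kFixedSlots = 4;  // placeholder frame header slots
  switch (kind) {
    case FrameKind::kUnoptimizedFunction:
      return (kFixedSlots + parameters + locals) * kSlotSize;
    case FrameKind::kArgumentsAdaptor:
      // Only materialized by the deoptimizer; reserves no extra stack space.
      return 0;
    case FrameKind::kConstructStub:
    case FrameKind::kBuiltinContinuation:
      return (kFixedSlots + parameters) * kSlotSize;
  }
  return 0;
}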
@@ -1034,7 +1037,7 @@ size_t GetConservativeFrameSizeInBytes(FrameStateType type,
size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type,
size_t parameters_count,
size_t locals_count,
- BailoutId bailout_id,
+ BytecodeOffset bailout_id,
FrameStateDescriptor* outer_state) {
size_t outer_total_conservative_frame_size_in_bytes =
(outer_state == nullptr)
@@ -1048,7 +1051,7 @@ size_t GetTotalConservativeFrameSizeInBytes(FrameStateType type,
} // namespace
FrameStateDescriptor::FrameStateDescriptor(
- Zone* zone, FrameStateType type, BailoutId bailout_id,
+ Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
size_t locals_count, size_t stack_count,
MaybeHandle<SharedFunctionInfo> shared_info,
@@ -1068,9 +1071,10 @@ FrameStateDescriptor::FrameStateDescriptor(
size_t FrameStateDescriptor::GetHeight() const {
switch (type()) {
- case FrameStateType::kInterpretedFunction:
+ case FrameStateType::kUnoptimizedFunction:
return locals_count(); // The accumulator is *not* included.
case FrameStateType::kBuiltinContinuation:
+ case FrameStateType::kJSToWasmBuiltinContinuation:
// Custom, non-JS calling convention (that does not have a notion of
// a receiver or context).
return parameters_count();
@@ -1122,6 +1126,17 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
return count;
}
+JSToWasmFrameStateDescriptor::JSToWasmFrameStateDescriptor(
+ Zone* zone, FrameStateType type, BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine, size_t parameters_count,
+ size_t locals_count, size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state, const wasm::FunctionSig* wasm_signature)
+ : FrameStateDescriptor(zone, type, bailout_id, state_combine,
+ parameters_count, locals_count, stack_count,
+ shared_info, outer_state),
+ return_type_(wasm::WasmReturnTypeFromSignature(wasm_signature)) {}
+
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
}
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 55fce0aeeb..9aa808491a 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1300,7 +1300,8 @@ class StateValueList {
class FrameStateDescriptor : public ZoneObject {
public:
- FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
+ FrameStateDescriptor(Zone* zone, FrameStateType type,
+ BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine,
size_t parameters_count, size_t locals_count,
size_t stack_count,
@@ -1308,7 +1309,7 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* outer_state = nullptr);
FrameStateType type() const { return type_; }
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
size_t parameters_count() const { return parameters_count_; }
size_t locals_count() const { return locals_count_; }
@@ -1318,6 +1319,7 @@ class FrameStateDescriptor : public ZoneObject {
bool HasContext() const {
return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
type_ == FrameStateType::kBuiltinContinuation ||
+ type_ == FrameStateType::kJSToWasmBuiltinContinuation ||
type_ == FrameStateType::kConstructStub;
}
@@ -1346,7 +1348,7 @@ class FrameStateDescriptor : public ZoneObject {
private:
FrameStateType type_;
- BailoutId bailout_id_;
+ BytecodeOffset bailout_id_;
OutputFrameStateCombine frame_state_combine_;
const size_t parameters_count_;
const size_t locals_count_;
@@ -1357,6 +1359,23 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* const outer_state_;
};
+class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
+ public:
+ JSToWasmFrameStateDescriptor(Zone* zone, FrameStateType type,
+ BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine,
+ size_t parameters_count, size_t locals_count,
+ size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state,
+ const wasm::FunctionSig* wasm_signature);
+
+ base::Optional<wasm::ValueKind> return_type() const { return return_type_; }
+
+ private:
+ base::Optional<wasm::ValueKind> return_type_;
+};
+
// A deoptimization entry is a pair of the reason why we deoptimize and the
// frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
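The new JSToWasmFrameStateDescriptor above carries an optional wasm return kind derived from the function signature. The standalone sketch below models that derivation under the assumption that a value is only recorded when the signature has exactly one return; the types are simplified stand-ins, not the real wasm::FunctionSig or base::Optional.

// Toy model of deriving the optional return kind stored by the descriptor.
#include <optional>
#include <vector>

enum class ValueKindModel { kI32, kI64, kF32, kF64 };

struct FunctionSigModel {
  std::vector<ValueKindModel> returns;
  std::vector<ValueKindModel> params;
};

std::optional<ValueKindModel> ReturnTypeFromSignature(const FunctionSigModel* sig) {
  // Assumption: only single-return signatures yield a recorded return kind.
  if (sig == nullptr || sig->returns.size() != 1) return std::nullopt;
  return sig->returns.front();
}

struct JSToWasmDescriptorModel {
  std::optional<ValueKindModel> return_type;
  explicit JSToWasmDescriptorModel(const FunctionSigModel* sig)
      : return_type(ReturnTypeFromSignature(sig)) {}
};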
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index 43808526a8..e84f0d9439 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -263,8 +263,8 @@ class DeferredBlocksRegion final {
// a spill slot until we enter this deferred block region.
void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }
- ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
- ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }
+ ZoneSet<int>::const_iterator begin() const { return spilled_vregs_.begin(); }
+ ZoneSet<int>::const_iterator end() const { return spilled_vregs_.end(); }
const BitVector* blocks_covered() const { return &blocks_covered_; }
@@ -295,17 +295,18 @@ class VirtualRegisterData final {
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
+ bool has_constant_policy,
MidTierRegisterAllocationData* data);
// Emit gap moves to / from the spill slot.
- void EmitGapMoveToInputFromSpillSlot(AllocatedOperand to_operand,
+ void EmitGapMoveToInputFromSpillSlot(InstructionOperand to_operand,
int instr_index,
MidTierRegisterAllocationData* data);
- void EmitGapMoveFromOutputToSpillSlot(AllocatedOperand from_operand,
+ void EmitGapMoveFromOutputToSpillSlot(InstructionOperand from_operand,
const InstructionBlock* current_block,
int instr_index,
MidTierRegisterAllocationData* data);
- void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
+ void EmitGapMoveToSpillSlot(InstructionOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data);
// Adds pending spills for deferred-blocks.
@@ -328,14 +329,14 @@ class VirtualRegisterData final {
return HasSpillOperand() && spill_operand_->IsAllocated();
}
bool HasConstantSpillOperand() const {
- DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
- return is_constant();
+ return HasSpillOperand() && spill_operand_->IsConstant();
}
// Returns true if the virtual register should be spilled when it is output.
bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
+
void MarkAsNeedsSpillAtOutput() {
- if (is_constant()) return;
+ if (HasConstantSpillOperand()) return;
needs_spill_at_output_ = true;
if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
}
@@ -548,7 +549,8 @@ void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
void VirtualRegisterData::EnsureSpillRange(
MidTierRegisterAllocationData* data) {
- DCHECK(!is_constant());
+ DCHECK(!HasConstantSpillOperand());
+
if (HasSpillRange()) return;
const InstructionBlock* definition_block =
@@ -578,13 +580,15 @@ void VirtualRegisterData::EnsureSpillRange(
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
- if (is_constant()) return;
+ if (HasConstantSpillOperand()) return;
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
const InstructionBlock* block = data->GetBlock(instr_index);
if (CouldSpillOnEntryToDeferred(block)) {
+ // TODO(1180335): Remove once crbug.com/1180335 is fixed.
+ CHECK(HasSpillRange());
data->block_state(block->rpo_number())
.deferred_blocks_region()
->DeferSpillOutputUntilEntry(vreg());
@@ -610,12 +614,21 @@ void VirtualRegisterData::AddDeferredSpillOutput(
AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(!NeedsSpillAtOutput());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(HasSpillRange());
spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
int instr_index,
+ bool has_constant_policy,
MidTierRegisterAllocationData* data) {
+ if (!has_constant_policy && HasConstantSpillOperand()) {
+ // Reset the constant spill operand to force a real spill slot since this
+ // operand can't use the constant spill operand.
+ spill_operand_ = nullptr;
+ DCHECK(!HasConstantSpillOperand());
+ }
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
InstructionOperand::ReplaceWith(operand, spill_operand());
@@ -640,7 +653,7 @@ void VirtualRegisterData::EmitDeferredSpillOutputs(
}
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
- AllocatedOperand to_operand, int instr_index,
+ InstructionOperand to_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
DCHECK(!to_operand.IsPending());
@@ -656,7 +669,7 @@ void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
}
void VirtualRegisterData::EmitGapMoveToSpillSlot(
- AllocatedOperand from_operand, int instr_index,
+ InstructionOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
@@ -671,7 +684,7 @@ void VirtualRegisterData::EmitGapMoveToSpillSlot(
}
void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot(
- AllocatedOperand from_operand, const InstructionBlock* current_block,
+ InstructionOperand from_operand, const InstructionBlock* current_block,
int instr_index, MidTierRegisterAllocationData* data) {
DCHECK_EQ(data->GetBlock(instr_index), current_block);
if (instr_index == current_block->last_instruction_index()) {
@@ -760,7 +773,8 @@ class RegisterState final : public ZoneObject {
// this register, then |operand| will be too, otherwise |operand| will be
// replaced with |virtual_register|'s spill operand.
void AllocatePendingUse(RegisterIndex reg, int virtual_register,
- InstructionOperand* operand, int instr_index);
+ InstructionOperand* operand, bool can_be_constant,
+ int instr_index);
// Mark that the register is holding a phi operand that is yet to be allocated
// by the source block in the gap just before the last instruction in the
@@ -816,7 +830,7 @@ class RegisterState final : public ZoneObject {
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
- int instr_index);
+ bool can_be_constant, int instr_index);
void SpillForDeferred(AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data);
void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
@@ -881,6 +895,7 @@ class RegisterState final : public ZoneObject {
bool needs_gap_move_on_spill_;
bool is_shared_;
bool is_phi_gap_move_;
+ bool pending_uses_can_use_constant_;
int last_use_instr_index_;
int num_commits_required_;
@@ -910,6 +925,7 @@ void RegisterState::Register::Reset() {
is_shared_ = false;
is_phi_gap_move_ = false;
needs_gap_move_on_spill_ = false;
+ pending_uses_can_use_constant_ = true;
last_use_instr_index_ = -1;
num_commits_required_ = 0;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
@@ -921,7 +937,9 @@ void RegisterState::Register::Use(int virtual_register, int instr_index) {
// A register can have many pending uses, but should only ever have a single
// non-pending use, since any subsequent use will commit the preceding use
// first.
- DCHECK(!is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!is_allocated());
+ CHECK(!is_shared());
needs_gap_move_on_spill_ = true;
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -930,13 +948,17 @@ void RegisterState::Register::Use(int virtual_register, int instr_index) {
void RegisterState::Register::PendingUse(InstructionOperand* operand,
int virtual_register,
+ bool can_be_constant,
int instr_index) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
num_commits_required_ = 1;
}
DCHECK_EQ(virtual_register_, virtual_register);
+ pending_uses_can_use_constant_ &= can_be_constant;
PendingOperand pending_op(pending_uses());
InstructionOperand::ReplaceWith(operand, &pending_op);
@@ -950,7 +972,8 @@ void RegisterState::Register::MarkAsPhiMove() {
void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
bool on_exit, Zone* zone) {
- DCHECK(is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
if (!deferred_block_spills_) {
deferred_block_spills_.emplace(zone);
}
@@ -958,23 +981,27 @@ void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
}
void RegisterState::Register::AddSharedUses(int shared_use_count) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
- DCHECK(is_shared());
- DCHECK(is_allocated());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_shared());
+ CHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
- DCHECK_GT(num_commits_required_, 0);
+ CHECK_GT(num_commits_required_, 0);
}
void RegisterState::Register::Commit(AllocatedOperand allocated_op,
MidTierRegisterAllocationData* data) {
- DCHECK(is_allocated());
- DCHECK_GT(num_commits_required_, 0);
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
+ CHECK_GT(num_commits_required_, 0);
if (--num_commits_required_ == 0) {
// Allocate all pending uses to |allocated_op| if this commit is non-shared,
@@ -1011,7 +1038,8 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op,
vreg_data.EmitDeferredSpillOutputs(data);
}
}
- DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
@@ -1030,7 +1058,12 @@ void RegisterState::Register::Spill(AllocatedOperand allocated_op,
if (has_deferred_block_spills() || !current_block->IsDeferred()) {
vreg_data.MarkAsNeedsSpillAtOutput();
}
- virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
+ // TODO(1180335): Doing a full reset here shouldn't be necessary, but
+ // investigate if it fixes crbug.com/1180335.
+ bool is_shared = is_shared_;
+ Reset();
+ is_shared_ = is_shared;
+ CHECK_IMPLIES(is_shared_, was_spilled_while_shared());
}
void RegisterState::Register::SpillPhiGapMove(
@@ -1063,7 +1096,8 @@ void RegisterState::Register::SpillPendingUses(
while (pending_use) {
// Spill all the pending operands associated with this register.
PendingOperand* next = pending_use->next();
- vreg_data.SpillOperand(pending_use, last_use_instr_index(), data);
+ vreg_data.SpillOperand(pending_use, last_use_instr_index(),
+ pending_uses_can_use_constant_, data);
pending_use = next;
}
pending_uses_ = nullptr;
@@ -1072,8 +1106,9 @@ void RegisterState::Register::SpillPendingUses(
void RegisterState::Register::SpillForDeferred(
AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data) {
- DCHECK(is_allocated());
- DCHECK(is_shared());
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(is_allocated());
+ CHECK(is_shared());
// Add a pending deferred spill, then commit the register (with the commit
// being fulfilled by the deferred spill if the register is fully committed).
data->VirtualRegisterDataFor(virtual_register())
@@ -1085,6 +1120,8 @@ void RegisterState::Register::SpillForDeferred(
void RegisterState::Register::MoveToSpillSlotOnDeferred(
int virtual_register, int instr_index,
MidTierRegisterAllocationData* data) {
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -1158,9 +1195,10 @@ void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register,
InstructionOperand* operand,
- int instr_index) {
+ bool can_be_constant, int instr_index) {
EnsureRegisterData(reg);
- reg_data(reg).PendingUse(operand, virtual_register, instr_index);
+ reg_data(reg).PendingUse(operand, virtual_register, can_be_constant,
+ instr_index);
}
void RegisterState::UseForPhiGapMove(RegisterIndex reg) {
@@ -1297,7 +1335,7 @@ class SinglePassRegisterAllocator final {
// Allocation routines used to allocate a particular operand to either a
// register or a spill slot.
- void AllocateConstantOutput(ConstantOperand* operand);
+ void AllocateConstantOutput(ConstantOperand* operand, int instr_index);
void AllocateOutput(UnallocatedOperand* operand, int instr_index);
void AllocateInput(UnallocatedOperand* operand, int instr_index);
void AllocateSameInputOutput(UnallocatedOperand* output,
@@ -1387,7 +1425,8 @@ class SinglePassRegisterAllocator final {
// register is not subsequently spilled) for |operand| of the instruction at
// |instr_index|.
void AllocatePendingUse(RegisterIndex reg, int virtual_register,
- InstructionOperand* operand, int instr_index);
+ InstructionOperand* operand, bool can_be_constant,
+ int instr_index);
// Allocate |operand| to |reg| and add a gap move to move |virtual_register|
// to this register for the instruction at |instr_index|. |reg| will be
@@ -1498,6 +1537,7 @@ class SinglePassRegisterAllocator final {
RegisterBitVector in_use_at_instr_start_bits_;
RegisterBitVector in_use_at_instr_end_bits_;
RegisterBitVector allocated_registers_bits_;
+ RegisterBitVector same_input_output_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
@@ -1523,7 +1563,8 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
data_(data),
in_use_at_instr_start_bits_(),
in_use_at_instr_end_bits_(),
- allocated_registers_bits_() {
+ allocated_registers_bits_(),
+ same_input_output_registers_bits_() {
for (int i = 0; i < num_allocatable_registers_; i++) {
int reg_code = index_to_reg_code_[i];
reg_code_to_index_[reg_code] = RegisterIndex(i);
@@ -1591,6 +1632,7 @@ void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
void SinglePassRegisterAllocator::EndInstruction() {
in_use_at_instr_end_bits_.Reset();
in_use_at_instr_start_bits_.Reset();
+ same_input_output_registers_bits_.Reset();
}
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
@@ -1599,6 +1641,7 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
DCHECK(in_use_at_instr_start_bits_.IsEmpty());
DCHECK(in_use_at_instr_end_bits_.IsEmpty());
DCHECK(allocated_registers_bits_.IsEmpty());
+ DCHECK(same_input_output_registers_bits_.IsEmpty());
// Update the current block we are processing.
current_block_ = block;
@@ -1617,6 +1660,7 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
DCHECK(in_use_at_instr_start_bits_.IsEmpty());
DCHECK(in_use_at_instr_end_bits_.IsEmpty());
+ DCHECK(same_input_output_registers_bits_.IsEmpty());
// If we didn't allocate any registers of this kind, or we have reached the
// start, nothing to do here.
@@ -1766,7 +1810,8 @@ void SinglePassRegisterAllocator::MoveRegisterOnMerge(
data()->AddPendingOperandGapMove(instr_index, Instruction::START);
succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
&move->destination(), data());
- AllocatePendingUse(from, virtual_register, &move->source(), instr_index);
+ AllocatePendingUse(from, virtual_register, &move->source(), true,
+ instr_index);
}
void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
@@ -1903,6 +1948,9 @@ void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
+ DCHECK_NE(pos, UsePosition::kNone);
+ MachineRepresentation rep = RepresentationFor(virtual_register.vreg());
+
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
@@ -1910,14 +1958,24 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
// register hasn't yet been spilled, to try to avoid spilling it.
if (!reg.is_valid() && (must_use_register ||
!virtual_register.IsSpilledAt(instr_index, data()))) {
- reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
- must_use_register);
+ reg = ChooseRegisterFor(rep, pos, must_use_register);
+ } else if (reg.is_valid() &&
+ same_input_output_registers_bits_.Contains(reg, rep) &&
+ pos != UsePosition::kStart) {
+ // If we are trying to allocate a register that was used as a
+ // same_input_output operand, then we can't use it for an input that expands
+ // past UsePosition::kStart. This should only happen for REGISTER_OR_SLOT
+ // operands that are used for the deopt state, so we can just use a spill
+ // slot.
+ CHECK(!must_use_register);
+ return RegisterIndex::Invalid();
}
return reg;
}
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
MachineRepresentation rep, UsePosition pos, bool must_use_register) {
+ DCHECK_NE(pos, UsePosition::kNone);
RegisterIndex reg = ChooseFreeRegister(rep, pos);
if (!reg.is_valid() && must_use_register) {
reg = ChooseRegisterToSpill(rep, pos);
@@ -2082,6 +2140,8 @@ void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
VirtualRegisterData& vreg_data =
data()->VirtualRegisterDataFor(virtual_register);
+ // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
+ CHECK(vreg_data.HasSpillRange());
if (!vreg_data.NeedsSpillAtOutput() &&
!DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
// If a register has been assigned to the virtual register, and the virtual
@@ -2127,12 +2187,12 @@ void SinglePassRegisterAllocator::AllocateUse(RegisterIndex reg,
void SinglePassRegisterAllocator::AllocatePendingUse(
RegisterIndex reg, int virtual_register, InstructionOperand* operand,
- int instr_index) {
+ bool can_be_constant, int instr_index) {
DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
register_state()->AllocatePendingUse(reg, virtual_register, operand,
- instr_index);
+ can_be_constant, instr_index);
// Since this is a pending use and the operand doesn't need to use a register,
// allocate with UsePosition::kNone to avoid blocking its use by other
// operands in this instruction.
@@ -2145,7 +2205,7 @@ void SinglePassRegisterAllocator::AllocateUseWithMove(
int instr_index, UsePosition pos) {
AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register);
UnallocatedOperand from = UnallocatedOperand(
- UnallocatedOperand::REGISTER_OR_SLOT, virtual_register);
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
data()->AddGapMove(instr_index, Instruction::END, from, to);
InstructionOperand::ReplaceWith(operand, &to);
MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
@@ -2169,17 +2229,17 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
// instruction since the allocation needs to reflect the state before
// the instruction (at the gap move). For now spilling is fine since
// fixed slot inputs are uncommon.
- UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
- virtual_register);
+ UnallocatedOperand input_copy(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, virtual_register);
AllocatedOperand allocated = AllocatedOperand(
AllocatedOperand::STACK_SLOT, rep, operand->fixed_slot_index());
InstructionOperand::ReplaceWith(operand, &allocated);
MoveOperands* move_op =
data()->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
- vreg_data.SpillOperand(&move_op->source(), instr_index, data());
+ vreg_data.SpillOperand(&move_op->source(), instr_index, true, data());
return;
} else if (operand->HasSlotPolicy()) {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, false, data());
return;
}
@@ -2199,9 +2259,7 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
AllocateUse(reg, virtual_register, operand, instr_index, pos);
}
} else {
- bool must_use_register = operand->HasRegisterPolicy() ||
- (vreg_data.is_constant() &&
- !operand->HasRegisterOrSlotOrConstantPolicy());
+ bool must_use_register = operand->HasRegisterPolicy();
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
@@ -2209,10 +2267,14 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
if (must_use_register) {
AllocateUse(reg, virtual_register, operand, instr_index, pos);
} else {
- AllocatePendingUse(reg, virtual_register, operand, instr_index);
+ AllocatePendingUse(reg, virtual_register, operand,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ instr_index);
}
} else {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ data());
}
}
}
@@ -2224,23 +2286,28 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
// Gap move inputs should be unconstrained.
- DCHECK(operand->HasRegisterOrSlotPolicy());
+ DCHECK(operand->HasRegisterOrSlotOrConstantPolicy());
RegisterIndex reg =
ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
- AllocatePendingUse(reg, virtual_register, operand, instr_index);
+ AllocatePendingUse(reg, virtual_register, operand, true, instr_index);
} else {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, true, data());
}
}
void SinglePassRegisterAllocator::AllocateConstantOutput(
- ConstantOperand* operand) {
+ ConstantOperand* operand, int instr_index) {
EnsureRegisterState();
// If the constant is allocated to a register, spill it now to add the
// necessary gap moves from the constant operand to the register.
int virtual_register = operand->virtual_register();
+ VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
SpillRegisterForVirtualRegister(virtual_register);
+ if (vreg_data.NeedsSpillAtOutput()) {
+ vreg_data.EmitGapMoveFromOutputToSpillSlot(*operand, current_block(),
+ instr_index, data());
+ }
}
void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand,
@@ -2270,7 +2337,7 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
// TODO(rmcilroy): support secondary storage.
if (!reg.is_valid()) {
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index, false, data());
} else {
InstructionOperand move_output_to;
if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
@@ -2323,6 +2390,7 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
MachineRepresentation rep = RepresentationFor(input_vreg);
UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg);
InstructionOperand::ReplaceWith(input, &fixed_input);
+ same_input_output_registers_bits_.Add(reg, rep);
} else {
// Output was spilled. Due to the SameAsInput allocation policy, we need to
// make the input operand the same as the output, i.e., the output virtual
@@ -2330,14 +2398,14 @@ void SinglePassRegisterAllocator::AllocateSameInputOutput(
// virtual register's spill slot, then add a gap-move to move the input
// value into this spill slot.
VirtualRegisterData& output_vreg_data = VirtualRegisterDataFor(output_vreg);
- output_vreg_data.SpillOperand(input, instr_index, data());
+ output_vreg_data.SpillOperand(input, instr_index, false, data());
// Add an unconstrained gap move for the input virtual register.
- UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
- input_vreg);
+ UnallocatedOperand unconstrained_input(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, input_vreg);
MoveOperands* move_ops = data()->AddGapMove(
instr_index, Instruction::END, unconstrained_input, PendingOperand());
- output_vreg_data.SpillOperand(&move_ops->destination(), instr_index,
+ output_vreg_data.SpillOperand(&move_ops->destination(), instr_index, true,
data());
}
}
@@ -2365,7 +2433,9 @@ void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
CommitRegister(reg, virtual_register, operand, UsePosition::kAll);
} else {
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
- vreg_data.SpillOperand(operand, instr_index, data());
+ vreg_data.SpillOperand(operand, instr_index,
+ operand->HasRegisterOrSlotOrConstantPolicy(),
+ data());
}
}
@@ -2444,12 +2514,12 @@ void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
CommitRegister(to_register, to_vreg, to_operand, UsePosition::kAll);
} else {
VirtualRegisterDataFor(to_vreg).SpillOperand(to_operand, instr_index,
- data());
+ true, data());
}
// The from side is unconstrained.
- UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
- from_vreg);
+ UnallocatedOperand unconstrained_input(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, from_vreg);
InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
}
}
@@ -2729,9 +2799,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
for (RpoNumber successor : block->successors()) {
if (!data()->GetBlock(successor)->IsDeferred()) continue;
DCHECK_GT(successor, block_rpo);
- for (int virtual_register :
+ for (const int virtual_register :
*data()->block_state(successor).deferred_blocks_region()) {
- USE(virtual_register);
AllocatorFor(RepresentationFor(virtual_register))
.AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
successor, virtual_register);
@@ -2756,7 +2825,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
DCHECK(!output->IsAllocated());
if (output->IsConstant()) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
- AllocatorFor(constant_operand).AllocateConstantOutput(constant_operand);
+ AllocatorFor(constant_operand)
+ .AllocateConstantOutput(constant_operand, instr_index);
} else {
UnallocatedOperand* unallocated_output =
UnallocatedOperand::cast(output);
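Several hunks above thread a has_constant_policy / can_be_constant flag through SpillOperand and the pending-use machinery: a constant spill operand may only be substituted when the use site can actually encode a constant. The sketch below is a simplified standalone model of that fallback; the types and the string result are illustrative stand-ins, not the real allocator classes.

// Toy model of the constant-spill fallback in VirtualRegisterData::SpillOperand.
#include <optional>
#include <string>

struct VirtualRegModel {
  std::optional<int> constant_spill;  // set when the vreg is defined by a constant
  bool has_spill_slot = false;

  bool HasConstantSpillOperand() const { return constant_spill.has_value(); }

  std::string SpillOperand(bool has_constant_policy) {
    if (!has_constant_policy && HasConstantSpillOperand()) {
      // The operand cannot hold a constant: drop the constant spill operand and
      // force a real spill slot, as the hunk above does.
      constant_spill.reset();
    }
    if (HasConstantSpillOperand()) {
      return "constant:" + std::to_string(*constant_spill);
    }
    has_spill_slot = true;
    return "stack-slot";
  }
};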
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 08f8ef7d07..79e8836bd0 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -305,8 +305,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
@@ -546,30 +545,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
}
frame_access_state()->SetFrameAccessToSP();
}
-
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Branch(&done, ne, scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ lw(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -694,13 +669,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -821,7 +790,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
@@ -2075,6 +2044,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI32x4ExtAddPairwiseI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4ExtAddPairwiseI16x8U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsF64x2Abs: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63);
@@ -2233,6 +2214,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ctcmsa(MSACSR, kScratchReg);
break;
}
+ case kMipsF64x2ConvertLowI32x4S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMipsF64x2ConvertLowI32x4U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMipsF64x2PromoteLowF32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsI64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2295,6 +2297,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI64x2Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMipsI64x2GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ adds_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
+ case kMipsI64x2SConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMipsI64x2SConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMipsI64x2UConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI64x2UConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI64x2ExtMulLowI32x4S:
ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d);
break;
@@ -2364,6 +2425,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kMipsF32x4DemoteF64x2Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI32x4Mul: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2648,6 +2716,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI32x4TruncSatF64x2SZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI32x4TruncSatF64x2UZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2838,6 +2924,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsI16x8ExtAddPairwiseI8x16S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8ExtAddPairwiseI8x16U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ hadd_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMipsI8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3005,6 +3103,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI8x16Popcnt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMipsI8x16BitMask: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3047,9 +3150,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsV32x4AnyTrue:
- case kMipsV16x8AnyTrue:
- case kMipsV8x16AnyTrue: {
+ case kMipsV128AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3061,6 +3162,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
+ case kMipsV64x2AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
case kMipsV32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3769,7 +3881,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4059,7 +4171,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4130,7 +4242,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Operand(static_cast<int64_t>(0)));
}
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4138,9 +4249,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
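The code-generator hunks above read the memory-access mode with AccessModeField::decode(opcode) instead of casting the raw MiscField bits. The sketch below models that style of bit-field packing in a self-contained way; the field position and width are invented for illustration and do not match V8's actual instruction-code layout.

// Standalone model of packing a memory-access mode into an instruction word.
#include <cstdint>

enum MemoryAccessModeModel : uint32_t {
  kAccessDirect = 0,
  kAccessProtected = 1,
  kAccessPoisoned = 2
};

template <typename T, int kShift, int kBits>
struct BitFieldModel {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using AccessModeFieldModel = BitFieldModel<MemoryAccessModeModel, 22, 2>;

int main() {
  uint32_t opcode = 0x17;  // arbitrary arch-opcode bits
  opcode |= AccessModeFieldModel::encode(kAccessPoisoned);
  return AccessModeFieldModel::decode(opcode) == kAccessPoisoned ? 0 : 1;
}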
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 6bc14ca317..2048cbfe40 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -160,6 +160,9 @@ namespace compiler {
V(MipsF64x2Floor) \
V(MipsF64x2Trunc) \
V(MipsF64x2NearestInt) \
+ V(MipsF64x2ConvertLowI32x4S) \
+ V(MipsF64x2ConvertLowI32x4U) \
+ V(MipsF64x2PromoteLowF32x4) \
V(MipsI64x2Add) \
V(MipsI64x2Sub) \
V(MipsI64x2Mul) \
@@ -169,6 +172,14 @@ namespace compiler {
V(MipsI64x2ShrU) \
V(MipsI64x2BitMask) \
V(MipsI64x2Eq) \
+ V(MipsI64x2Ne) \
+ V(MipsI64x2GtS) \
+ V(MipsI64x2GeS) \
+ V(MipsI64x2Abs) \
+ V(MipsI64x2SConvertI32x4Low) \
+ V(MipsI64x2SConvertI32x4High) \
+ V(MipsI64x2UConvertI32x4Low) \
+ V(MipsI64x2UConvertI32x4High) \
V(MipsI64x2ExtMulLowI32x4S) \
V(MipsI64x2ExtMulHighI32x4S) \
V(MipsI64x2ExtMulLowI32x4U) \
@@ -178,6 +189,7 @@ namespace compiler {
V(MipsF32x4ReplaceLane) \
V(MipsF32x4SConvertI32x4) \
V(MipsF32x4UConvertI32x4) \
+ V(MipsF32x4DemoteF64x2Zero) \
V(MipsI32x4Mul) \
V(MipsI32x4MaxS) \
V(MipsI32x4MinS) \
@@ -227,6 +239,10 @@ namespace compiler {
V(MipsI32x4ExtMulHighI16x8S) \
V(MipsI32x4ExtMulLowI16x8U) \
V(MipsI32x4ExtMulHighI16x8U) \
+ V(MipsI32x4TruncSatF64x2SZero) \
+ V(MipsI32x4TruncSatF64x2UZero) \
+ V(MipsI32x4ExtAddPairwiseI16x8S) \
+ V(MipsI32x4ExtAddPairwiseI16x8U) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -261,6 +277,8 @@ namespace compiler {
V(MipsI16x8ExtMulHighI8x16S) \
V(MipsI16x8ExtMulLowI8x16U) \
V(MipsI16x8ExtMulHighI8x16U) \
+ V(MipsI16x8ExtAddPairwiseI8x16S) \
+ V(MipsI16x8ExtAddPairwiseI8x16U) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
@@ -288,6 +306,7 @@ namespace compiler {
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
V(MipsI8x16Abs) \
+ V(MipsI8x16Popcnt) \
V(MipsI8x16BitMask) \
V(MipsS128And) \
V(MipsS128Or) \
@@ -295,12 +314,11 @@ namespace compiler {
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
- V(MipsV32x4AnyTrue) \
+ V(MipsV64x2AllTrue) \
V(MipsV32x4AllTrue) \
- V(MipsV16x8AnyTrue) \
V(MipsV16x8AllTrue) \
- V(MipsV8x16AnyTrue) \
V(MipsV8x16AllTrue) \
+ V(MipsV128AnyTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
V(MipsS32x4PackEven) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 404f9e4951..291f063053 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -63,6 +63,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF64x2Floor:
case kMipsF64x2Trunc:
case kMipsF64x2NearestInt:
+ case kMipsF64x2ConvertLowI32x4S:
+ case kMipsF64x2ConvertLowI32x4U:
+ case kMipsF64x2PromoteLowF32x4:
case kMipsI64x2Add:
case kMipsI64x2Sub:
case kMipsI64x2Mul:
@@ -72,6 +75,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI64x2ShrU:
case kMipsI64x2BitMask:
case kMipsI64x2Eq:
+ case kMipsI64x2Ne:
+ case kMipsI64x2GtS:
+ case kMipsI64x2GeS:
+ case kMipsI64x2Abs:
+ case kMipsI64x2SConvertI32x4Low:
+ case kMipsI64x2SConvertI32x4High:
+ case kMipsI64x2UConvertI32x4Low:
+ case kMipsI64x2UConvertI32x4High:
case kMipsI64x2ExtMulLowI32x4S:
case kMipsI64x2ExtMulHighI32x4S:
case kMipsI64x2ExtMulLowI32x4U:
@@ -103,6 +114,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Floor:
case kMipsF32x4Trunc:
case kMipsF32x4NearestInt:
+ case kMipsF32x4DemoteF64x2Zero:
case kMipsFloat32Max:
case kMipsFloat32Min:
case kMipsFloat32RoundDown:
@@ -162,6 +174,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8ExtMulHighI8x16S:
case kMipsI16x8ExtMulLowI8x16U:
case kMipsI16x8ExtMulHighI8x16U:
+ case kMipsI16x8ExtAddPairwiseI8x16S:
+ case kMipsI16x8ExtAddPairwiseI8x16U:
+ case kMipsI32x4ExtAddPairwiseI16x8S:
+ case kMipsI32x4ExtAddPairwiseI16x8U:
case kMipsI32x4Add:
case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
@@ -196,6 +212,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4ExtMulHighI16x8S:
case kMipsI32x4ExtMulLowI16x8U:
case kMipsI32x4ExtMulHighI16x8U:
+ case kMipsI32x4TruncSatF64x2SZero:
+ case kMipsI32x4TruncSatF64x2UZero:
case kMipsI8x16Add:
case kMipsI8x16AddSatS:
case kMipsI8x16AddSatU:
@@ -225,6 +243,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16SubSatU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
+ case kMipsI8x16Popcnt:
case kMipsI8x16BitMask:
case kMipsIns:
case kMipsLsa:
@@ -269,12 +288,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS16x8InterleaveRight:
case kMipsS16x8PackEven:
case kMipsS16x8PackOdd:
- case kMipsV8x16AllTrue:
- case kMipsV8x16AnyTrue:
+ case kMipsV64x2AllTrue:
case kMipsV32x4AllTrue:
- case kMipsV32x4AnyTrue:
case kMipsV16x8AllTrue:
- case kMipsV16x8AnyTrue:
+ case kMipsV8x16AllTrue:
+ case kMipsV128AnyTrue:
case kMipsS32x4InterleaveEven:
case kMipsS32x4InterleaveLeft:
case kMipsS32x4InterleaveOdd:
@@ -1263,11 +1281,6 @@ int PrepareForTailCallLatency() {
Latency::BRANCH + 2 * SubuLatency(false) + 2 + Latency::BRANCH + 1;
}
-int AssemblePopArgumentsAdaptorFrameLatency() {
- return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
- PrepareForTailCallLatency();
-}
-
int JumpLatency() {
// Estimated max.
return 1 + AdduLatency(false) + Latency::BRANCH + 2;
@@ -1380,14 +1393,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArchCallCodeObject:
case kArchCallWasmFunction:
return CallLatency();
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- int latency = 0;
- if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
- latency = AssemblePopArgumentsAdaptorFrameLatency();
- }
- return latency + JumpLatency();
- }
+ case kArchTailCallCodeObject:
case kArchTailCallWasm:
case kArchTailCallAddress:
return JumpLatency();
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 423540b455..be8c17ad9c 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -377,7 +377,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
if (g.CanBeImmediate(index, opcode)) {
@@ -481,6 +481,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1395,8 +1399,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitUnalignedLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -2113,50 +2115,65 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kMipsF64x2Abs) \
- V(F64x2Neg, kMipsF64x2Neg) \
- V(F64x2Sqrt, kMipsF64x2Sqrt) \
- V(F64x2Ceil, kMipsF64x2Ceil) \
- V(F64x2Floor, kMipsF64x2Floor) \
- V(F64x2Trunc, kMipsF64x2Trunc) \
- V(F64x2NearestInt, kMipsF64x2NearestInt) \
- V(I64x2Neg, kMipsI64x2Neg) \
- V(I64x2BitMask, kMipsI64x2BitMask) \
- V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
- V(F32x4Abs, kMipsF32x4Abs) \
- V(F32x4Neg, kMipsF32x4Neg) \
- V(F32x4Sqrt, kMipsF32x4Sqrt) \
- V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
- V(F32x4Ceil, kMipsF32x4Ceil) \
- V(F32x4Floor, kMipsF32x4Floor) \
- V(F32x4Trunc, kMipsF32x4Trunc) \
- V(F32x4NearestInt, kMipsF32x4NearestInt) \
- V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
- V(I32x4Neg, kMipsI32x4Neg) \
- V(I32x4BitMask, kMipsI32x4BitMask) \
- V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
- V(I16x8Neg, kMipsI16x8Neg) \
- V(I16x8BitMask, kMipsI16x8BitMask) \
- V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
- V(I8x16Neg, kMipsI8x16Neg) \
- V(I8x16BitMask, kMipsI8x16BitMask) \
- V(S128Not, kMipsS128Not) \
- V(V32x4AnyTrue, kMipsV32x4AnyTrue) \
- V(V32x4AllTrue, kMipsV32x4AllTrue) \
- V(V16x8AnyTrue, kMipsV16x8AnyTrue) \
- V(V16x8AllTrue, kMipsV16x8AllTrue) \
- V(V8x16AnyTrue, kMipsV8x16AnyTrue) \
- V(V8x16AllTrue, kMipsV8x16AllTrue)
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kMipsF64x2Abs) \
+ V(F64x2Neg, kMipsF64x2Neg) \
+ V(F64x2Sqrt, kMipsF64x2Sqrt) \
+ V(F64x2Ceil, kMipsF64x2Ceil) \
+ V(F64x2Floor, kMipsF64x2Floor) \
+ V(F64x2Trunc, kMipsF64x2Trunc) \
+ V(F64x2NearestInt, kMipsF64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4) \
+ V(I64x2Neg, kMipsI64x2Neg) \
+ V(I64x2BitMask, kMipsI64x2BitMask) \
+ V(I64x2Abs, kMipsI64x2Abs) \
+ V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High) \
+ V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
+ V(F32x4Abs, kMipsF32x4Abs) \
+ V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4Sqrt, kMipsF32x4Sqrt) \
+ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kMipsF32x4Ceil) \
+ V(F32x4Floor, kMipsF32x4Floor) \
+ V(F32x4Trunc, kMipsF32x4Trunc) \
+ V(F32x4NearestInt, kMipsF32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero) \
+ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
+ V(I32x4Neg, kMipsI32x4Neg) \
+ V(I32x4BitMask, kMipsI32x4BitMask) \
+ V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
+ V(I32x4ExtAddPairwiseI16x8S, kMipsI32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U, kMipsI32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kMipsI16x8Neg) \
+ V(I16x8BitMask, kMipsI16x8BitMask) \
+ V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S, kMipsI16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U, kMipsI16x8ExtAddPairwiseI8x16U) \
+ V(I8x16Neg, kMipsI8x16Neg) \
+ V(I8x16Popcnt, kMipsI8x16Popcnt) \
+ V(I8x16BitMask, kMipsI8x16BitMask) \
+ V(S128Not, kMipsS128Not) \
+ V(V64x2AllTrue, kMipsV64x2AllTrue) \
+ V(V32x4AllTrue, kMipsV32x4AllTrue) \
+ V(V16x8AllTrue, kMipsV16x8AllTrue) \
+ V(V8x16AllTrue, kMipsV8x16AllTrue) \
+ V(V128AnyTrue, kMipsV128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2184,9 +2201,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Lt, kMipsF64x2Lt) \
V(F64x2Le, kMipsF64x2Le) \
V(I64x2Eq, kMipsI64x2Eq) \
+ V(I64x2Ne, kMipsI64x2Ne) \
V(I64x2Add, kMipsI64x2Add) \
V(I64x2Sub, kMipsI64x2Sub) \
V(I64x2Mul, kMipsI64x2Mul) \
+ V(I64x2GtS, kMipsI64x2GtS) \
+ V(I64x2GeS, kMipsI64x2GeS) \
V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \
V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \
V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 868134ff04..d6e720b6de 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -313,8 +313,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
@@ -509,29 +508,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Branch(&done, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ Ld(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -646,11 +622,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallWasmFunction: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt64());
@@ -663,13 +634,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
@@ -790,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
@@ -1852,28 +1817,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMips64S128Load8Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lb(kScratchReg, i.MemoryOperand());
- __ fill_b(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load16Splat: {
+ case kMips64S128LoadSplat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lh(kScratchReg, i.MemoryOperand());
- __ fill_h(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load32Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Lw(kScratchReg, i.MemoryOperand());
- __ fill_w(i.OutputSimd128Register(), kScratchReg);
- break;
- }
- case kMips64S128Load64Splat: {
- CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ Ld(kScratchReg, i.MemoryOperand());
- __ fill_d(i.OutputSimd128Register(), kScratchReg);
+ auto sz = static_cast<MSASize>(MiscField::decode(instr->opcode()));
+ __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kMips64S128Load8x8S: {
@@ -2351,6 +2298,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest);
break;
}
+ case kMips64F64x2ConvertLowI32x4S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMips64F64x2ConvertLowI32x4U: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0));
+ __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero);
+ break;
+ }
+ case kMips64F64x2PromoteLowF32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
@@ -2441,6 +2409,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64I64x2Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kMips64I64x2GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2Abs: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kSimd128RegZero);
+ break;
+ }
+ case kMips64I64x2SConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMips64I64x2SConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_w(kSimd128ScratchReg, src, src);
+ __ slli_d(dst, kSimd128ScratchReg, 32);
+ __ srai_d(dst, dst, 32);
+ break;
+ }
+ case kMips64I64x2UConvertI32x4Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I64x2UConvertI32x4High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64ExtMulLow: {
auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
__ ExtMulLow(dt, i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2453,6 +2480,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64ExtAddPairwise: {
+ auto dt = static_cast<MSADataType>(MiscField::decode(instr->opcode()));
+ __ ExtAddPairwise(dt, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64F32x4Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
@@ -2740,6 +2773,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kRoundToNearest);
break;
}
+ case kMips64F32x4DemoteF64x2Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2814,6 +2854,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64I32x4TruncSatF64x2SZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I32x4TruncSatF64x2UZero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero,
+ kSimd128ScratchReg);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3209,6 +3267,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I8x16Popcnt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kMips64I8x16BitMask: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -3251,9 +3314,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64V32x4AnyTrue:
- case kMips64V16x8AnyTrue:
- case kMips64V8x16AnyTrue: {
+ case kMips64V128AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3264,6 +3325,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
+ case kMips64V64x2AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 1); // branch delay slot
+ __ li(dst, 0l);
+ __ bind(&all_true);
+ break;
+ }
case kMips64V32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
@@ -4011,7 +4083,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4313,7 +4385,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4386,7 +4458,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
Operand(static_cast<int64_t>(0)));
}
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
+
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4394,9 +4466,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -4430,7 +4499,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ dsll(t0, t0, kSystemPointerSizeLog2);
__ Daddu(sp, sp, t0);
} else if (additional_pop_count->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_count + additional_count);
} else {
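A minimal scalar sketch of the slli_d/srai_d sign-extension trick used in the kMips64I64x2SConvertI32x4Low and kMips64F64x2ConvertLowI32x4S cases above (illustrative only, not V8 code; SignExtendLow32 is a made-up helper name; assumes arithmetic right shift of signed values, which C++20 guarantees and mainstream compilers already provide):

#include <cassert>
#include <cstdint>

// Scalar model of: slli_d(reg, 32) followed by srai_d(reg, 32) on one lane.
int64_t SignExtendLow32(uint64_t lane) {
  // Move the low word into the upper half, then shift it back arithmetically
  // so the 32-bit sign bit fills the upper 32 bits of the 64-bit lane.
  return static_cast<int64_t>(lane << 32) >> 32;
}

int main() {
  assert(SignExtendLow32(0x00000000FFFFFFFFull) == -1);  // 0xFFFFFFFF -> -1
  assert(SignExtendLow32(0x1234567800000001ull) == 1);   // high word discarded
  return 0;
}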
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 5a162d90f7..a6bed82ea8 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -210,6 +210,9 @@ namespace compiler {
V(Mips64F64x2Floor) \
V(Mips64F64x2Trunc) \
V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
@@ -222,8 +225,17 @@ namespace compiler {
V(Mips64I64x2ShrU) \
V(Mips64I64x2BitMask) \
V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
V(Mips64ExtMulLow) \
V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4Sqrt) \
@@ -246,6 +258,7 @@ namespace compiler {
V(Mips64F32x4Floor) \
V(Mips64F32x4Trunc) \
V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
@@ -256,6 +269,8 @@ namespace compiler {
V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \
V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -313,6 +328,7 @@ namespace compiler {
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
@@ -320,12 +336,11 @@ namespace compiler {
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64V32x4AnyTrue) \
+ V(Mips64V64x2AllTrue) \
V(Mips64V32x4AllTrue) \
- V(Mips64V16x8AnyTrue) \
V(Mips64V16x8AllTrue) \
- V(Mips64V8x16AnyTrue) \
V(Mips64V8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
@@ -353,10 +368,7 @@ namespace compiler {
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
- V(Mips64S128Load8Splat) \
- V(Mips64S128Load16Splat) \
- V(Mips64S128Load32Splat) \
- V(Mips64S128Load64Splat) \
+ V(Mips64S128LoadSplat) \
V(Mips64S128Load8x8S) \
V(Mips64S128Load8x8U) \
V(Mips64S128Load16x4S) \
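The instruction-codes header above is maintained with the X-macro pattern: one V(...) list is expanded in several places (the opcode enum, the scheduler switch, selector tables) so the lists cannot drift apart. A self-contained sketch of that pattern, with invented opcode names and nothing taken from V8 headers:

#include <cstdio>

#define MY_OPCODE_LIST(V) \
  V(LoadSplat)            \
  V(ExtAddPairwise)       \
  V(V128AnyTrue)

// First expansion: the enum.
enum MyOpcode {
#define DECLARE(name) k##name,
  MY_OPCODE_LIST(DECLARE)
#undef DECLARE
};

// Second expansion: a name table kept in sync with the enum for free.
const char* MyOpcodeName(MyOpcode op) {
  switch (op) {
#define NAME_CASE(name) \
  case k##name:         \
    return #name;
    MY_OPCODE_LIST(NAME_CASE)
#undef NAME_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", MyOpcodeName(kExtAddPairwise));  // prints ExtAddPairwise
  return 0;
}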
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 3c7a7738a7..6baff2905e 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -88,6 +88,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Floor:
case kMips64F64x2Trunc:
case kMips64F64x2NearestInt:
+ case kMips64F64x2ConvertLowI32x4S:
+ case kMips64F64x2ConvertLowI32x4U:
+ case kMips64F64x2PromoteLowF32x4:
case kMips64I64x2Splat:
case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane:
@@ -100,8 +103,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I64x2ShrU:
case kMips64I64x2BitMask:
case kMips64I64x2Eq:
+ case kMips64I64x2Ne:
+ case kMips64I64x2GtS:
+ case kMips64I64x2GeS:
+ case kMips64I64x2Abs:
+ case kMips64I64x2SConvertI32x4Low:
+ case kMips64I64x2SConvertI32x4High:
+ case kMips64I64x2UConvertI32x4Low:
+ case kMips64I64x2UConvertI32x4High:
case kMips64ExtMulLow:
case kMips64ExtMulHigh:
+ case kMips64ExtAddPairwise:
case kMips64F32x4Abs:
case kMips64F32x4Add:
case kMips64F32x4AddHoriz:
@@ -129,6 +141,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Floor:
case kMips64F32x4Trunc:
case kMips64F32x4NearestInt:
+ case kMips64F32x4DemoteF64x2Zero:
case kMips64F64x2Splat:
case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane:
@@ -219,6 +232,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4Abs:
case kMips64I32x4BitMask:
case kMips64I32x4DotI16x8S:
+ case kMips64I32x4TruncSatF64x2SZero:
+ case kMips64I32x4TruncSatF64x2UZero:
case kMips64I8x16Add:
case kMips64I8x16AddSatS:
case kMips64I8x16AddSatU:
@@ -246,6 +261,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSatU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
+ case kMips64I8x16Popcnt:
case kMips64I8x16BitMask:
case kMips64Ins:
case kMips64Lsa:
@@ -288,12 +304,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64V8x16AllTrue:
- case kMips64V8x16AnyTrue:
+ case kMips64V64x2AllTrue:
case kMips64V32x4AllTrue:
- case kMips64V32x4AnyTrue:
case kMips64V16x8AllTrue:
- case kMips64V16x8AnyTrue:
+ case kMips64V8x16AllTrue:
+ case kMips64V128AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
case kMips64S32x4InterleaveLeft:
@@ -354,10 +369,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
- case kMips64S128Load8Splat:
- case kMips64S128Load16Splat:
- case kMips64S128Load32Splat:
- case kMips64S128Load64Splat:
+ case kMips64S128LoadSplat:
case kMips64S128Load8x8S:
case kMips64S128Load8x8U:
case kMips64S128Load16x4S:
@@ -790,11 +802,6 @@ int PrepareForTailCallLatency() {
Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1;
}
-int AssemblePopArgumentsAdoptFrameLatency() {
- return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
- PrepareForTailCallLatency();
-}
-
int AssertLatency() { return 1; }
int PrepareCallCFunctionLatency() {
@@ -1289,14 +1296,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArchCallCodeObject:
case kArchCallWasmFunction:
return CallLatency();
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- int latency = 0;
- if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
- latency = AssemblePopArgumentsAdoptFrameLatency();
- }
- return latency + JumpLatency();
- }
+ case kArchTailCallCodeObject:
case kArchTailCallWasm:
case kArchTailCallAddress:
return JumpLatency();
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 8bb2f5fc03..f704a03af8 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -397,36 +397,11 @@ InstructionOperand EmitAddBeforeS128LoadStore(InstructionSelector* selector,
return addr_reg;
}
-// Helper struct for load lane and store lane to indicate what memory size
-// to be encoded in the opcode, and the new lane index.
-struct LoadStoreLaneParams {
- MSASize sz;
- uint8_t laneidx;
- LoadStoreLaneParams(uint8_t laneidx, MSASize sz, int lanes)
- : sz(sz), laneidx(laneidx % lanes) {}
-};
-
-LoadStoreLaneParams GetLoadStoreLaneParams(MachineRepresentation rep,
- uint8_t laneidx) {
- switch (rep) {
- case MachineRepresentation::kWord8:
- return LoadStoreLaneParams(laneidx, MSA_B, 16);
- case MachineRepresentation::kWord16:
- return LoadStoreLaneParams(laneidx, MSA_H, 8);
- case MachineRepresentation::kWord32:
- return LoadStoreLaneParams(laneidx, MSA_W, 4);
- case MachineRepresentation::kWord64:
- return LoadStoreLaneParams(laneidx, MSA_D, 2);
- default:
- break;
- }
- UNREACHABLE();
-}
} // namespace
void InstructionSelector::VisitStoreLane(Node* node) {
StoreLaneParameters params = StoreLaneParametersOf(node->op());
- LoadStoreLaneParams f = GetLoadStoreLaneParams(params.rep, params.laneidx);
+ LoadStoreLaneParams f(params.rep, params.laneidx);
InstructionCode opcode = kMips64S128StoreLane;
opcode |= MiscField::encode(f.sz);
@@ -443,8 +418,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
void InstructionSelector::VisitLoadLane(Node* node) {
LoadLaneParameters params = LoadLaneParametersOf(node->op());
- LoadStoreLaneParams f =
- GetLoadStoreLaneParams(params.rep.representation(), params.laneidx);
+ LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
InstructionCode opcode = kMips64S128LoadLane;
opcode |= MiscField::encode(f.sz);
@@ -460,16 +434,20 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
case LoadTransformation::kS128Load8Splat:
- opcode = kMips64S128Load8Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_B);
break;
case LoadTransformation::kS128Load16Splat:
- opcode = kMips64S128Load16Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_H);
break;
case LoadTransformation::kS128Load32Splat:
- opcode = kMips64S128Load32Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_W);
break;
case LoadTransformation::kS128Load64Splat:
- opcode = kMips64S128Load64Splat;
+ opcode = kMips64S128LoadSplat;
+ opcode |= MiscField::encode(MSASize::MSA_D);
break;
case LoadTransformation::kS128Load8x8S:
opcode = kMips64S128Load8x8S;
@@ -539,7 +517,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
EmitLoad(this, node, opcode);
@@ -1839,8 +1817,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitUnalignedLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
@@ -2874,53 +2850,64 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kMips64F64x2Abs) \
- V(F64x2Neg, kMips64F64x2Neg) \
- V(F64x2Sqrt, kMips64F64x2Sqrt) \
- V(F64x2Ceil, kMips64F64x2Ceil) \
- V(F64x2Floor, kMips64F64x2Floor) \
- V(F64x2Trunc, kMips64F64x2Trunc) \
- V(F64x2NearestInt, kMips64F64x2NearestInt) \
- V(I64x2Neg, kMips64I64x2Neg) \
- V(I64x2BitMask, kMips64I64x2BitMask) \
- V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
- V(F32x4Abs, kMips64F32x4Abs) \
- V(F32x4Neg, kMips64F32x4Neg) \
- V(F32x4Sqrt, kMips64F32x4Sqrt) \
- V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
- V(F32x4Ceil, kMips64F32x4Ceil) \
- V(F32x4Floor, kMips64F32x4Floor) \
- V(F32x4Trunc, kMips64F32x4Trunc) \
- V(F32x4NearestInt, kMips64F32x4NearestInt) \
- V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
- V(I32x4Neg, kMips64I32x4Neg) \
- V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
- V(I32x4Abs, kMips64I32x4Abs) \
- V(I32x4BitMask, kMips64I32x4BitMask) \
- V(I16x8Neg, kMips64I16x8Neg) \
- V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
- V(I16x8Abs, kMips64I16x8Abs) \
- V(I16x8BitMask, kMips64I16x8BitMask) \
- V(I8x16Neg, kMips64I8x16Neg) \
- V(I8x16Abs, kMips64I8x16Abs) \
- V(I8x16BitMask, kMips64I8x16BitMask) \
- V(S128Not, kMips64S128Not) \
- V(V32x4AnyTrue, kMips64V32x4AnyTrue) \
- V(V32x4AllTrue, kMips64V32x4AllTrue) \
- V(V16x8AnyTrue, kMips64V16x8AnyTrue) \
- V(V16x8AllTrue, kMips64V16x8AllTrue) \
- V(V8x16AnyTrue, kMips64V8x16AnyTrue) \
- V(V8x16AllTrue, kMips64V8x16AllTrue)
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kMips64F64x2Abs) \
+ V(F64x2Neg, kMips64F64x2Neg) \
+ V(F64x2Sqrt, kMips64F64x2Sqrt) \
+ V(F64x2Ceil, kMips64F64x2Ceil) \
+ V(F64x2Floor, kMips64F64x2Floor) \
+ V(F64x2Trunc, kMips64F64x2Trunc) \
+ V(F64x2NearestInt, kMips64F64x2NearestInt) \
+ V(I64x2Neg, kMips64I64x2Neg) \
+ V(I64x2BitMask, kMips64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kMips64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kMips64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kMips64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
+ V(F32x4Abs, kMips64F32x4Abs) \
+ V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4Sqrt, kMips64F32x4Sqrt) \
+ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kMips64F32x4Ceil) \
+ V(F32x4Floor, kMips64F32x4Floor) \
+ V(F32x4Trunc, kMips64F32x4Trunc) \
+ V(F32x4NearestInt, kMips64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kMips64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kMips64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kMips64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kMips64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kMips64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kMips64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
+ V(I32x4Neg, kMips64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kMips64I32x4Abs) \
+ V(I32x4BitMask, kMips64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kMips64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kMips64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kMips64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kMips64I16x8Abs) \
+ V(I16x8BitMask, kMips64I16x8BitMask) \
+ V(I8x16Neg, kMips64I8x16Neg) \
+ V(I8x16Abs, kMips64I8x16Abs) \
+ V(I8x16Popcnt, kMips64I8x16Popcnt) \
+ V(I8x16BitMask, kMips64I8x16BitMask) \
+ V(S128Not, kMips64S128Not) \
+ V(V64x2AllTrue, kMips64V64x2AllTrue) \
+ V(V32x4AllTrue, kMips64V32x4AllTrue) \
+ V(V16x8AllTrue, kMips64V16x8AllTrue) \
+ V(V8x16AllTrue, kMips64V8x16AllTrue) \
+ V(V128AnyTrue, kMips64V128AnyTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2948,9 +2935,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Lt, kMips64F64x2Lt) \
V(F64x2Le, kMips64F64x2Le) \
V(I64x2Eq, kMips64I64x2Eq) \
+ V(I64x2Ne, kMips64I64x2Ne) \
V(I64x2Add, kMips64I64x2Add) \
V(I64x2Sub, kMips64I64x2Sub) \
V(I64x2Mul, kMips64I64x2Mul) \
+ V(I64x2GtS, kMips64I64x2GtS) \
+ V(I64x2GeS, kMips64I64x2GeS) \
V(F32x4Add, kMips64F32x4Add) \
V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
V(F32x4Sub, kMips64F32x4Sub) \
@@ -3288,6 +3278,18 @@ VISIT_EXT_MUL(I16x8, I8x16S, MSAS8)
VISIT_EXT_MUL(I16x8, I8x16U, MSAU8)
#undef VISIT_EXT_MUL
+#define VISIT_EXTADD_PAIRWISE(OPCODE, TYPE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Mips64OperandGenerator g(this); \
+ Emit(kMips64ExtAddPairwise | MiscField::encode(TYPE), \
+ g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S, MSAS8)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U, MSAU8)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
+#undef VISIT_EXTADD_PAIRWISE
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
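The selector change above folds the four size-specific load-splat opcodes into a single kMips64S128LoadSplat whose lane size travels in the MiscField of the instruction code, and the code generator decodes it back into an MSASize. A rough stand-alone sketch of that encode/decode idea; the bit positions and the placeholder opcode value are invented for the example and do not match V8's real InstructionCode layout:

#include <cstdint>
#include <cstdio>

enum MSASize : uint32_t { MSA_B = 0, MSA_H = 1, MSA_W = 2, MSA_D = 3 };

// Illustrative field layout: low 16 bits hold the arch opcode, two bits at
// position 16 hold the lane size. These offsets are made up for the example.
constexpr uint32_t kSizeShift = 16;
constexpr uint32_t kSizeMask = 0x3u << kSizeShift;

constexpr uint32_t EncodeSize(uint32_t opcode, MSASize sz) {
  return (opcode & ~kSizeMask) | (static_cast<uint32_t>(sz) << kSizeShift);
}

constexpr MSASize DecodeSize(uint32_t opcode) {
  return static_cast<MSASize>((opcode & kSizeMask) >> kSizeShift);
}

int main() {
  constexpr uint32_t kLoadSplat = 0x00AB;  // placeholder opcode value
  uint32_t instr = EncodeSize(kLoadSplat, MSA_W);
  std::printf("size field = %u\n", DecodeSize(instr));  // prints 2 (MSA_W)
  return 0;
}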
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 3d2e9d9364..4e5393bd22 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -276,8 +276,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
@@ -697,30 +696,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ cmpi(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -915,13 +890,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -1076,7 +1045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// f5ab7d3.
if (isWasmCapiFunction) {
CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1741,39 +1710,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CanonicalizeNaN(result, value);
break;
}
- case kPPC_Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ StoreSingleU(i.InputDoubleRegister(0),
- MemOperand(sp, -kSystemPointerSize), r0);
- frame_access_state()->IncreaseSPDelta(1);
- break;
- case MachineRepresentation::kFloat64:
- __ StoreDoubleU(i.InputDoubleRegister(0),
- MemOperand(sp, -kDoubleSize), r0);
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ addi(sp, sp, Operand(-kSimd128Size));
- __ StoreSimd128(i.InputSimd128Register(0), MemOperand(r0, sp), r0,
- kScratchSimd128Reg);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ StorePU(i.InputRegister(0), MemOperand(sp, -kSystemPointerSize), r0);
- frame_access_state()->IncreaseSPDelta(1);
+ case kPPC_Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
+ if (slots > pushed_slots) {
+ __ addi(sp, sp,
+ Operand(-((slots - pushed_slots) * kSystemPointerSize)));
+ }
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ StoreSingleU(i.InputDoubleRegister(1),
+ MemOperand(sp, -kSystemPointerSize), r0);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ StoreDoubleU(i.InputDoubleRegister(1),
+ MemOperand(sp, -kDoubleSize), r0);
+ break;
+ case MachineRepresentation::kSimd128:
+ __ addi(sp, sp, Operand(-kSimd128Size));
+ __ StoreSimd128(i.InputSimd128Register(1), MemOperand(r0, sp), r0,
+ kScratchSimd128Reg);
+ break;
+ default:
+ __ StorePU(i.InputRegister(1), MemOperand(sp, -kSystemPointerSize),
+ r0);
+ break;
}
+ frame_access_state()->IncreaseSPDelta(slots);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ }
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -3046,10 +3018,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vsububm(i.OutputSimd128Register(), tempFPReg1, kScratchSimd128Reg);
break;
}
- case kPPC_V64x2AnyTrue:
- case kPPC_V32x4AnyTrue:
- case kPPC_V16x8AnyTrue:
- case kPPC_V8x16AnyTrue: {
+ case kPPC_V128AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
constexpr int bit_number = 24;
@@ -3111,6 +3080,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xvcvuxwsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+
+ case kPPC_I64x2SConvertI32x4Low: {
+ __ vupklsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2SConvertI32x4High: {
+ __ vupkhsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2UConvertI32x4Low: {
+ constexpr int lane_width_in_bytes = 8;
+ __ vupklsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Zero extend.
+ __ mov(ip, Operand(0xFFFFFFFF));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vand(i.OutputSimd128Register(), kScratchSimd128Reg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kPPC_I64x2UConvertI32x4High: {
+ constexpr int lane_width_in_bytes = 8;
+ __ vupkhsw(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Zero extend.
+ __ mov(ip, Operand(0xFFFFFFFF));
+ __ mtvsrd(kScratchSimd128Reg, ip);
+ __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
+ Operand(1 * lane_width_in_bytes));
+ __ vand(i.OutputSimd128Register(), kScratchSimd128Reg,
+ i.OutputSimd128Register());
+ break;
+ }
+
case kPPC_I32x4SConvertI16x8Low: {
__ vupklsh(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3720,6 +3723,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchSimd128Reg);
break;
}
+#define SIGN_SELECT(compare_gt) \
+ Simd128Register src0 = i.InputSimd128Register(0); \
+ Simd128Register src1 = i.InputSimd128Register(1); \
+ Simd128Register src2 = i.InputSimd128Register(2); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg); \
+ __ compare_gt(kScratchSimd128Reg, kScratchSimd128Reg, src2); \
+ __ vsel(dst, src1, src0, kScratchSimd128Reg);
+ case kPPC_I8x16SignSelect: {
+ SIGN_SELECT(vcmpgtsb)
+ break;
+ }
+ case kPPC_I16x8SignSelect: {
+ SIGN_SELECT(vcmpgtsh)
+ break;
+ }
+ case kPPC_I32x4SignSelect: {
+ SIGN_SELECT(vcmpgtsw)
+ break;
+ }
+ case kPPC_I64x2SignSelect: {
+ SIGN_SELECT(vcmpgtsd)
+ break;
+ }
+#undef SIGN_SELECT
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
@@ -3833,7 +3861,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4073,7 +4101,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4152,7 +4180,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r6;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4160,9 +4187,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
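The rewritten kPPC_Push case above first skips any padding slots the slot allocator reclaimed (slots - pushed_slots) before storing the value itself. A small sketch of that gap arithmetic, assuming a 64-bit pointer size and using an invented helper name:

#include <cassert>

constexpr int kSystemPointerSize = 8;  // 64-bit target assumed for the example

// Bytes of reclaimed padding to skip before the value itself is pushed.
int GapBytes(int stack_decrement, int pushed_slots) {
  int slots = stack_decrement / kSystemPointerSize;
  assert(slots >= pushed_slots);
  return (slots - pushed_slots) * kSystemPointerSize;
}

int main() {
  // A double occupies one 8-byte slot; if the selector accumulated a 16-byte
  // decrement, 8 bytes of padding are skipped before the StoreDoubleU.
  assert(GapBytes(16, 1) == 8);
  assert(GapBytes(8, 1) == 0);  // no gap: push lands directly below sp
  return 0;
}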
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index b0aa6529c7..2ef553a4f5 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -264,6 +264,11 @@ namespace compiler {
V(PPC_I64x2ShrU) \
V(PPC_I64x2Neg) \
V(PPC_I64x2BitMask) \
+ V(PPC_I64x2SConvertI32x4Low) \
+ V(PPC_I64x2SConvertI32x4High) \
+ V(PPC_I64x2UConvertI32x4Low) \
+ V(PPC_I64x2UConvertI32x4High) \
+ V(PPC_I64x2SignSelect) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
V(PPC_I32x4ReplaceLane) \
@@ -296,6 +301,7 @@ namespace compiler {
V(PPC_I32x4DotI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8S) \
V(PPC_I32x4ExtAddPairwiseI16x8U) \
+ V(PPC_I32x4SignSelect) \
V(PPC_F32x4Qfma) \
V(PPC_F32x4Qfms) \
V(PPC_I16x8Splat) \
@@ -336,6 +342,7 @@ namespace compiler {
V(PPC_I16x8ExtAddPairwiseI8x16S) \
V(PPC_I16x8ExtAddPairwiseI8x16U) \
V(PPC_I16x8Q15MulRSatS) \
+ V(PPC_I16x8SignSelect) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -368,14 +375,12 @@ namespace compiler {
V(PPC_I8x16Shuffle) \
V(PPC_I8x16Swizzle) \
V(PPC_I8x16BitMask) \
- V(PPC_V64x2AnyTrue) \
- V(PPC_V32x4AnyTrue) \
- V(PPC_V16x8AnyTrue) \
- V(PPC_V8x16AnyTrue) \
+ V(PPC_I8x16SignSelect) \
V(PPC_V64x2AllTrue) \
V(PPC_V32x4AllTrue) \
V(PPC_V16x8AllTrue) \
V(PPC_V8x16AllTrue) \
+ V(PPC_V128AnyTrue) \
V(PPC_S128And) \
V(PPC_S128Or) \
V(PPC_S128Xor) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index a737d23e9a..90025c5a82 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -189,6 +189,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I64x2ShrU:
case kPPC_I64x2Neg:
case kPPC_I64x2BitMask:
+ case kPPC_I64x2SConvertI32x4Low:
+ case kPPC_I64x2SConvertI32x4High:
+ case kPPC_I64x2UConvertI32x4Low:
+ case kPPC_I64x2UConvertI32x4High:
+ case kPPC_I64x2SignSelect:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
case kPPC_I32x4ReplaceLane:
@@ -221,6 +226,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4DotI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8S:
case kPPC_I32x4ExtAddPairwiseI16x8U:
+ case kPPC_I32x4SignSelect:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -259,6 +265,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8ExtAddPairwiseI8x16S:
case kPPC_I16x8ExtAddPairwiseI8x16U:
case kPPC_I16x8Q15MulRSatS:
+ case kPPC_I16x8SignSelect:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -291,14 +298,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16Shuffle:
case kPPC_I8x16Swizzle:
case kPPC_I8x16BitMask:
- case kPPC_V64x2AnyTrue:
- case kPPC_V32x4AnyTrue:
- case kPPC_V16x8AnyTrue:
- case kPPC_V8x16AnyTrue:
+ case kPPC_I8x16SignSelect:
case kPPC_V64x2AllTrue:
case kPPC_V32x4AllTrue:
case kPPC_V16x8AllTrue:
case kPPC_V8x16AllTrue:
+ case kPPC_V128AnyTrue:
case kPPC_S128And:
case kPPC_S128Or:
case kPPC_S128Xor:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 3d4697b380..05fa443b41 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -230,7 +230,7 @@ void InstructionSelector::VisitLoad(Node* node) {
if (node->opcode() == IrOpcode::kPoisonedLoad &&
poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
@@ -1862,18 +1862,20 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue;
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
}
}
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
@@ -2174,6 +2176,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2237,42 +2241,50 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(S128Xor) \
V(S128AndNot)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(F32x4Sqrt) \
- V(F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt) \
- V(I64x2Neg) \
- V(I32x4Neg) \
- V(I32x4Abs) \
- V(I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I16x8Neg) \
- V(I16x8Abs) \
- V(I8x16Neg) \
- V(I8x16Abs) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Sqrt) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I32x4Neg) \
+ V(I32x4Abs) \
+ V(I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8Neg) \
+ V(I16x8Abs) \
+ V(I8x16Neg) \
+ V(I8x16Abs) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
V(S128Not)
#define SIMD_SHIFT_LIST(V) \
@@ -2290,9 +2302,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2496,6 +2507,28 @@ void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitF32x4DemoteF64x2Zero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
@@ -2522,6 +2555,28 @@ void InstructionSelector::EmitPrepareResults(
}
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kPPC_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kPPC_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kPPC_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kPPC_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ PPCOperandGenerator g(this);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(params.laneidx));
+}
+
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
PPCOperandGenerator g(this);
@@ -2573,6 +2628,32 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
+void InstructionSelector::VisitStoreLane(Node* node) {
+ PPCOperandGenerator g(this);
+
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kPPC_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kPPC_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kPPC_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kPPC_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ InstructionOperand inputs[4];
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
+ inputs[0] = value_operand;
+ inputs[1] = g.UseRegister(node->InputAt(0));
+ inputs[2] = g.UseRegister(node->InputAt(1));
+ inputs[3] = g.UseImmediate(params.laneidx);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
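On the selector side, the EmitPrepareArguments loop above accumulates a stack decrement while it walks over alignment holes and only hands the running total to the next real push. A toy model of that accumulation, with a stand-in PushParameter struct that is not V8's type:

#include <cstdio>
#include <vector>

constexpr int kSystemPointerSize = 8;

struct PushParameter { bool has_node; };  // stand-in for the real parameter

int main() {
  // Arguments already in push order; the middle entry is an alignment hole.
  std::vector<PushParameter> args = {{true}, {false}, {true}};
  int stack_decrement = 0;
  for (const PushParameter& input : args) {
    stack_decrement += kSystemPointerSize;
    if (!input.has_node) continue;  // hole: keep accumulating the decrement
    std::printf("push with decrement %d\n", stack_decrement);
    stack_decrement = 0;  // the emitted push consumes the whole decrement
  }
  // Prints "push with decrement 8" then "push with decrement 16".
  return 0;
}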
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 8280665c90..84145c8779 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -2650,7 +2650,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) {
LiveRangeBundle* LiveRangeBundle::TryMerge(LiveRangeBundle* lhs,
LiveRangeBundle* rhs,
bool trace_alloc) {
- if (rhs == lhs) return nullptr;
+ if (rhs == lhs) return lhs;
auto iter1 = lhs->uses_.begin();
auto iter2 = rhs->uses_.begin();
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
new file mode 100644
index 0000000000..cc83f22c65
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -0,0 +1,2775 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/wasm-code-manager.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_riscv64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds RISC-V-specific methods to convert InstructionOperands.
+class RiscvOperandConverter final : public InstructionOperandConverter {
+ public:
+ RiscvOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+ // Single (Float) and Double register namespace is same on RISC-V,
+ // both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+ // maybe not done on arm due to const pool ??
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ // TODO(plind): r6 address mode, to be implemented ...
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode, StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode),
+ stub_mode_(stub_mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ __ Add64(scratch1_, object_, index_);
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode, wasm::WasmCode::kRecordWrite);
+ } else {
+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+ StubCallMode const stub_mode_;
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return Uless;
+ case kUnsignedGreaterThanOrEqual:
+ return Ugreater_equal;
+ case kUnsignedLessThanOrEqual:
+ return Uless_equal;
+ case kUnsignedGreaterThan:
+ return Ugreater;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
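+// RISC-V FP compares produce a boolean in an integer register and only exist
+// in EQ/LT/LE forms, so the remaining conditions are encoded by returning the
+// complementary FPUCondition together with a negated predicate; callers then
+// test the compare result for false instead of true.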
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return EQ;
+ case kNotEqual:
+ *predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return LT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return LT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return LE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return LE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ RiscvOperandConverter const& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
+ }
+}
+
+} // namespace
+
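+// Atomic loads and stores below use a conservative fence-based mapping
+// (sync() around the plain access); read-modify-write operations use
+// load-reserved/store-conditional retry loops via the Ll/Sc (32-bit) and
+// Lld/Scd (64-bit) macro-assembler helpers.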
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
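+// RISC-V has no sub-word lr/sc, so the *_EXT macros below emulate byte and
+// halfword atomics on the enclosing aligned word: the low address bits (0x3
+// for a 32-bit container, 0x7 for a 64-bit one) give the byte offset, Sll32
+// by 3 turns that into a bit offset, and ExtractBits/InsertBits splice the
+// narrow value in and out of the loaded word inside the lr/sc retry loop.
+// Roughly, an 8-bit atomic binop expands to (illustrative sketch only):
+//
+//   loop: lr.w  temp, (aligned_addr)
+//         extract the old 8-bit value at bit_offset into the output
+//         apply the binop, insert the new 8 bits back into temp
+//         sc.w  temp, (aligned_addr)   // retry while temp != 0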
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ Sll32(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ Move(i.TempRegister(1), i.InputRegister(2)); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, ne, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, ne, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ Move(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ And(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub64(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ Sll32(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, ne, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
+ } while (0)
+
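+// Transcendental float64 operations are not lowered to native instructions;
+// they call the ieee754 runtime functions through the C ABI, passing the
+// operands in FP parameter registers and reading the result back from the FP
+// return register.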
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ Move(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
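+// Resizes the stack before a tail call so that the first unused slot above sp
+// matches what the callee expects; shrinking can be suppressed (see
+// AssembleTailCallBeforeGap, which only grows the stack before the gap moves
+// are emitted).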
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub64(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add64(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(kScratchReg);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Lw(kScratchReg,
+ FieldMemOperand(kScratchReg,
+ CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(kScratchReg, kScratchReg,
+ Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
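+ // If the computed and expected code start addresses match, both
+ // differences are zero, the arithmetic shift yields 0, and Nor(0, 0)
+ // produces an all-ones mask; any mismatch sets the sign bit, so the mask
+ // collapses to zero.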
+ __ ComputeCodeStartAddress(kScratchReg);
+ __ Move(kSpeculationPoisonRegister, kScratchReg);
+ __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ kScratchReg);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerSystemPointer - 1);
+ __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Add64(kScratchReg, i.InputRegister(0), 0);
+ __ Call(kScratchReg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Add64(kScratchReg, i.InputRegister(0), 0);
+ __ Jump(kScratchReg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ Label after_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ if (isWasmCapiFunction) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &after_call, RelocInfo::EXTERNAL_REFERENCE);
+ __ Sd(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ __ bind(&after_call);
+ if (isWasmCapiFunction) {
+ RecordSafepoint(instr->reference_map());
+ }
+
+ frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+ // ARM), there may be more strict alignment requirement, causing old SP
+ // to be saved on the stack. In those cases, we can not calculate the SP
+ // delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSAAssert:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NONE);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ Move(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ Move(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
+ scratch0, scratch1, mode,
+ DetermineStubCallMode());
+ __ Add64(kScratchReg, object, index);
+ __ Sd(value, MemOperand(kScratchReg));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add64(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+ // Verify that the output register is properly aligned.
+ __ And(kScratchReg, i.OutputRegister(),
+ Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
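+ // If the slot's address is not already aligned, bump the returned pointer
+ // up to the next `alignment` boundary; for a 2 * kSystemPointerSize
+ // alignment a single pointer-sized bump is sufficient.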
+ if (alignment == 2 * kSystemPointerSize) {
+ Label done;
+ __ Add64(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Add64(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kSystemPointerSize) {
+ Label done;
+ __ Add64(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Sub64(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Add64(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
+
+ break;
+ }
+ case kArchWordPoisonOnSpeculation:
+ __ And(i.OutputRegister(), i.InputRegister(0),
+ kSpeculationPoisonRegister);
+ break;
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kRiscvAdd32:
+ __ Add32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAdd64:
+ __ Add64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAddOvf64:
+ __ AddOverflow64(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvSub32:
+ __ Sub32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSub64:
+ __ Sub64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSubOvf64:
+ __ SubOverflow64(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvMul32:
+ __ Mul32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMulOvf32:
+ __ MulOverflow32(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), kScratchReg);
+ break;
+ case kRiscvMulHigh32:
+ __ Mulh32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMulHighU32:
+ __ Mulhu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg, kScratchReg2);
+ break;
+ case kRiscvMulHigh64:
+ __ Mulh64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvDiv32: {
+ __ Div32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvDivU32: {
+ __ Divu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvMod32:
+ __ Mod32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvModU32:
+ __ Modu32(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvMul64:
+ __ Mul64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvDiv64: {
+ __ Div64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvDivU64: {
+ __ Divu64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ // Set output to zero if divisor == 0
+ __ LoadZeroIfConditionZero(i.OutputRegister(), i.InputRegister(1));
+ break;
+ }
+ case kRiscvMod64:
+ __ Mod64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvModU64:
+ __ Modu64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAnd:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvAnd32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvOr:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvOr32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kRiscvNor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ }
+ break;
+ case kRiscvXor:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvXor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ Sll32(i.OutputRegister(), i.OutputRegister(), 0x0);
+ break;
+ case kRiscvClz32:
+ __ Clz32(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvClz64:
+ __ Clz64(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvCtz32: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Ctz32(dst, src);
+ } break;
+ case kRiscvCtz64: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Ctz64(dst, src);
+ } break;
+ case kRiscvPopcnt32: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Popcnt32(dst, src);
+ } break;
+ case kRiscvPopcnt64: {
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ __ Popcnt64(dst, src);
+ } break;
+ case kRiscvShl32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Sll32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvShr32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Srl32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvSar32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ Sra32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kRiscvZeroExtendWord: {
+ __ ZeroExtendWord(i.OutputRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvSignExtendWord: {
+ __ SignExtendWord(i.OutputRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvShl64:
+ __ Sll64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvShr64:
+ __ Srl64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvSar64:
+ __ Sra64(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvRor32:
+ __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvRor64:
+ __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kRiscvTst:
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kRiscvCmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kRiscvMov:
+ // TODO(plind): Should we combine mov/li like this, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ Move(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kRiscvCmpS: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ // The comparison result is set in kScratchReg.
+ __ CompareF32(kScratchReg, cc, left, right);
+ } break;
+ case kRiscvAddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvSubS:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvDivS:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ // Move the result into the double result register.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
+ case kRiscvAbsS:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvNegS:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvSqrtS: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kRiscvMaxS:
+ __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMinS:
+ __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvCmpD: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+ // The comparison result is set in kScratchReg.
+ __ CompareF64(kScratchReg, cc, left, right);
+ } break;
+ case kRiscvAddD:
+ // TODO(plind): add special case: combine mult & add.
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvSubD:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMulD:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvDivD:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvModD: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ // Move the result into the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ break;
+ }
+ case kRiscvAbsD:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvNegD:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvSqrtD: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kRiscvMaxD:
+ __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvMinD:
+ __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kRiscvFloat64RoundDown: {
+ __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundDown: {
+ __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundTruncate: {
+ __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundTruncate: {
+ __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundUp: {
+ __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundUp: {
+ __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat64RoundTiesEven: {
+ __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32RoundTiesEven: {
+ __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kRiscvFloat32Max: {
+ __ Float32Max(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64Max: {
+ __ Float64Max(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat32Min: {
+ __ Float32Min(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64Min: {
+ __ Float64Min(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1));
+ break;
+ }
+ case kRiscvFloat64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvCvtSD:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvCvtDS:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kRiscvCvtDW: {
+ __ fcvt_d_w(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSW: {
+ __ fcvt_s_w(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSUw: {
+ __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSL: {
+ __ fcvt_s_l(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDL: {
+ __ fcvt_d_l(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDUw: {
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtDUl: {
+ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvCvtSUl: {
+ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvFloorWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Floor_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvCeilWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ceil_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvRoundWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Round_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncWD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_w_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvFloorWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Floor_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvCeilWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ceil_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvRoundWS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Round_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncWS: {
+ Label done;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Trunc_w_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+
+ // On RISC-V, if the input value exceeds INT32_MAX, the result of fcvt
+ // is INT32_MAX. Note that, since INT32_MAX means the lower 31 bits are
+ // all 1s, INT32_MAX cannot be represented precisely as a float, so an
+ // fcvt result of INT32_MAX always indicates overflow.
+ //
+ // In wasm_compiler, to detect overflow when converting an FP value, fval,
+ // to an integer, V8 checks whether I2F(F2I(fval)) equals fval. However, if
+ // fval == INT32_MAX+1, the value of I2F(F2I(fval)) happens to be fval. So,
+ // INT32_MAX is not a good value to indicate overflow. Instead, we use
+ // INT32_MIN as the converted result of an out-of-range FP value,
+ // exploiting the fact that INT32_MAX+1 is INT32_MIN.
+ //
+ // If the conversion overflows, the result is set to INT32_MIN. Overflow
+ // is detected here by testing whether output + 1 < output (i.e.,
+ // kScratchReg < output).
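+ // For example, if the truncation saturated to INT32_MAX, Add32 below wraps
+ // kScratchReg around to INT32_MIN, the branch is not taken, and the output
+ // is replaced with INT32_MIN.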
+ if (set_overflow_to_min_i32) {
+ __ Add32(kScratchReg, i.OutputRegister(), 1);
+ __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ Move(i.OutputRegister(), kScratchReg);
+ __ bind(&done);
+ }
+ break;
+ }
+ case kRiscvTruncLS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_l_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncLD: {
+ Label done;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ __ Trunc_l_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ if (set_overflow_to_min_i64) {
+ __ Add64(kScratchReg, i.OutputRegister(), 1);
+ __ Branch(&done, lt, i.OutputRegister(), Operand(kScratchReg));
+ __ Move(i.OutputRegister(), kScratchReg);
+ __ bind(&done);
+ }
+ break;
+ }
+ case kRiscvTruncUwD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncUwS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ bool set_overflow_to_min_u32 = MiscField::decode(instr->opcode());
+ __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+
+ // On RISC-V, if the input value exceeds UINT32_MAX, the result of fcvt
+ // is UINT32_MAX. Note that, since UINT32_MAX means all 32 bits are 1s,
+ // UINT32_MAX cannot be represented precisely as a float, so an fcvt result
+ // of UINT32_MAX always indicates overflow.
+ //
+ // In wasm_compiler.cc, to detect overflow when converting an FP value,
+ // fval, to an integer, V8 checks whether I2F(F2I(fval)) equals fval.
+ // However, if fval == UINT32_MAX+1, the value of I2F(F2I(fval)) happens to
+ // be fval. So, UINT32_MAX is not a good value to indicate overflow.
+ // Instead, we use 0 as the converted result of an out-of-range FP value,
+ // exploiting the fact that UINT32_MAX+1 is 0.
+ if (set_overflow_to_min_u32) {
+ __ Add32(kScratchReg, i.OutputRegister(), 1);
+ // Set output to zero if the result overflows (i.e., is UINT32_MAX)
+ __ LoadZeroIfConditionZero(i.OutputRegister(), kScratchReg);
+ }
+ break;
+ }
+ case kRiscvTruncUlS: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvTruncUlD: {
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), result);
+ break;
+ }
+ case kRiscvBitcastDL:
+ __ fmv_x_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvBitcastLD:
+ __ fmv_d_x(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kRiscvBitcastInt32ToFloat32:
+ __ fmv_w_x(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kRiscvBitcastFloat32ToInt32:
+ __ fmv_x_w(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64ExtractLowWord32:
+ __ ExtractLowWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64ExtractHighWord32:
+ __ ExtractHighWordFromF64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kRiscvFloat64InsertLowWord32:
+ __ InsertLowWordF64(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kRiscvFloat64InsertHighWord32:
+ __ InsertHighWordF64(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kRiscvSignExtendByte:
+ __ SignExtendByte(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvSignExtendShort:
+ __ SignExtendShort(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kRiscvLbu:
+ __ Lbu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLb:
+ __ Lb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSb:
+ __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLhu:
+ __ Lhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlhu:
+ __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLh:
+ __ Lh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlh:
+ __ Ulh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSh:
+ __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsh:
+ __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLw:
+ __ Lw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlw:
+ __ Ulw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLwu:
+ __ Lwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUlwu:
+ __ Ulwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvLd:
+ __ Ld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvUld:
+ __ Uld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kRiscvSw:
+ __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsw:
+ __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvSd:
+ __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvUsd:
+ __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kRiscvLoadFloat: {
+ __ LoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kRiscvULoadFloat: {
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kRiscvStoreFloat: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ __ StoreFloat(ft, operand);
+ break;
+ }
+ case kRiscvUStoreFloat: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ __ UStoreFloat(ft, operand);
+ break;
+ }
+ case kRiscvLoadDouble:
+ __ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kRiscvULoadDouble:
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kRiscvStoreDouble: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+ __ StoreDouble(ft, i.MemoryOperand());
+ break;
+ }
+ case kRiscvUStoreDouble: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ }
+ __ UStoreDouble(ft, i.MemoryOperand());
+ break;
+ }
+ case kRiscvSync: {
+ __ sync();
+ break;
+ }
+ case kRiscvPush:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub32(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kRiscvPeek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ LoadFloat(
+ i.OutputSingleRegister(0),
+ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
+ }
+ } else {
+ __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kRiscvStackClaim: {
+ __ Sub64(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kRiscvStoreToStackSlot: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ UNREACHABLE();
+ } else {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, i.InputInt32(1)));
+ }
+ } else {
+ __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kRiscvByteSwap64: {
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kRiscvByteSwap32: {
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kWord32AtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
+ break;
+ case kWord32AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kWord32AtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
+ break;
+ case kWord32AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kWord32AtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ break;
+ case kRiscvWord64AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kRiscvWord64AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kRiscvWord64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ break;
+ case kRiscvWord64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
+ break;
+ case kWord32AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kWord32AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kWord32AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kRiscvWord64AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kRiscvWord64AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kRiscvWord64AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kRiscvWord64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
+ break;
+ case kWord32AtomicExchangeInt8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
+ break;
+ case kWord32AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case kWord32AtomicExchangeInt16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
+ break;
+ case kWord32AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case kWord32AtomicExchangeWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kRiscvWord64AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ case kRiscvWord64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+ case kWord32AtomicCompareExchangeInt8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
+ break;
+ case kWord32AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case kWord32AtomicCompareExchangeInt16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
+ break;
+ case kWord32AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case kWord32AtomicCompareExchangeWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ case kRiscvWord64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
+ break; \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add32)
+ ATOMIC_BINOP_CASE(Sub, Sub32)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kRiscvWord64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub64)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+ case kRiscvAssertEqual:
+ __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
+ i.InputRegister(0), Operand(i.InputRegister(1)));
+ break;
+
+ default:
+ UNIMPLEMENTED();
+ }
+ return kSuccess;
+} // NOLINT(readability/fn_size)
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ tasm->
+ RiscvOperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+ // RISC-V does not have condition code flags, so compare and branch are
+ // implemented differently than on other architectures. The compare
+ // operations emit riscv64 pseudo-instructions, which are handled here by
+ // branch instructions that do the actual comparison. It is essential that
+ // the input registers of the compare pseudo-op are not modified before this
+ // branch op, as they are tested here.
+
+ if (instr->arch_opcode() == kRiscvTst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kRiscvAdd64 ||
+ instr->arch_opcode() == kRiscvSub64) {
+ cc = FlagsConditionToConditionOvf(condition);
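+ // The full 64-bit result of the 32-bit Add/Sub is kept in the output
+ // register; it overflowed the int32 range iff (result >> 31) differs from
+ // (result >> 32), which the two arithmetic shifts below make comparable.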
+ __ Sra64(kScratchReg, i.OutputRegister(), 32);
+ __ Sra64(kScratchReg2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
+ instr->arch_opcode() == kRiscvSubOvf64) {
+ switch (condition) {
+ // Overflow occurs if overflow register is negative
+ case kOverflow:
+ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kRiscvMulOvf32) {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kRiscvMulOvf32, condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kRiscvCmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(0);
+ __ Sub64(lhs_register, sp, offset);
+ }
+ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
+ } else if (instr->arch_opcode() == kRiscvCmpS ||
+ instr->arch_opcode() == kRiscvCmpD) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ // floating-point compare result is set in kScratchReg
+ if (predicate) {
+ __ BranchTrueF(kScratchReg, tlabel);
+ } else {
+ __ BranchFalseF(kScratchReg, tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ RiscvOperandConverter i(this, instr);
+ condition = NegateFlagsCondition(condition);
+
+ switch (instr->arch_opcode()) {
+ case kRiscvCmp: {
+ __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
+ FlagsConditionToConditionCmp(condition));
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
+ }
+ return;
+ case kRiscvTst: {
+ switch (condition) {
+ case kEqual:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ case kNotEqual:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return;
+ case kRiscvAdd64:
+ case kRiscvSub64: {
+ // The overflow check below leaves 1 in kScratchReg2 on overflow, 0 otherwise.
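+ // Bit 63 of the 64-bit result differs from bit 31 exactly when the 32-bit
+ // operation overflowed, since the inputs are sign-extended 32-bit values.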
+ __ Srl64(kScratchReg, i.OutputRegister(), 63);
+ __ Srl32(kScratchReg2, i.OutputRegister(), 31);
+ __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvAddOvf64:
+ case kRiscvSubOvf64: {
+ // Overflow occurs if overflow register is negative
+ __ Slt(kScratchReg2, kScratchReg, zero_reg);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvMulOvf32: {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kRiscvCmpS:
+ case kRiscvCmpD: {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
+ } else {
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ }
+ }
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ RiscvOperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ RiscvOperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+ // RISC-V does not have condition code flags, so compare and branch are
+ // implemented differently from other architectures. The compare operations
+ // emit riscv64 pseudo-instructions, which are checked and handled here.
+
+ if (instr->arch_opcode() == kRiscvTst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, kScratchReg, 1);
+ } else {
+ __ Sltu(result, zero_reg, kScratchReg);
+ }
+ return;
+ } else if (instr->arch_opcode() == kRiscvAdd64 ||
+ instr->arch_opcode() == kRiscvSub64) {
+ cc = FlagsConditionToConditionOvf(condition);
+ // The overflow check leaves 1 in the result register on overflow, 0 otherwise.
+ __ Srl64(kScratchReg, i.OutputRegister(), 63);
+ __ Srl32(kScratchReg2, i.OutputRegister(), 31);
+ __ Xor(result, kScratchReg, kScratchReg2);
+ if (cc == eq) // Toggle result for not overflow.
+ __ Xor(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kRiscvAddOvf64 ||
+ instr->arch_opcode() == kRiscvSubOvf64) {
+ // Overflow occurs if overflow register is negative
+ __ Slt(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kRiscvMulOvf32) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kRiscvCmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ if (instr->InputAt(1)->IsImmediate()) {
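+ // For eq/ne against an immediate, prefer a single addi of the negated
+ // immediate followed by a zero test; otherwise fall back to xori, or to
+ // materializing the immediate when it does not fit in 12 bits.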
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add64(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ if (is_uint12(right.immediate())) {
+ __ Xor(result, left, right);
+ } else {
+ __ li(kScratchReg, right);
+ __ Xor(result, left, kScratchReg);
+ }
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Uless:
+ case Ugreater_equal: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == Ugreater_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Ugreater:
+ case Uless_equal: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == Uless_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kRiscvCmpD ||
+ instr->arch_opcode() == kRiscvCmpS) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
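+ // Lazily materialize +0.0 in kDoubleRegZero the first time an operand is
+ // the canonical FP zero register.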
+ if ((instr->arch_opcode() == kRiscvCmpD) &&
+ (left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0);
+ } else if ((instr->arch_opcode() == kRiscvCmpS) &&
+ (left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsSingleZeroRegSet()) {
+ __ LoadFPRImmediate(kDoubleRegZero, 0.0f);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ // The RISC-V compare leaves 0 or 1 in kScratchReg. Use it directly when the
+ // predicate holds; otherwise toggle it (i.e., 0 -> 1, 1 -> 0).
+ if (predicate) {
+ __ Move(result, kScratchReg);
+ } else {
+ __ Xor(result, kScratchReg, 1);
+ }
+ return;
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ RiscvOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
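+ // Indices at or beyond case_count branch to the default block, which is
+ // the first RPO input.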
+ __ Branch(GetLabel(i.InputRpo(1)), Ugreater_equal, input,
+ Operand(case_count));
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ DCHECK_EQ(kNumCalleeSaved, count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub64(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ Move(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub64(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ ResetSpeculationPoison();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+ if (info()->IsWasm() && required_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
+ __ Ld(
+ kScratchReg,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld(kScratchReg, MemOperand(kScratchReg));
+ __ Add64(kScratchReg, kScratchReg,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(kScratchReg));
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub64(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub64(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add64(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ RiscvOperandConverter g(this, nullptr);
+
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+ // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of popping its JS
+ // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+ // Canonicalize JSFunction return sites for now unless they have a variable
+ // number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Add64(t0, t0, Operand(1)); // Also pop the receiver.
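+ // Clamp the dynamic argument count in t0 to at least parameter_count.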
+ if (parameter_count > 1) {
+ Label done;
+ __ li(kScratchReg, parameter_count);
+ __ Branch(&done, ge, t0, Operand(kScratchReg));
+ __ Move(t0, kScratchReg);
+ __ bind(&done);
+ }
+ __ Sll64(t0, t0, kSystemPointerSizeLog2);
+ __ Add64(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ // It should be a kInt32 or a kInt64.
+ DCHECK_LE(g.ToConstant(additional_pop_count).type(), Constant::kInt64);
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
+ __ Sll64(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ Add64(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ RiscvOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ Sd(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ Ld(temp, src);
+ __ Sd(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+ if (RelocInfo::IsWasmReference(src.rmode())) {
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt64()));
+ }
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers
+ break;
+ }
+ if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ Sw(zero_reg, dst);
+ } else {
+ __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ Sw(kScratchReg, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ LoadFPRImmediate(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ LoadFPRImmediate(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ if (rep == MachineRepresentation::kFloat32) {
+ __ StoreFloat(src, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ }
+ }
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ if (destination->IsFPRegister()) {
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(g.ToDoubleRegister(destination), src);
+ }
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(temp, src);
+ __ StoreFloat(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(temp, src);
+ __ StoreDouble(temp, g.ToMemOperand(destination));
+ }
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ RiscvOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ Ld(src, dst);
+ __ Sd(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = kScratchReg2;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Ld(temp_0, src);
+ __ Ld(temp_1, dst);
+ __ Sd(temp_0, dst);
+ __ Sd(temp_1, src);
+ } else if (source->IsFPRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ if (rep == MachineRepresentation::kFloat32) {
+ __ MoveFloat(temp, src);
+ __ LoadFloat(src, dst);
+ __ StoreFloat(temp, dst);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ MoveDouble(temp, src);
+ __ LoadDouble(src, dst);
+ __ StoreDouble(temp, dst);
+ }
+ }
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ Register temp_0 = kScratchReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
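+ // Swap the FP stack slots word by word through temp_0 while temp_1 keeps
+ // the destination value until it is stored back into the source slot.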
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ UNIMPLEMENTED();
+ } else {
+ FPURegister temp_1 = kScratchDoubleReg;
+ if (rep == MachineRepresentation::kFloat32) {
+ __ LoadFloat(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ StoreFloat(temp_1, src0);
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kFloat64);
+ __ LoadDouble(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ StoreDouble(temp_1, src0);
+ }
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit RISC-V we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
new file mode 100644
index 0000000000..fae854ec02
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -0,0 +1,447 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
+#define V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// RISC-V-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(RiscvAdd32) \
+ V(RiscvAdd64) \
+ V(RiscvAddOvf64) \
+ V(RiscvSub32) \
+ V(RiscvSub64) \
+ V(RiscvSubOvf64) \
+ V(RiscvMul32) \
+ V(RiscvMulOvf32) \
+ V(RiscvMulHigh32) \
+ V(RiscvMulHigh64) \
+ V(RiscvMulHighU32) \
+ V(RiscvMul64) \
+ V(RiscvDiv32) \
+ V(RiscvDiv64) \
+ V(RiscvDivU32) \
+ V(RiscvDivU64) \
+ V(RiscvMod32) \
+ V(RiscvMod64) \
+ V(RiscvModU32) \
+ V(RiscvModU64) \
+ V(RiscvAnd) \
+ V(RiscvAnd32) \
+ V(RiscvOr) \
+ V(RiscvOr32) \
+ V(RiscvNor) \
+ V(RiscvNor32) \
+ V(RiscvXor) \
+ V(RiscvXor32) \
+ V(RiscvClz32) \
+ V(RiscvShl32) \
+ V(RiscvShr32) \
+ V(RiscvSar32) \
+ V(RiscvZeroExtendWord) \
+ V(RiscvSignExtendWord) \
+ V(RiscvClz64) \
+ V(RiscvCtz32) \
+ V(RiscvCtz64) \
+ V(RiscvPopcnt32) \
+ V(RiscvPopcnt64) \
+ V(RiscvShl64) \
+ V(RiscvShr64) \
+ V(RiscvSar64) \
+ V(RiscvRor32) \
+ V(RiscvRor64) \
+ V(RiscvMov) \
+ V(RiscvTst) \
+ V(RiscvCmp) \
+ V(RiscvCmpS) \
+ V(RiscvAddS) \
+ V(RiscvSubS) \
+ V(RiscvMulS) \
+ V(RiscvDivS) \
+ V(RiscvModS) \
+ V(RiscvAbsS) \
+ V(RiscvNegS) \
+ V(RiscvSqrtS) \
+ V(RiscvMaxS) \
+ V(RiscvMinS) \
+ V(RiscvCmpD) \
+ V(RiscvAddD) \
+ V(RiscvSubD) \
+ V(RiscvMulD) \
+ V(RiscvDivD) \
+ V(RiscvModD) \
+ V(RiscvAbsD) \
+ V(RiscvNegD) \
+ V(RiscvSqrtD) \
+ V(RiscvMaxD) \
+ V(RiscvMinD) \
+ V(RiscvFloat64RoundDown) \
+ V(RiscvFloat64RoundTruncate) \
+ V(RiscvFloat64RoundUp) \
+ V(RiscvFloat64RoundTiesEven) \
+ V(RiscvFloat32RoundDown) \
+ V(RiscvFloat32RoundTruncate) \
+ V(RiscvFloat32RoundUp) \
+ V(RiscvFloat32RoundTiesEven) \
+ V(RiscvCvtSD) \
+ V(RiscvCvtDS) \
+ V(RiscvTruncWD) \
+ V(RiscvRoundWD) \
+ V(RiscvFloorWD) \
+ V(RiscvCeilWD) \
+ V(RiscvTruncWS) \
+ V(RiscvRoundWS) \
+ V(RiscvFloorWS) \
+ V(RiscvCeilWS) \
+ V(RiscvTruncLS) \
+ V(RiscvTruncLD) \
+ V(RiscvTruncUwD) \
+ V(RiscvTruncUwS) \
+ V(RiscvTruncUlS) \
+ V(RiscvTruncUlD) \
+ V(RiscvCvtDW) \
+ V(RiscvCvtSL) \
+ V(RiscvCvtSW) \
+ V(RiscvCvtSUw) \
+ V(RiscvCvtSUl) \
+ V(RiscvCvtDL) \
+ V(RiscvCvtDUw) \
+ V(RiscvCvtDUl) \
+ V(RiscvLb) \
+ V(RiscvLbu) \
+ V(RiscvSb) \
+ V(RiscvLh) \
+ V(RiscvUlh) \
+ V(RiscvLhu) \
+ V(RiscvUlhu) \
+ V(RiscvSh) \
+ V(RiscvUsh) \
+ V(RiscvLd) \
+ V(RiscvUld) \
+ V(RiscvLw) \
+ V(RiscvUlw) \
+ V(RiscvLwu) \
+ V(RiscvUlwu) \
+ V(RiscvSw) \
+ V(RiscvUsw) \
+ V(RiscvSd) \
+ V(RiscvUsd) \
+ V(RiscvLoadFloat) \
+ V(RiscvULoadFloat) \
+ V(RiscvStoreFloat) \
+ V(RiscvUStoreFloat) \
+ V(RiscvLoadDouble) \
+ V(RiscvULoadDouble) \
+ V(RiscvStoreDouble) \
+ V(RiscvUStoreDouble) \
+ V(RiscvBitcastDL) \
+ V(RiscvBitcastLD) \
+ V(RiscvBitcastInt32ToFloat32) \
+ V(RiscvBitcastFloat32ToInt32) \
+ V(RiscvFloat64ExtractLowWord32) \
+ V(RiscvFloat64ExtractHighWord32) \
+ V(RiscvFloat64InsertLowWord32) \
+ V(RiscvFloat64InsertHighWord32) \
+ V(RiscvFloat32Max) \
+ V(RiscvFloat64Max) \
+ V(RiscvFloat32Min) \
+ V(RiscvFloat64Min) \
+ V(RiscvFloat64SilenceNaN) \
+ V(RiscvPush) \
+ V(RiscvPeek) \
+ V(RiscvByteSwap64) \
+ V(RiscvByteSwap32) \
+ V(RiscvStoreToStackSlot) \
+ V(RiscvStackClaim) \
+ V(RiscvSignExtendByte) \
+ V(RiscvSignExtendShort) \
+ V(RiscvSync) \
+ V(RiscvAssertEqual) \
+ V(RiscvS128Const) \
+ V(RiscvS128Zero) \
+ V(RiscvS128AllOnes) \
+ V(RiscvI32x4Splat) \
+ V(RiscvI32x4ExtractLane) \
+ V(RiscvI32x4ReplaceLane) \
+ V(RiscvI32x4Add) \
+ V(RiscvI32x4AddHoriz) \
+ V(RiscvI32x4Sub) \
+ V(RiscvF64x2Abs) \
+ V(RiscvF64x2Neg) \
+ V(RiscvF32x4Splat) \
+ V(RiscvF32x4ExtractLane) \
+ V(RiscvF32x4ReplaceLane) \
+ V(RiscvF32x4SConvertI32x4) \
+ V(RiscvF32x4UConvertI32x4) \
+ V(RiscvI64x2SConvertI32x4Low) \
+ V(RiscvI64x2SConvertI32x4High) \
+ V(RiscvI64x2UConvertI32x4Low) \
+ V(RiscvI64x2UConvertI32x4High) \
+ V(RiscvI32x4Mul) \
+ V(RiscvI32x4MaxS) \
+ V(RiscvI32x4MinS) \
+ V(RiscvI32x4Eq) \
+ V(RiscvI32x4Ne) \
+ V(RiscvI32x4Shl) \
+ V(RiscvI32x4ShrS) \
+ V(RiscvI32x4ShrU) \
+ V(RiscvI32x4MaxU) \
+ V(RiscvI32x4MinU) \
+ V(RiscvI64x2Eq) \
+ V(RiscvF64x2Sqrt) \
+ V(RiscvF64x2Add) \
+ V(RiscvF64x2Sub) \
+ V(RiscvF64x2Mul) \
+ V(RiscvF64x2Div) \
+ V(RiscvF64x2Min) \
+ V(RiscvF64x2Max) \
+ V(RiscvF64x2ConvertLowI32x4S) \
+ V(RiscvF64x2ConvertLowI32x4U) \
+ V(RiscvF64x2PromoteLowF32x4) \
+ V(RiscvF64x2Eq) \
+ V(RiscvF64x2Ne) \
+ V(RiscvF64x2Lt) \
+ V(RiscvF64x2Le) \
+ V(RiscvF64x2Splat) \
+ V(RiscvF64x2ExtractLane) \
+ V(RiscvF64x2ReplaceLane) \
+ V(RiscvF64x2Pmin) \
+ V(RiscvF64x2Pmax) \
+ V(RiscvF64x2Ceil) \
+ V(RiscvF64x2Floor) \
+ V(RiscvF64x2Trunc) \
+ V(RiscvF64x2NearestInt) \
+ V(RiscvI64x2Splat) \
+ V(RiscvI64x2ExtractLane) \
+ V(RiscvI64x2ReplaceLane) \
+ V(RiscvI64x2Add) \
+ V(RiscvI64x2Sub) \
+ V(RiscvI64x2Mul) \
+ V(RiscvI64x2Neg) \
+ V(RiscvI64x2Shl) \
+ V(RiscvI64x2ShrS) \
+ V(RiscvI64x2ShrU) \
+ V(RiscvI64x2BitMask) \
+ V(RiscvF32x4Abs) \
+ V(RiscvF32x4Neg) \
+ V(RiscvF32x4Sqrt) \
+ V(RiscvF32x4RecipApprox) \
+ V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Add) \
+ V(RiscvF32x4AddHoriz) \
+ V(RiscvF32x4Sub) \
+ V(RiscvF32x4Mul) \
+ V(RiscvF32x4Div) \
+ V(RiscvF32x4Max) \
+ V(RiscvF32x4Min) \
+ V(RiscvF32x4Eq) \
+ V(RiscvF32x4Ne) \
+ V(RiscvF32x4Lt) \
+ V(RiscvF32x4Le) \
+ V(RiscvF32x4Pmin) \
+ V(RiscvF32x4Pmax) \
+ V(RiscvF32x4DemoteF64x2Zero) \
+ V(RiscvF32x4Ceil) \
+ V(RiscvF32x4Floor) \
+ V(RiscvF32x4Trunc) \
+ V(RiscvF32x4NearestInt) \
+ V(RiscvI32x4SConvertF32x4) \
+ V(RiscvI32x4UConvertF32x4) \
+ V(RiscvI32x4Neg) \
+ V(RiscvI32x4GtS) \
+ V(RiscvI32x4GeS) \
+ V(RiscvI32x4GtU) \
+ V(RiscvI32x4GeU) \
+ V(RiscvI32x4Abs) \
+ V(RiscvI32x4BitMask) \
+ V(RiscvI32x4DotI16x8S) \
+ V(RiscvI32x4TruncSatF64x2SZero) \
+ V(RiscvI32x4TruncSatF64x2UZero) \
+ V(RiscvI16x8Splat) \
+ V(RiscvI16x8ExtractLaneU) \
+ V(RiscvI16x8ExtractLaneS) \
+ V(RiscvI16x8ReplaceLane) \
+ V(RiscvI16x8Neg) \
+ V(RiscvI16x8Shl) \
+ V(RiscvI16x8ShrS) \
+ V(RiscvI16x8ShrU) \
+ V(RiscvI16x8Add) \
+ V(RiscvI16x8AddSatS) \
+ V(RiscvI16x8AddHoriz) \
+ V(RiscvI16x8Sub) \
+ V(RiscvI16x8SubSatS) \
+ V(RiscvI16x8Mul) \
+ V(RiscvI16x8MaxS) \
+ V(RiscvI16x8MinS) \
+ V(RiscvI16x8Eq) \
+ V(RiscvI16x8Ne) \
+ V(RiscvI16x8GtS) \
+ V(RiscvI16x8GeS) \
+ V(RiscvI16x8AddSatU) \
+ V(RiscvI16x8SubSatU) \
+ V(RiscvI16x8MaxU) \
+ V(RiscvI16x8MinU) \
+ V(RiscvI16x8GtU) \
+ V(RiscvI16x8GeU) \
+ V(RiscvI16x8RoundingAverageU) \
+ V(RiscvI16x8Q15MulRSatS) \
+ V(RiscvI16x8Abs) \
+ V(RiscvI16x8BitMask) \
+ V(RiscvI8x16Splat) \
+ V(RiscvI8x16ExtractLaneU) \
+ V(RiscvI8x16ExtractLaneS) \
+ V(RiscvI8x16ReplaceLane) \
+ V(RiscvI8x16Neg) \
+ V(RiscvI8x16Shl) \
+ V(RiscvI8x16ShrS) \
+ V(RiscvI8x16Add) \
+ V(RiscvI8x16AddSatS) \
+ V(RiscvI8x16Sub) \
+ V(RiscvI8x16SubSatS) \
+ V(RiscvI8x16Mul) \
+ V(RiscvI8x16MaxS) \
+ V(RiscvI8x16MinS) \
+ V(RiscvI8x16Eq) \
+ V(RiscvI8x16Ne) \
+ V(RiscvI8x16GtS) \
+ V(RiscvI8x16GeS) \
+ V(RiscvI8x16ShrU) \
+ V(RiscvI8x16AddSatU) \
+ V(RiscvI8x16SubSatU) \
+ V(RiscvI8x16MaxU) \
+ V(RiscvI8x16MinU) \
+ V(RiscvI8x16GtU) \
+ V(RiscvI8x16GeU) \
+ V(RiscvI8x16RoundingAverageU) \
+ V(RiscvI8x16Abs) \
+ V(RiscvI8x16BitMask) \
+ V(RiscvI8x16Popcnt) \
+ V(RiscvS128And) \
+ V(RiscvS128Or) \
+ V(RiscvS128Xor) \
+ V(RiscvS128Not) \
+ V(RiscvS128Select) \
+ V(RiscvS128AndNot) \
+ V(RiscvV32x4AllTrue) \
+ V(RiscvV16x8AllTrue) \
+ V(RiscvV128AnyTrue) \
+ V(RiscvV8x16AllTrue) \
+ V(RiscvS32x4InterleaveRight) \
+ V(RiscvS32x4InterleaveLeft) \
+ V(RiscvS32x4PackEven) \
+ V(RiscvS32x4PackOdd) \
+ V(RiscvS32x4InterleaveEven) \
+ V(RiscvS32x4InterleaveOdd) \
+ V(RiscvS32x4Shuffle) \
+ V(RiscvS16x8InterleaveRight) \
+ V(RiscvS16x8InterleaveLeft) \
+ V(RiscvS16x8PackEven) \
+ V(RiscvS16x8PackOdd) \
+ V(RiscvS16x8InterleaveEven) \
+ V(RiscvS16x8InterleaveOdd) \
+ V(RiscvS16x4Reverse) \
+ V(RiscvS16x2Reverse) \
+ V(RiscvS8x16InterleaveRight) \
+ V(RiscvS8x16InterleaveLeft) \
+ V(RiscvS8x16PackEven) \
+ V(RiscvS8x16PackOdd) \
+ V(RiscvS8x16InterleaveEven) \
+ V(RiscvS8x16InterleaveOdd) \
+ V(RiscvS8x16Shuffle) \
+ V(RiscvI8x16Swizzle) \
+ V(RiscvS8x16Concat) \
+ V(RiscvS8x8Reverse) \
+ V(RiscvS8x4Reverse) \
+ V(RiscvS8x2Reverse) \
+ V(RiscvS128Load8Splat) \
+ V(RiscvS128Load16Splat) \
+ V(RiscvS128Load32Splat) \
+ V(RiscvS128Load64Splat) \
+ V(RiscvS128Load8x8S) \
+ V(RiscvS128Load8x8U) \
+ V(RiscvS128Load16x4S) \
+ V(RiscvS128Load16x4U) \
+ V(RiscvS128Load32x2S) \
+ V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadLane) \
+ V(RiscvS128StoreLane) \
+ V(RiscvMsaLd) \
+ V(RiscvMsaSt) \
+ V(RiscvI32x4SConvertI16x8Low) \
+ V(RiscvI32x4SConvertI16x8High) \
+ V(RiscvI32x4UConvertI16x8Low) \
+ V(RiscvI32x4UConvertI16x8High) \
+ V(RiscvI16x8SConvertI8x16Low) \
+ V(RiscvI16x8SConvertI8x16High) \
+ V(RiscvI16x8SConvertI32x4) \
+ V(RiscvI16x8UConvertI32x4) \
+ V(RiscvI16x8UConvertI8x16Low) \
+ V(RiscvI16x8UConvertI8x16High) \
+ V(RiscvI8x16SConvertI16x8) \
+ V(RiscvI8x16UConvertI16x8) \
+ V(RiscvWord64AtomicLoadUint8) \
+ V(RiscvWord64AtomicLoadUint16) \
+ V(RiscvWord64AtomicLoadUint32) \
+ V(RiscvWord64AtomicLoadUint64) \
+ V(RiscvWord64AtomicStoreWord8) \
+ V(RiscvWord64AtomicStoreWord16) \
+ V(RiscvWord64AtomicStoreWord32) \
+ V(RiscvWord64AtomicStoreWord64) \
+ V(RiscvWord64AtomicAddUint8) \
+ V(RiscvWord64AtomicAddUint16) \
+ V(RiscvWord64AtomicAddUint32) \
+ V(RiscvWord64AtomicAddUint64) \
+ V(RiscvWord64AtomicSubUint8) \
+ V(RiscvWord64AtomicSubUint16) \
+ V(RiscvWord64AtomicSubUint32) \
+ V(RiscvWord64AtomicSubUint64) \
+ V(RiscvWord64AtomicAndUint8) \
+ V(RiscvWord64AtomicAndUint16) \
+ V(RiscvWord64AtomicAndUint32) \
+ V(RiscvWord64AtomicAndUint64) \
+ V(RiscvWord64AtomicOrUint8) \
+ V(RiscvWord64AtomicOrUint16) \
+ V(RiscvWord64AtomicOrUint32) \
+ V(RiscvWord64AtomicOrUint64) \
+ V(RiscvWord64AtomicXorUint8) \
+ V(RiscvWord64AtomicXorUint16) \
+ V(RiscvWord64AtomicXorUint32) \
+ V(RiscvWord64AtomicXorUint64) \
+ V(RiscvWord64AtomicExchangeUint8) \
+ V(RiscvWord64AtomicExchangeUint16) \
+ V(RiscvWord64AtomicExchangeUint32) \
+ V(RiscvWord64AtomicExchangeUint64) \
+ V(RiscvWord64AtomicCompareExchangeUint8) \
+ V(RiscvWord64AtomicCompareExchangeUint16) \
+ V(RiscvWord64AtomicCompareExchangeUint32) \
+ V(RiscvWord64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_RISCV64_INSTRUCTION_CODES_RISCV64_H_
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
new file mode 100644
index 0000000000..fdc1346902
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -0,0 +1,1579 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kRiscvAbsD:
+ case kRiscvAbsS:
+ case kRiscvAdd32:
+ case kRiscvAddD:
+ case kRiscvAddS:
+ case kRiscvAnd:
+ case kRiscvAnd32:
+ case kRiscvAssertEqual:
+ case kRiscvBitcastDL:
+ case kRiscvBitcastLD:
+ case kRiscvBitcastInt32ToFloat32:
+ case kRiscvBitcastFloat32ToInt32:
+ case kRiscvByteSwap32:
+ case kRiscvByteSwap64:
+ case kRiscvCeilWD:
+ case kRiscvCeilWS:
+ case kRiscvClz32:
+ case kRiscvCmp:
+ case kRiscvCmpD:
+ case kRiscvCmpS:
+ case kRiscvCtz32:
+ case kRiscvCvtDL:
+ case kRiscvCvtDS:
+ case kRiscvCvtDUl:
+ case kRiscvCvtDUw:
+ case kRiscvCvtDW:
+ case kRiscvCvtSD:
+ case kRiscvCvtSL:
+ case kRiscvCvtSUl:
+ case kRiscvCvtSUw:
+ case kRiscvCvtSW:
+ case kRiscvMulHigh64:
+ case kRiscvMulHighU32:
+ case kRiscvAdd64:
+ case kRiscvAddOvf64:
+ case kRiscvClz64:
+ case kRiscvCtz64:
+ case kRiscvDiv64:
+ case kRiscvDivU64:
+ case kRiscvZeroExtendWord:
+ case kRiscvSignExtendWord:
+ case kRiscvDiv32:
+ case kRiscvDivD:
+ case kRiscvDivS:
+ case kRiscvDivU32:
+ case kRiscvMod64:
+ case kRiscvModU64:
+ case kRiscvMul64:
+ case kRiscvPopcnt64:
+ case kRiscvRor64:
+ case kRiscvSar64:
+ case kRiscvShl64:
+ case kRiscvShr64:
+ case kRiscvSub64:
+ case kRiscvSubOvf64:
+ case kRiscvF64x2Abs:
+ case kRiscvF64x2Neg:
+ case kRiscvF64x2Sqrt:
+ case kRiscvF64x2Add:
+ case kRiscvF64x2Sub:
+ case kRiscvF64x2Mul:
+ case kRiscvF64x2Div:
+ case kRiscvF64x2Min:
+ case kRiscvF64x2Max:
+ case kRiscvF64x2Eq:
+ case kRiscvF64x2Ne:
+ case kRiscvF64x2Lt:
+ case kRiscvF64x2Le:
+ case kRiscvF64x2Pmin:
+ case kRiscvF64x2Pmax:
+ case kRiscvF64x2ConvertLowI32x4S:
+ case kRiscvF64x2ConvertLowI32x4U:
+ case kRiscvF64x2PromoteLowF32x4:
+ case kRiscvF64x2Ceil:
+ case kRiscvF64x2Floor:
+ case kRiscvF64x2Trunc:
+ case kRiscvF64x2NearestInt:
+ case kRiscvI64x2Splat:
+ case kRiscvI64x2ExtractLane:
+ case kRiscvI64x2ReplaceLane:
+ case kRiscvI64x2Add:
+ case kRiscvI64x2Sub:
+ case kRiscvI64x2Mul:
+ case kRiscvI64x2Neg:
+ case kRiscvI64x2Shl:
+ case kRiscvI64x2ShrS:
+ case kRiscvI64x2ShrU:
+ case kRiscvI64x2BitMask:
+ case kRiscvF32x4Abs:
+ case kRiscvF32x4Add:
+ case kRiscvF32x4AddHoriz:
+ case kRiscvF32x4Eq:
+ case kRiscvF32x4ExtractLane:
+ case kRiscvF32x4Lt:
+ case kRiscvF32x4Le:
+ case kRiscvF32x4Max:
+ case kRiscvF32x4Min:
+ case kRiscvF32x4Mul:
+ case kRiscvF32x4Div:
+ case kRiscvF32x4Ne:
+ case kRiscvF32x4Neg:
+ case kRiscvF32x4Sqrt:
+ case kRiscvF32x4RecipApprox:
+ case kRiscvF32x4RecipSqrtApprox:
+ case kRiscvF32x4ReplaceLane:
+ case kRiscvF32x4SConvertI32x4:
+ case kRiscvF32x4Splat:
+ case kRiscvF32x4Sub:
+ case kRiscvF32x4UConvertI32x4:
+ case kRiscvF32x4Pmin:
+ case kRiscvF32x4Pmax:
+ case kRiscvF32x4DemoteF64x2Zero:
+ case kRiscvF32x4Ceil:
+ case kRiscvF32x4Floor:
+ case kRiscvF32x4Trunc:
+ case kRiscvF32x4NearestInt:
+ case kRiscvI64x2Eq:
+ case kRiscvF64x2Splat:
+ case kRiscvF64x2ExtractLane:
+ case kRiscvF64x2ReplaceLane:
+ case kRiscvFloat32Max:
+ case kRiscvFloat32Min:
+ case kRiscvFloat32RoundDown:
+ case kRiscvFloat32RoundTiesEven:
+ case kRiscvFloat32RoundTruncate:
+ case kRiscvFloat32RoundUp:
+ case kRiscvFloat64ExtractLowWord32:
+ case kRiscvFloat64ExtractHighWord32:
+ case kRiscvFloat64InsertLowWord32:
+ case kRiscvFloat64InsertHighWord32:
+ case kRiscvFloat64Max:
+ case kRiscvFloat64Min:
+ case kRiscvFloat64RoundDown:
+ case kRiscvFloat64RoundTiesEven:
+ case kRiscvFloat64RoundTruncate:
+ case kRiscvFloat64RoundUp:
+ case kRiscvFloat64SilenceNaN:
+ case kRiscvFloorWD:
+ case kRiscvFloorWS:
+ case kRiscvI64x2SConvertI32x4Low:
+ case kRiscvI64x2SConvertI32x4High:
+ case kRiscvI64x2UConvertI32x4Low:
+ case kRiscvI64x2UConvertI32x4High:
+ case kRiscvI16x8Add:
+ case kRiscvI16x8AddHoriz:
+ case kRiscvI16x8AddSatS:
+ case kRiscvI16x8AddSatU:
+ case kRiscvI16x8Eq:
+ case kRiscvI16x8ExtractLaneU:
+ case kRiscvI16x8ExtractLaneS:
+ case kRiscvI16x8GeS:
+ case kRiscvI16x8GeU:
+ case kRiscvI16x8GtS:
+ case kRiscvI16x8GtU:
+ case kRiscvI16x8MaxS:
+ case kRiscvI16x8MaxU:
+ case kRiscvI16x8MinS:
+ case kRiscvI16x8MinU:
+ case kRiscvI16x8Mul:
+ case kRiscvI16x8Ne:
+ case kRiscvI16x8Neg:
+ case kRiscvI16x8ReplaceLane:
+ case kRiscvI8x16SConvertI16x8:
+ case kRiscvI16x8SConvertI32x4:
+ case kRiscvI16x8SConvertI8x16High:
+ case kRiscvI16x8SConvertI8x16Low:
+ case kRiscvI16x8Shl:
+ case kRiscvI16x8ShrS:
+ case kRiscvI16x8ShrU:
+ case kRiscvI32x4TruncSatF64x2SZero:
+ case kRiscvI32x4TruncSatF64x2UZero:
+ case kRiscvI16x8Splat:
+ case kRiscvI16x8Sub:
+ case kRiscvI16x8SubSatS:
+ case kRiscvI16x8SubSatU:
+ case kRiscvI8x16UConvertI16x8:
+ case kRiscvI16x8UConvertI32x4:
+ case kRiscvI16x8UConvertI8x16High:
+ case kRiscvI16x8UConvertI8x16Low:
+ case kRiscvI16x8RoundingAverageU:
+ case kRiscvI16x8Q15MulRSatS:
+ case kRiscvI16x8Abs:
+ case kRiscvI16x8BitMask:
+ case kRiscvI32x4Add:
+ case kRiscvI32x4AddHoriz:
+ case kRiscvI32x4Eq:
+ case kRiscvI32x4ExtractLane:
+ case kRiscvI32x4GeS:
+ case kRiscvI32x4GeU:
+ case kRiscvI32x4GtS:
+ case kRiscvI32x4GtU:
+ case kRiscvI32x4MaxS:
+ case kRiscvI32x4MaxU:
+ case kRiscvI32x4MinS:
+ case kRiscvI32x4MinU:
+ case kRiscvI32x4Mul:
+ case kRiscvI32x4Ne:
+ case kRiscvI32x4Neg:
+ case kRiscvI32x4ReplaceLane:
+ case kRiscvI32x4SConvertF32x4:
+ case kRiscvI32x4SConvertI16x8High:
+ case kRiscvI32x4SConvertI16x8Low:
+ case kRiscvI32x4Shl:
+ case kRiscvI32x4ShrS:
+ case kRiscvI32x4ShrU:
+ case kRiscvI32x4Splat:
+ case kRiscvI32x4Sub:
+ case kRiscvI32x4UConvertF32x4:
+ case kRiscvI32x4UConvertI16x8High:
+ case kRiscvI32x4UConvertI16x8Low:
+ case kRiscvI32x4Abs:
+ case kRiscvI32x4BitMask:
+ case kRiscvI32x4DotI16x8S:
+ case kRiscvI8x16Add:
+ case kRiscvI8x16AddSatS:
+ case kRiscvI8x16AddSatU:
+ case kRiscvI8x16Eq:
+ case kRiscvI8x16ExtractLaneU:
+ case kRiscvI8x16ExtractLaneS:
+ case kRiscvI8x16GeS:
+ case kRiscvI8x16GeU:
+ case kRiscvI8x16GtS:
+ case kRiscvI8x16GtU:
+ case kRiscvI8x16MaxS:
+ case kRiscvI8x16MaxU:
+ case kRiscvI8x16MinS:
+ case kRiscvI8x16MinU:
+ case kRiscvI8x16Mul:
+ case kRiscvI8x16Ne:
+ case kRiscvI8x16Neg:
+ case kRiscvI8x16ReplaceLane:
+ case kRiscvI8x16Shl:
+ case kRiscvI8x16ShrS:
+ case kRiscvI8x16ShrU:
+ case kRiscvI8x16Splat:
+ case kRiscvI8x16Sub:
+ case kRiscvI8x16SubSatS:
+ case kRiscvI8x16SubSatU:
+ case kRiscvI8x16RoundingAverageU:
+ case kRiscvI8x16Abs:
+ case kRiscvI8x16BitMask:
+ case kRiscvI8x16Popcnt:
+ case kRiscvMaxD:
+ case kRiscvMaxS:
+ case kRiscvMinD:
+ case kRiscvMinS:
+ case kRiscvMod32:
+ case kRiscvModU32:
+ case kRiscvMov:
+ case kRiscvMul32:
+ case kRiscvMulD:
+ case kRiscvMulHigh32:
+ case kRiscvMulOvf32:
+ case kRiscvMulS:
+ case kRiscvNegD:
+ case kRiscvNegS:
+ case kRiscvNor:
+ case kRiscvNor32:
+ case kRiscvOr:
+ case kRiscvOr32:
+ case kRiscvPopcnt32:
+ case kRiscvRor32:
+ case kRiscvRoundWD:
+ case kRiscvRoundWS:
+ case kRiscvS128And:
+ case kRiscvS128Or:
+ case kRiscvS128Not:
+ case kRiscvS128Select:
+ case kRiscvS128AndNot:
+ case kRiscvS128Xor:
+ case kRiscvS128Const:
+ case kRiscvS128Zero:
+ case kRiscvS128AllOnes:
+ case kRiscvS16x8InterleaveEven:
+ case kRiscvS16x8InterleaveOdd:
+ case kRiscvS16x8InterleaveLeft:
+ case kRiscvS16x8InterleaveRight:
+ case kRiscvS16x8PackEven:
+ case kRiscvS16x8PackOdd:
+ case kRiscvS16x2Reverse:
+ case kRiscvS16x4Reverse:
+ case kRiscvV8x16AllTrue:
+ case kRiscvV32x4AllTrue:
+ case kRiscvV16x8AllTrue:
+ case kRiscvV128AnyTrue:
+ case kRiscvS32x4InterleaveEven:
+ case kRiscvS32x4InterleaveOdd:
+ case kRiscvS32x4InterleaveLeft:
+ case kRiscvS32x4InterleaveRight:
+ case kRiscvS32x4PackEven:
+ case kRiscvS32x4PackOdd:
+ case kRiscvS32x4Shuffle:
+ case kRiscvS8x16Concat:
+ case kRiscvS8x16InterleaveEven:
+ case kRiscvS8x16InterleaveOdd:
+ case kRiscvS8x16InterleaveLeft:
+ case kRiscvS8x16InterleaveRight:
+ case kRiscvS8x16PackEven:
+ case kRiscvS8x16PackOdd:
+ case kRiscvS8x2Reverse:
+ case kRiscvS8x4Reverse:
+ case kRiscvS8x8Reverse:
+ case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Swizzle:
+ case kRiscvSar32:
+ case kRiscvSignExtendByte:
+ case kRiscvSignExtendShort:
+ case kRiscvShl32:
+ case kRiscvShr32:
+ case kRiscvSqrtD:
+ case kRiscvSqrtS:
+ case kRiscvSub32:
+ case kRiscvSubD:
+ case kRiscvSubS:
+ case kRiscvTruncLD:
+ case kRiscvTruncLS:
+ case kRiscvTruncUlD:
+ case kRiscvTruncUlS:
+ case kRiscvTruncUwD:
+ case kRiscvTruncUwS:
+ case kRiscvTruncWD:
+ case kRiscvTruncWS:
+ case kRiscvTst:
+ case kRiscvXor:
+ case kRiscvXor32:
+ return kNoOpcodeFlags;
+
+ case kRiscvLb:
+ case kRiscvLbu:
+ case kRiscvLd:
+ case kRiscvLoadDouble:
+ case kRiscvLh:
+ case kRiscvLhu:
+ case kRiscvLw:
+ case kRiscvLoadFloat:
+ case kRiscvLwu:
+ case kRiscvMsaLd:
+ case kRiscvPeek:
+ case kRiscvUld:
+ case kRiscvULoadDouble:
+ case kRiscvUlh:
+ case kRiscvUlhu:
+ case kRiscvUlw:
+ case kRiscvUlwu:
+ case kRiscvULoadFloat:
+ case kRiscvS128Load8Splat:
+ case kRiscvS128Load16Splat:
+ case kRiscvS128Load32Splat:
+ case kRiscvS128Load64Splat:
+ case kRiscvS128Load8x8S:
+ case kRiscvS128Load8x8U:
+ case kRiscvS128Load16x4S:
+ case kRiscvS128Load16x4U:
+ case kRiscvS128Load32x2S:
+ case kRiscvS128Load32x2U:
+ case kRiscvS128LoadLane:
+ case kRiscvS128StoreLane:
+ case kRiscvWord64AtomicLoadUint8:
+ case kRiscvWord64AtomicLoadUint16:
+ case kRiscvWord64AtomicLoadUint32:
+ case kRiscvWord64AtomicLoadUint64:
+ return kIsLoadOperation;
+
+ case kRiscvModD:
+ case kRiscvModS:
+ case kRiscvMsaSt:
+ case kRiscvPush:
+ case kRiscvSb:
+ case kRiscvSd:
+ case kRiscvStoreDouble:
+ case kRiscvSh:
+ case kRiscvStackClaim:
+ case kRiscvStoreToStackSlot:
+ case kRiscvSw:
+ case kRiscvStoreFloat:
+ case kRiscvUsd:
+ case kRiscvUStoreDouble:
+ case kRiscvUsh:
+ case kRiscvUsw:
+ case kRiscvUStoreFloat:
+ case kRiscvSync:
+ case kRiscvWord64AtomicStoreWord8:
+ case kRiscvWord64AtomicStoreWord16:
+ case kRiscvWord64AtomicStoreWord32:
+ case kRiscvWord64AtomicStoreWord64:
+ case kRiscvWord64AtomicAddUint8:
+ case kRiscvWord64AtomicAddUint16:
+ case kRiscvWord64AtomicAddUint32:
+ case kRiscvWord64AtomicAddUint64:
+ case kRiscvWord64AtomicSubUint8:
+ case kRiscvWord64AtomicSubUint16:
+ case kRiscvWord64AtomicSubUint32:
+ case kRiscvWord64AtomicSubUint64:
+ case kRiscvWord64AtomicAndUint8:
+ case kRiscvWord64AtomicAndUint16:
+ case kRiscvWord64AtomicAndUint32:
+ case kRiscvWord64AtomicAndUint64:
+ case kRiscvWord64AtomicOrUint8:
+ case kRiscvWord64AtomicOrUint16:
+ case kRiscvWord64AtomicOrUint32:
+ case kRiscvWord64AtomicOrUint64:
+ case kRiscvWord64AtomicXorUint8:
+ case kRiscvWord64AtomicXorUint16:
+ case kRiscvWord64AtomicXorUint32:
+ case kRiscvWord64AtomicXorUint64:
+ case kRiscvWord64AtomicExchangeUint8:
+ case kRiscvWord64AtomicExchangeUint16:
+ case kRiscvWord64AtomicExchangeUint32:
+ case kRiscvWord64AtomicExchangeUint64:
+ case kRiscvWord64AtomicCompareExchangeUint8:
+ case kRiscvWord64AtomicCompareExchangeUint16:
+ case kRiscvWord64AtomicCompareExchangeUint32:
+ case kRiscvWord64AtomicCompareExchangeUint64:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+enum Latency {
+ BRANCH = 4, // Estimated max.
+ RINT_S = 4, // Estimated.
+ RINT_D = 4, // Estimated.
+
+ // TODO(RISCV): remove MULT instructions (MIPS legacy).
+ MULT = 4,
+ MULTU = 4,
+ DMULT = 4,
+
+ MUL32 = 7,
+
+ DIV32 = 50, // Min:11 Max:50
+ DIV64 = 50,
+ DIVU32 = 50,
+ DIVU64 = 50,
+
+ ABS_S = 4,
+ ABS_D = 4,
+ NEG_S = 4,
+ NEG_D = 4,
+ ADD_S = 4,
+ ADD_D = 4,
+ SUB_S = 4,
+ SUB_D = 4,
+ MAX_S = 4, // Estimated.
+ MIN_S = 4,
+ MAX_D = 4, // Estimated.
+ MIN_D = 4,
+ C_cond_S = 4,
+ C_cond_D = 4,
+ MUL_S = 4,
+
+ MADD_S = 4,
+ MSUB_S = 4,
+ NMADD_S = 4,
+ NMSUB_S = 4,
+
+ CABS_cond_S = 4,
+ CABS_cond_D = 4,
+
+ CVT_D_S = 4,
+ CVT_PS_PW = 4,
+
+ CVT_S_W = 4,
+ CVT_S_L = 4,
+ CVT_D_W = 4,
+ CVT_D_L = 4,
+
+ CVT_S_D = 4,
+
+ CVT_W_S = 4,
+ CVT_W_D = 4,
+ CVT_L_S = 4,
+ CVT_L_D = 4,
+
+ CEIL_W_S = 4,
+ CEIL_W_D = 4,
+ CEIL_L_S = 4,
+ CEIL_L_D = 4,
+
+ FLOOR_W_S = 4,
+ FLOOR_W_D = 4,
+ FLOOR_L_S = 4,
+ FLOOR_L_D = 4,
+
+ ROUND_W_S = 4,
+ ROUND_W_D = 4,
+ ROUND_L_S = 4,
+ ROUND_L_D = 4,
+
+ TRUNC_W_S = 4,
+ TRUNC_W_D = 4,
+ TRUNC_L_S = 4,
+ TRUNC_L_D = 4,
+
+ MOV_S = 4,
+ MOV_D = 4,
+
+ MOVF_S = 4,
+ MOVF_D = 4,
+
+ MOVN_S = 4,
+ MOVN_D = 4,
+
+ MOVT_S = 4,
+ MOVT_D = 4,
+
+ MOVZ_S = 4,
+ MOVZ_D = 4,
+
+ MUL_D = 5,
+ MADD_D = 5,
+ MSUB_D = 5,
+ NMADD_D = 5,
+ NMSUB_D = 5,
+
+ RECIP_S = 13,
+ RECIP_D = 26,
+
+ RSQRT_S = 17,
+ RSQRT_D = 36,
+
+ DIV_S = 17,
+ SQRT_S = 17,
+
+ DIV_D = 32,
+ SQRT_D = 32,
+
+ MOVT_FREG = 4,
+ MOVT_HIGH_FREG = 4,
+ MOVT_DREG = 4,
+ LOAD_FLOAT = 4,
+ LOAD_DOUBLE = 4,
+
+ MOVF_FREG = 1,
+ MOVF_HIGH_FREG = 1,
+ MOVF_HIGH_DREG = 1,
+ MOVF_HIGH = 1,
+ MOVF_LOW = 1,
+ STORE_FLOAT = 1,
+ STORE_DOUBLE = 1,
+};
+
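+ // Operands that are not registers generally cost one extra instruction to
+ // materialize the immediate, which is reflected in the latencies below.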
+int Add64Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int Sub64Latency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int AndLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int OrLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int NorLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int XorLatency(bool is_operand_register = true) {
+ return Add64Latency(is_operand_register);
+}
+
+int Mul32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::MUL32;
+ } else {
+ return Latency::MUL32 + 1;
+ }
+}
+
+int Mul64Latency(bool is_operand_register = true) {
+ int latency = Latency::DMULT + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulh32Latency(bool is_operand_register = true) {
+ int latency = Latency::MULT + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulhu32Latency(bool is_operand_register = true) {
+ int latency = Latency::MULTU + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mulh64Latency(bool is_operand_register = true) {
+ int latency = Latency::DMULT + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Div32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIV32;
+ } else {
+ return Latency::DIV32 + 1;
+ }
+}
+
+int Divu32Latency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return Latency::DIVU32;
+ } else {
+ return Latency::DIVU32 + 1;
+ }
+}
+
+int Div64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV64 + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Divu64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIVU64 + Latency::MOVF_LOW;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mod32Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV32 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Modu32Latency(bool is_operand_register = true) {
+ int latency = Latency::DIVU32 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Mod64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV64 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int Modu64Latency(bool is_operand_register = true) {
+ int latency = Latency::DIV64 + Latency::MOVF_HIGH;
+ if (!is_operand_register) {
+ latency += 1;
+ }
+ return latency;
+}
+
+int MovzLatency() { return 1; }
+
+int MovnLatency() { return 1; }
+
+int CallLatency() {
+ // Estimated.
+ return Add64Latency(false) + Latency::BRANCH + 5;
+}
+
+int JumpLatency() {
+ // Estimated max.
+ return 1 + Add64Latency() + Latency::BRANCH + 2;
+}
+
+int SmiUntagLatency() { return 1; }
+
+int PrepareForTailCallLatency() {
+ // Estimated max.
+ return 2 * (Add64Latency() + 1 + Add64Latency(false)) + 2 + Latency::BRANCH +
+ Latency::BRANCH + 2 * Sub64Latency(false) + 2 + Latency::BRANCH + 1;
+}
+
+int AssemblePopArgumentsAdoptFrameLatency() {
+ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() +
+ PrepareForTailCallLatency();
+}
+
+int AssertLatency() { return 1; }
+
+int PrepareCallCFunctionLatency() {
+ int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+ if (frame_alignment > kSystemPointerSize) {
+ return 1 + Sub64Latency(false) + AndLatency(false) + 1;
+ } else {
+ return Sub64Latency(false);
+ }
+}
+
+int AdjustBaseAndOffsetLatency() {
+ return 3; // Estimated max.
+}
+
+int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; }
+
+int UlhuLatency() {
+ return AdjustBaseAndOffsetLatency() + 2 * AlignedMemoryLatency() + 2;
+}
+
+int UlwLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+}
+
+int UlwuLatency() { return UlwLatency() + 1; }
+
+int UldLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 3;
+}
+
+int ULoadFloatLatency() { return UlwLatency() + Latency::MOVT_FREG; }
+
+int ULoadDoubleLatency() { return UldLatency() + Latency::MOVT_DREG; }
+
+int UshLatency() {
+ // Estimated max.
+ return AdjustBaseAndOffsetLatency() + 2 + 2 * AlignedMemoryLatency();
+}
+
+int UswLatency() { return AdjustBaseAndOffsetLatency() + 2; }
+
+int UsdLatency() { return AdjustBaseAndOffsetLatency() + 2; }
+
+int UStoreFloatLatency() { return Latency::MOVF_FREG + UswLatency(); }
+
+int UStoreDoubleLatency() { return Latency::MOVF_HIGH_DREG + UsdLatency(); }
+
+int LoadFloatLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::LOAD_FLOAT;
+}
+
+int StoreFloatLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::STORE_FLOAT;
+}
+
+int StoreDoubleLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::STORE_DOUBLE;
+}
+
+int LoadDoubleLatency() {
+ return AdjustBaseAndOffsetLatency() + Latency::LOAD_DOUBLE;
+}
+
+int MultiPushLatency() {
+ int latency = Sub64Latency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPushFPULatency() {
+ int latency = Sub64Latency(false);
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ latency += StoreDoubleLatency();
+ }
+ return latency;
+}
+
+int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPushLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPushFPULatency();
+ }
+ return latency;
+}
+
+int MultiPopLatency() {
+ int latency = Add64Latency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency++;
+ }
+ return latency;
+}
+
+int MultiPopFPULatency() {
+ int latency = Add64Latency(false);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ latency += LoadDoubleLatency();
+ }
+ return latency;
+}
+
+int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
+ int latency = MultiPopLatency();
+ if (fp_mode == kSaveFPRegs) {
+ latency += MultiPopFPULatency();
+ }
+ return latency;
+}
+
+int CallCFunctionHelperLatency() {
+ // Estimated.
+ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency();
+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) {
+ latency++;
+ } else {
+ latency += Add64Latency(false);
+ }
+ return latency;
+}
+
+int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); }
+
+int AssembleArchJumpLatency() {
+ // Estimated max.
+ return Latency::BRANCH;
+}
+
+int GenerateSwitchTableLatency() {
+ int latency = 6;
+ latency += 2;
+ return latency;
+}
+
+int AssembleArchTableSwitchLatency() {
+ return Latency::BRANCH + GenerateSwitchTableLatency();
+}
+
+int DropAndRetLatency() {
+ // Estimated max.
+ return Add64Latency(false) + JumpLatency();
+}
+
+int AssemblerReturnLatency() {
+ // Estimated max.
+ return Add64Latency(false) + MultiPopLatency() + MultiPopFPULatency() +
+ Latency::BRANCH + Add64Latency() + 1 + DropAndRetLatency();
+}
+
+int TryInlineTruncateDoubleToILatency() {
+ return 2 + Latency::TRUNC_W_D + Latency::MOVF_FREG + 2 + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int CallStubDelayedLatency() { return 1 + CallLatency(); }
+
+int TruncateDoubleToIDelayedLatency() {
+ // TODO(riscv): This no longer reflects how TruncateDoubleToI is called.
+ return TryInlineTruncateDoubleToILatency() + 1 + Sub64Latency(false) +
+ StoreDoubleLatency() + CallStubDelayedLatency() + Add64Latency(false) +
+ 1;
+}
+
+int CheckPageFlagLatency() {
+ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) +
+ Latency::BRANCH;
+}
+
+int SltuLatency(bool is_operand_register = true) {
+ if (is_operand_register) {
+ return 1;
+ } else {
+ return 2; // Estimated max.
+ }
+}
+
+int BranchShortHelperLatency() {
+ return SltuLatency() + 2; // Estimated max.
+}
+
+int BranchShortLatency() { return BranchShortHelperLatency(); }
+
+int MoveLatency() { return 1; }
+
+int MovToFloatParametersLatency() { return 2 * MoveLatency(); }
+
+int MovFromFloatResultLatency() { return MoveLatency(); }
+
+int AddOverflow64Latency() {
+ // Estimated max.
+ return 6;
+}
+
+int SubOverflow64Latency() {
+ // Estimated max.
+ return 6;
+}
+
+int MulOverflow32Latency() {
+ // Estimated max.
+ return Mul32Latency() + Mulh32Latency() + 2;
+}
+
+// TODO(RISCV): This is incorrect for RISC-V.
+int Clz64Latency() { return 1; }
+
+int Ctz32Latency() {
+ return Add64Latency(false) + XorLatency() + AndLatency() + Clz64Latency() +
+ 1 + Sub64Latency();
+}
+
+int Ctz64Latency() {
+ return Add64Latency(false) + XorLatency() + AndLatency() + 1 + Sub64Latency();
+}
+
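+// The popcount estimates below follow the classic branch-free SWAR expansion:
+// pairwise bit sums, nibble sums, a masked byte sum, and a final multiply plus
+// shift to accumulate the byte counts.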
+int Popcnt32Latency() {
+ return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 +
+ AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() +
+ 1 + Mul32Latency() + 1;
+}
+
+int Popcnt64Latency() {
+ return 2 + AndLatency() + Sub64Latency() + 1 + AndLatency() + 1 +
+ AndLatency() + Add64Latency() + 1 + Add64Latency() + 1 + AndLatency() +
+ 1 + Mul64Latency() + 1;
+}
+
+int CompareFLatency() { return Latency::C_cond_S; }
+
+int CompareF32Latency() { return CompareFLatency(); }
+
+int CompareF64Latency() { return CompareFLatency(); }
+
+int CompareIsNanFLatency() { return CompareFLatency(); }
+
+int CompareIsNanF32Latency() { return CompareIsNanFLatency(); }
+
+int CompareIsNanF64Latency() { return CompareIsNanFLatency(); }
+
+int NegsLatency() {
+ // Estimated.
+ return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S +
+ Latency::MOVF_FREG + 1 + XorLatency() + Latency::MOVT_FREG;
+}
+
+int NegdLatency() {
+ // Estimated.
+ return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D +
+ Latency::MOVF_HIGH_DREG + 1 + XorLatency() + Latency::MOVT_DREG;
+}
+
+int Float64RoundLatency() {
+ // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4.
+ return Latency::MOVF_HIGH_DREG + 1 + Latency::BRANCH + Latency::MOV_D + 4 +
+ Latency::MOVF_HIGH_DREG + Latency::BRANCH + Latency::CVT_D_L + 2 +
+ Latency::MOVT_HIGH_FREG;
+}
+
+int Float32RoundLatency() {
+ // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4.
+ return Latency::MOVF_FREG + 1 + Latency::BRANCH + Latency::MOV_S + 4 +
+ Latency::MOVF_FREG + Latency::BRANCH + Latency::CVT_S_W + 2 +
+ Latency::MOVT_FREG;
+}
+
+int Float32MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MOVF_FREG + 1 + Latency::MOV_S;
+}
+
+int Float64MaxLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::MOVF_HIGH_DREG + Latency::MOV_D;
+}
+
+int Float32MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF32Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() +
+ Latency::MOVF_FREG + 1 + Latency::MOV_S;
+}
+
+int Float64MinLatency() {
+ // Estimated max.
+ int latency = CompareIsNanF64Latency() + Latency::BRANCH;
+ return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() +
+ Latency::MOVF_HIGH_DREG + Latency::MOV_D;
+}
+
+int TruncLSLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_S + Latency::MOVF_HIGH_DREG;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncLDLatency(bool load_status) {
+ int latency = Latency::TRUNC_L_D + Latency::MOVF_HIGH_DREG;
+ if (load_status) {
+ latency += SltuLatency() + 7;
+ }
+ return latency;
+}
+
+int TruncUlSLatency() {
+ // Estimated max.
+ return 2 * CompareF32Latency() + CompareIsNanF32Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S +
+ 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_FREG +
+ Latency::MOV_S + SltuLatency() + 4;
+}
+
+int TruncUlDLatency() {
+ // Estimated max.
+ return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
+ 4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
+ 3 * Latency::MOVF_HIGH_DREG + OrLatency() + Latency::MOVT_DREG +
+ Latency::MOV_D + SltuLatency() + 4;
+}
+
+int PushLatency() { return Add64Latency() + AlignedMemoryLatency(); }
+
+int ByteSwapSignedLatency() { return 2; }
+
+int LlLatency(int offset) {
+ bool is_one_instruction = is_int12(offset);
+ if (is_one_instruction) {
+ return 1;
+ } else {
+ return 3;
+ }
+}
+
+int ExtractBitsLatency(bool sign_extend, int size) {
+ int latency = 2;
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ case 16:
+ case 32:
+ latency += 1;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return latency;
+}
+
+int InsertBitsLatency() { return 2 + Sub64Latency(false) + 2; }
+
+int ScLatency(int offset) { return 3; }
+
+int Word32AtomicExchangeLatency(bool sign_extend, int size) {
+ return Add64Latency(false) + 1 + Sub64Latency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
+ return 2 + Sub64Latency() + 2 + LlLatency(0) +
+ ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
+ ScLatency(0) + BranchShortLatency() + 1;
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(RISCV): Verify these latencies for RISC-V (currently using MIPS
+ // numbers).
+ switch (instr->arch_opcode()) {
+ case kArchCallCodeObject:
+ case kArchCallWasmFunction:
+ return CallLatency();
+ case kArchTailCallCodeObject:
+ case kArchTailCallWasm:
+ case kArchTailCallAddress:
+ return JumpLatency();
+ case kArchCallJSFunction: {
+ int latency = 0;
+ if (FLAG_debug_code) {
+ latency = 1 + AssertLatency();
+ }
+ return latency + 1 + Add64Latency(false) + CallLatency();
+ }
+ case kArchPrepareCallCFunction:
+ return PrepareCallCFunctionLatency();
+ case kArchSaveCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PushCallerSavedLatency(fp_mode);
+ }
+ case kArchRestoreCallerRegisters: {
+ auto fp_mode =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ return PopCallerSavedLatency(fp_mode);
+ }
+ case kArchPrepareTailCall:
+ return 2;
+ case kArchCallCFunction:
+ return CallCFunctionLatency();
+ case kArchJmp:
+ return AssembleArchJumpLatency();
+ case kArchTableSwitch:
+ return AssembleArchTableSwitchLatency();
+ case kArchAbortCSAAssert:
+ return CallLatency() + 1;
+ case kArchDebugBreak:
+ return 1;
+ case kArchComment:
+ case kArchNop:
+ case kArchThrowTerminator:
+ case kArchDeoptimize:
+ return 0;
+ case kArchRet:
+ return AssemblerReturnLatency();
+ case kArchFramePointer:
+ return 1;
+ case kArchParentFramePointer:
+ // Estimated max.
+ return AlignedMemoryLatency();
+ case kArchTruncateDoubleToI:
+ return TruncateDoubleToIDelayedLatency();
+ case kArchStoreWithWriteBarrier:
+ return Add64Latency() + 1 + CheckPageFlagLatency();
+ case kArchStackSlot:
+ // Estimated max.
+ return Add64Latency(false) + AndLatency(false) + AssertLatency() +
+ Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
+ 1 + Sub64Latency() + Add64Latency();
+ case kArchWordPoisonOnSpeculation:
+ return AndLatency();
+ case kIeee754Float64Acos:
+ case kIeee754Float64Acosh:
+ case kIeee754Float64Asin:
+ case kIeee754Float64Asinh:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Cosh:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Pow:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Sinh:
+ case kIeee754Float64Tan:
+ case kIeee754Float64Tanh:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAdd32:
+ case kRiscvAdd64:
+ return Add64Latency(instr->InputAt(1)->IsRegister());
+ case kRiscvAddOvf64:
+ return AddOverflow64Latency();
+ case kRiscvSub32:
+ case kRiscvSub64:
+ return Sub64Latency(instr->InputAt(1)->IsRegister());
+ case kRiscvSubOvf64:
+ return SubOverflow64Latency();
+ case kRiscvMul32:
+ return Mul32Latency();
+ case kRiscvMulOvf32:
+ return MulOverflow32Latency();
+ case kRiscvMulHigh32:
+ return Mulh32Latency();
+ case kRiscvMulHighU32:
+ return Mulhu32Latency();
+ case kRiscvMulHigh64:
+ return Mulh64Latency();
+ case kRiscvDiv32: {
+ int latency = Div32Latency(instr->InputAt(1)->IsRegister());
+ return latency + MovzLatency();
+ }
+ case kRiscvDivU32: {
+ int latency = Divu32Latency(instr->InputAt(1)->IsRegister());
+ return latency + MovzLatency();
+ }
+ case kRiscvMod32:
+ return Mod32Latency();
+ case kRiscvModU32:
+ return Modu32Latency();
+ case kRiscvMul64:
+ return Mul64Latency();
+ case kRiscvDiv64: {
+ int latency = Div64Latency();
+ return latency + MovzLatency();
+ }
+ case kRiscvDivU64: {
+ int latency = Divu64Latency();
+ return latency + MovzLatency();
+ }
+ case kRiscvMod64:
+ return Mod64Latency();
+ case kRiscvModU64:
+ return Modu64Latency();
+ case kRiscvAnd:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvAnd32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = AndLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvOr:
+ return OrLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvOr32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = OrLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvNor:
+ return NorLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvNor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = NorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvXor:
+ return XorLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvXor32: {
+ bool is_operand_register = instr->InputAt(1)->IsRegister();
+ int latency = XorLatency(is_operand_register);
+ if (is_operand_register) {
+ return latency + 2;
+ } else {
+ return latency + 1;
+ }
+ }
+ case kRiscvClz32:
+ case kRiscvClz64:
+ return Clz64Latency();
+ case kRiscvCtz32:
+ return Ctz32Latency();
+ case kRiscvCtz64:
+ return Ctz64Latency();
+ case kRiscvPopcnt32:
+ return Popcnt32Latency();
+ case kRiscvPopcnt64:
+ return Popcnt64Latency();
+ case kRiscvShl32:
+ return 1;
+ case kRiscvShr32:
+ case kRiscvSar32:
+ case kRiscvZeroExtendWord:
+ return 2;
+ case kRiscvSignExtendWord:
+ case kRiscvShl64:
+ case kRiscvShr64:
+ case kRiscvSar64:
+ case kRiscvRor32:
+ case kRiscvRor64:
+ return 1;
+ case kRiscvTst:
+ return AndLatency(instr->InputAt(1)->IsRegister());
+ case kRiscvMov:
+ return 1;
+ case kRiscvCmpS:
+ return MoveLatency() + CompareF32Latency();
+ case kRiscvAddS:
+ return Latency::ADD_S;
+ case kRiscvSubS:
+ return Latency::SUB_S;
+ case kRiscvMulS:
+ return Latency::MUL_S;
+ case kRiscvDivS:
+ return Latency::DIV_S;
+ case kRiscvModS:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAbsS:
+ return Latency::ABS_S;
+ case kRiscvNegS:
+ return NegsLatency();
+ case kRiscvSqrtS:
+ return Latency::SQRT_S;
+ case kRiscvMaxS:
+ return Latency::MAX_S;
+ case kRiscvMinS:
+ return Latency::MIN_S;
+ case kRiscvCmpD:
+ return MoveLatency() + CompareF64Latency();
+ case kRiscvAddD:
+ return Latency::ADD_D;
+ case kRiscvSubD:
+ return Latency::SUB_D;
+ case kRiscvMulD:
+ return Latency::MUL_D;
+ case kRiscvDivD:
+ return Latency::DIV_D;
+ case kRiscvModD:
+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
+ CallCFunctionLatency() + MovFromFloatResultLatency();
+ case kRiscvAbsD:
+ return Latency::ABS_D;
+ case kRiscvNegD:
+ return NegdLatency();
+ case kRiscvSqrtD:
+ return Latency::SQRT_D;
+ case kRiscvMaxD:
+ return Latency::MAX_D;
+ case kRiscvMinD:
+ return Latency::MIN_D;
+ case kRiscvFloat64RoundDown:
+ case kRiscvFloat64RoundTruncate:
+ case kRiscvFloat64RoundUp:
+ case kRiscvFloat64RoundTiesEven:
+ return Float64RoundLatency();
+ case kRiscvFloat32RoundDown:
+ case kRiscvFloat32RoundTruncate:
+ case kRiscvFloat32RoundUp:
+ case kRiscvFloat32RoundTiesEven:
+ return Float32RoundLatency();
+ case kRiscvFloat32Max:
+ return Float32MaxLatency();
+ case kRiscvFloat64Max:
+ return Float64MaxLatency();
+ case kRiscvFloat32Min:
+ return Float32MinLatency();
+ case kRiscvFloat64Min:
+ return Float64MinLatency();
+ case kRiscvFloat64SilenceNaN:
+ return Latency::SUB_D;
+ case kRiscvCvtSD:
+ return Latency::CVT_S_D;
+ case kRiscvCvtDS:
+ return Latency::CVT_D_S;
+ case kRiscvCvtDW:
+ return Latency::MOVT_FREG + Latency::CVT_D_W;
+ case kRiscvCvtSW:
+ return Latency::MOVT_FREG + Latency::CVT_S_W;
+ case kRiscvCvtSUw:
+ return 1 + Latency::MOVT_DREG + Latency::CVT_S_L;
+ case kRiscvCvtSL:
+ return Latency::MOVT_DREG + Latency::CVT_S_L;
+ case kRiscvCvtDL:
+ return Latency::MOVT_DREG + Latency::CVT_D_L;
+ case kRiscvCvtDUw:
+ return 1 + Latency::MOVT_DREG + Latency::CVT_D_L;
+ case kRiscvCvtDUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
+ 2 * Latency::CVT_D_L + Latency::ADD_D;
+ case kRiscvCvtSUl:
+ return 2 * Latency::BRANCH + 3 + 2 * Latency::MOVT_DREG +
+ 2 * Latency::CVT_S_L + Latency::ADD_S;
+ case kRiscvFloorWD:
+ return Latency::FLOOR_W_D + Latency::MOVF_FREG;
+ case kRiscvCeilWD:
+ return Latency::CEIL_W_D + Latency::MOVF_FREG;
+ case kRiscvRoundWD:
+ return Latency::ROUND_W_D + Latency::MOVF_FREG;
+ case kRiscvTruncWD:
+ return Latency::TRUNC_W_D + Latency::MOVF_FREG;
+ case kRiscvFloorWS:
+ return Latency::FLOOR_W_S + Latency::MOVF_FREG;
+ case kRiscvCeilWS:
+ return Latency::CEIL_W_S + Latency::MOVF_FREG;
+ case kRiscvRoundWS:
+ return Latency::ROUND_W_S + Latency::MOVF_FREG;
+ case kRiscvTruncWS:
+ return Latency::TRUNC_W_S + Latency::MOVF_FREG + 2 + MovnLatency();
+ case kRiscvTruncLS:
+ return TruncLSLatency(instr->OutputCount() > 1);
+ case kRiscvTruncLD:
+ return TruncLDLatency(instr->OutputCount() > 1);
+ case kRiscvTruncUwD:
+ // Estimated max.
+ return CompareF64Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() +
+ Latency::MOVT_FREG + Latency::MOVF_FREG + Latency::MOVT_HIGH_FREG +
+ 1;
+ case kRiscvTruncUwS:
+ // Estimated max.
+ return CompareF32Latency() + 2 * Latency::BRANCH +
+ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() +
+ Latency::MOVT_FREG + 2 * Latency::MOVF_FREG + 2 + MovzLatency();
+ case kRiscvTruncUlS:
+ return TruncUlSLatency();
+ case kRiscvTruncUlD:
+ return TruncUlDLatency();
+ case kRiscvBitcastDL:
+ return Latency::MOVF_HIGH_DREG;
+ case kRiscvBitcastLD:
+ return Latency::MOVT_DREG;
+ case kRiscvFloat64ExtractLowWord32:
+ return Latency::MOVF_FREG;
+ case kRiscvFloat64InsertLowWord32:
+ return Latency::MOVF_HIGH_FREG + Latency::MOVT_FREG +
+ Latency::MOVT_HIGH_FREG;
+ case kRiscvFloat64ExtractHighWord32:
+ return Latency::MOVF_HIGH_FREG;
+ case kRiscvFloat64InsertHighWord32:
+ return Latency::MOVT_HIGH_FREG;
+ case kRiscvSignExtendByte:
+ case kRiscvSignExtendShort:
+ return 1;
+ case kRiscvLbu:
+ case kRiscvLb:
+ case kRiscvLhu:
+ case kRiscvLh:
+ case kRiscvLwu:
+ case kRiscvLw:
+ case kRiscvLd:
+ case kRiscvSb:
+ case kRiscvSh:
+ case kRiscvSw:
+ case kRiscvSd:
+ return AlignedMemoryLatency();
+ case kRiscvLoadFloat:
+ return LoadFloatLatency();
+ case kRiscvLoadDouble:
+ return LoadDoubleLatency();
+ case kRiscvStoreFloat:
+ return StoreFloatLatency();
+ case kRiscvStoreDouble:
+ return StoreDoubleLatency();
+ case kRiscvUlhu:
+ case kRiscvUlh:
+ return UlhuLatency();
+ case kRiscvUlwu:
+ return UlwuLatency();
+ case kRiscvUlw:
+ return UlwLatency();
+ case kRiscvUld:
+ return UldLatency();
+ case kRiscvULoadFloat:
+ return ULoadFloatLatency();
+ case kRiscvULoadDouble:
+ return ULoadDoubleLatency();
+ case kRiscvUsh:
+ return UshLatency();
+ case kRiscvUsw:
+ return UswLatency();
+ case kRiscvUsd:
+ return UsdLatency();
+ case kRiscvUStoreFloat:
+ return UStoreFloatLatency();
+ case kRiscvUStoreDouble:
+ return UStoreDoubleLatency();
+ case kRiscvPush: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ latency = StoreDoubleLatency() + Sub64Latency(false);
+ } else {
+ latency = PushLatency();
+ }
+ return latency;
+ }
+ case kRiscvPeek: {
+ int latency = 0;
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ auto op = LocationOperand::cast(instr->OutputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat64:
+ latency = LoadDoubleLatency();
+ break;
+ case MachineRepresentation::kFloat32:
+ latency = Latency::LOAD_FLOAT;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kRiscvStackClaim:
+ return Sub64Latency(false);
+ case kRiscvStoreToStackSlot: {
+ int latency = 0;
+ if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ latency = 1; // Estimated value.
+ } else {
+ latency = StoreDoubleLatency();
+ }
+ } else {
+ latency = AlignedMemoryLatency();
+ }
+ return latency;
+ }
+ case kRiscvByteSwap64:
+ return ByteSwapSignedLatency();
+ case kRiscvByteSwap32:
+ return ByteSwapSignedLatency();
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ return 2;
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
+ return 3;
+ case kWord32AtomicExchangeInt8:
+ return Word32AtomicExchangeLatency(true, 8);
+ case kWord32AtomicExchangeUint8:
+ return Word32AtomicExchangeLatency(false, 8);
+ case kWord32AtomicExchangeInt16:
+ return Word32AtomicExchangeLatency(true, 16);
+ case kWord32AtomicExchangeUint16:
+ return Word32AtomicExchangeLatency(false, 16);
+ case kWord32AtomicExchangeWord32:
+ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
+ case kWord32AtomicCompareExchangeInt8:
+ return Word32AtomicCompareExchangeLatency(true, 8);
+ case kWord32AtomicCompareExchangeUint8:
+ return Word32AtomicCompareExchangeLatency(false, 8);
+ case kWord32AtomicCompareExchangeInt16:
+ return Word32AtomicCompareExchangeLatency(true, 16);
+ case kWord32AtomicCompareExchangeUint16:
+ return Word32AtomicCompareExchangeLatency(false, 16);
+ case kWord32AtomicCompareExchangeWord32:
+ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
+ BranchShortLatency() + 1;
+ case kRiscvAssertEqual:
+ return AssertLatency();
+ default:
+ return 1;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
new file mode 100644
index 0000000000..4d86fd02a3
--- /dev/null
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -0,0 +1,3034 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds RISC-V-specific methods for generating InstructionOperands.
+class RiscvOperandGenerator final : public OperandGenerator {
+ public:
+ explicit RiscvOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
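+ // Shift amounts are 5-bit (32-bit shifts) or 6-bit (64-bit shifts), ALU
+ // immediates are 12-bit signed (e.g. 2047 encodes directly while 2048 must
+ // go through a register), and memory opcodes accept any int32 offset here
+ // because larger offsets are split when the access is assembled (cf.
+ // AdjustBaseAndOffsetLatency in instruction-scheduler-riscv64.cc).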
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kRiscvShl32:
+ case kRiscvSar32:
+ case kRiscvShr32:
+ return is_uint5(value);
+ case kRiscvShl64:
+ case kRiscvSar64:
+ case kRiscvShr64:
+ return is_uint6(value);
+ case kRiscvAdd32:
+ case kRiscvAnd32:
+ case kRiscvAnd:
+ case kRiscvAdd64:
+ case kRiscvOr32:
+ case kRiscvOr:
+ case kRiscvTst:
+ case kRiscvXor:
+ return is_int12(value);
+ case kRiscvLb:
+ case kRiscvLbu:
+ case kRiscvSb:
+ case kRiscvLh:
+ case kRiscvLhu:
+ case kRiscvSh:
+ case kRiscvLw:
+ case kRiscvSw:
+ case kRiscvLd:
+ case kRiscvSd:
+ case kRiscvLoadFloat:
+ case kRiscvStoreFloat:
+ case kRiscvLoadDouble:
+ case kRiscvStoreDouble:
+ return is_int32(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ RiscvOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
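+ // E.g. Word64Sar(Load[base + k], 32) becomes a 32-bit sign-extending load
+ // (kRiscvLw) from offset k + 4 on little-endian targets, which already
+ // yields the sign-extended upper word.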
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ RiscvOperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kRiscvLw;
+ if (g.CanBeImmediate(offset, opcode_)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset);
+#endif
+ matches_ = g.CanBeImmediate(immediate_, kRiscvLw);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ RiscvOperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ RiscvOperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
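+ // If the index fits the opcode's immediate field, fold it into an MRI
+ // (base + immediate) access; otherwise add base and index into a temporary
+ // register and load from it with a zero offset.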
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kRiscvS128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kRiscvS128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kRiscvS128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kRiscvS128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kRiscvS128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kRiscvS128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kRiscvS128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kRiscvS128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kRiscvS128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kRiscvS128Load32x2U;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvLoadFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvLoadDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvLd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaLd;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ // TODO(riscv): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier &&
+ V8_LIKELY(!FLAG_disable_write_barriers)) {
+ DCHECK(CanBeTaggedPointer(rep));
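+ // Base, index and value are kept in unique registers so they do not alias
+ // the temporaries needed by the write-barrier code; the RecordWriteMode is
+ // passed to the code generator through MiscField below.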
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvStoreFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvStoreDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvSh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvSw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvSd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaSt;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kRiscvAnd32, true, kRiscvAnd32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Match And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits (the MIPS original selected Dext here).
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+ // A bit-field extract cannot read past the register width. However,
+ // since the shift has already introduced zeros in the upper bits, a
+ // smaller mask extracts the same value and the remaining bits are
+ // zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ return;
+ }
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ VisitBinop(this, node, kRiscvAnd, true, kRiscvAnd);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kRiscvOr32, true, kRiscvOr32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kRiscvOr, true, kRiscvOr);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
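+ // (x ^ -1 is bitwise NOT, which the Nor pseudo with a zero operand
+ // expresses directly: nor(x, 0) == ~(x | 0) == ~x.)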
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kRiscvXor32, true, kRiscvXor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kRiscvXor, true, kRiscvXor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous and the shift immediate is non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kRiscvShl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kRiscvShl32, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kRiscvShr32, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ RiscvOperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
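+ // (x << 16) >> 16 sign-extends the low half-word and (x << 24) >> 24 the
+ // low byte, so the dedicated sign-extension instructions can be used.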
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kRiscvShl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kRiscvSar32, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kRiscvShl64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to a single 64-bit shift where
+ // the mask is contiguous and the shift immediate is non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kRiscvShl64, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kRiscvShl64, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitRRO(this, kRiscvShr64, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kRiscvSar64, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kRiscvRor32, node);
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kRiscvClz32, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvCtz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvCtz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvPopcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvPopcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kRiscvRor64, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kRiscvClz64, node);
+}
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kRiscvAdd32, true, kRiscvAdd32);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop(this, node, kRiscvAdd64, true, kRiscvAdd64);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kRiscvSub32);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kRiscvSub64);
+}
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
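+ // Strength-reduce multiplication by a constant: x * 2^k becomes x << k, and
+ // x * (2^k - 1) becomes (x << k) - x.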
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kRiscvSub32 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine the untagging shifts with a 64-bit multiply-high.
+ Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kRiscvMul32, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kRiscvMulHigh32, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kRiscvMulHighU32, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(dusmil): Add optimization for shifts larger than 32.
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kRiscvSub64 | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kRiscvMul64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands into a single 64-bit divide.
+ Emit(kRiscvDiv64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kRiscvDiv32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kRiscvDivU32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands into a single 64-bit modulo.
+ Emit(kRiscvMod64, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kRiscvMod32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kRiscvModU32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvDiv64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvDivU64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvMod64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kRiscvModU64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDS, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSW, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSUw, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDW, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDL, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDUw, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncWS;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
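+ // kSetOverflowToMin is forwarded to the code generator through MiscField,
+ // so an out-of-range input yields the minimum integer value instead of an
+ // unspecified result.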
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncUwS;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kRiscvFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kRiscvCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kRiscvRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kRiscvTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kRiscvFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kRiscvCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kRiscvRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kRiscvTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kRiscvTruncWD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kRiscvTruncLD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kRiscvTruncUwD, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kRiscvTruncUlD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kRiscvTruncUwD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionCode opcode = kRiscvTruncLD;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncLS, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncLD, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Node* value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvLw;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
+ RiscvOperandGenerator g(this);
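+ // On RV64, 32-bit operations sign-extend their results, so a 32-bit shift
+ // by zero is a cheap way to sign-extend the value to 64 bits.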
+ Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+ }
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
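+ // Unsigned 8-, 16- and 32-bit loads are treated as already zero-extending
+ // their result, so no explicit zero-extension is needed for them.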
+ if (node->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ // All other 32-bit operations sign-extend to the upper 32 bits.
+ return false;
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
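+ // If the value is already zero-extended, the representation change is a
+ // no-op.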
+ if (ZeroExtendsWord32ToWord64(value)) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After Smi untagging there is no need for a separate truncation;
+ // combine the shift and the truncate into a single 64-bit shift.
+ Emit(kRiscvSar64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ // The semantics of this machine IR are not clear: x86 zero-extends the
+ // truncated value, while ARM treats the truncation as a nop and thus leaves
+ // the upper 32 bits undefined. On RISC-V we sign-extend the truncated value.
+ Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kRiscvCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kRiscvCvtSD, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kRiscvTruncWD, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSL, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDL, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kRiscvCvtSUl, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvCvtDUl, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kRiscvBitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kRiscvBitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kRiscvAddS, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kRiscvAddD, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kRiscvSubS, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kRiscvSubD, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kRiscvMulS, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kRiscvMulD, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kRiscvDivS, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kRiscvDivD, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvModD, g.DefineAsFixed(node, fa0),
+ g.UseFixed(node->InputAt(0), fa0), g.UseFixed(node->InputAt(1), fa1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kRiscvAbsS, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kRiscvAbsD, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kRiscvSqrtS, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kRiscvSqrtD, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kRiscvFloat32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kRiscvFloat64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kRiscvNegS, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kRiscvNegD, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ RiscvOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa0),
+ g.UseFixed(node->InputAt(1), fa1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ RiscvOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ RiscvOperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+ if (push_count > 0) {
+ // Calculate the stack space needed for the pushed arguments.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kRiscvStackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ RiscvOperandGenerator g(this);
+
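+ // Values returned on the caller's stack frame are read back with kRiscvPeek
+ // using a reverse slot index.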
+ int reverse_slot = 1;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kRiscvPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvULoadFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvULoadDouble;
+ break;
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsUnsigned() ? kRiscvUlwu : kRiscvUlw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvUld;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaLd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kRiscvUStoreFloat;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kRiscvUStoreDouble;
+ break;
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvUsh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvUsw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvUsd;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kRiscvMsaSt;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kRiscvCmpS, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kRiscvCmpD, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ RiscvOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kRiscvTst) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kRiscvTst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
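+// Returns true when |n| is known to produce a value with unsigned 32-bit
+// semantics; VisitWord32Compare uses this to detect comparisons that mix
+// signed and unsigned operands.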
+bool IsNodeUnsigned(Node* n) {
+ NodeMatcher m(n);
+
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
+ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ LoadRepresentation load_rep = LoadRepresentationOf(n->op());
+ return load_rep.IsUnsigned();
+ } else {
+ return m.IsUint32Div() || m.IsUint32LessThan() ||
+ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
+ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
+ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
+ }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
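+ // Shift both operands left by 32 so that only their low 32 bits take part
+ // in the 64-bit comparison, regardless of how the upper bits were extended.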
+ selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ if (FLAG_debug_code) {
+ RiscvOperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+ InstructionOperand optimizedResult = g.TempRegister();
+ InstructionOperand fullResult = g.TempRegister();
+ FlagsCondition condition = cont->condition();
+ InstructionCode testOpcode = opcode |
+ FlagsConditionField::encode(condition) |
+ FlagsModeField::encode(kFlags_set);
+
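+ // With FLAG_debug_code, also compute the full Word32 comparison and assert
+ // that it agrees with the optimized comparison.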
+ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+
+ selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+ selector->Emit(testOpcode, fullResult, leftOp, rightOp);
+
+ selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
+ g.TempImmediate(static_cast<int>(
+ AbortReason::kUnsupportedNonPrimitiveCompare)));
+ }
+
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ // RISC-V doesn't support Word32 compare instructions. Instead it relies on
+ // the values in registers being correctly sign-extended and uses a Word64
+ // comparison instead. This behavior is correct in most cases, but doesn't
+ // work when comparing a signed operand with an unsigned one. We could
+ // simulate a full Word32 compare in all cases, but this would create
+ // unnecessary overhead since unsigned integers are rarely used in
+ // JavaScript. The solution here tries to match a comparison of a signed
+ // with an unsigned operand and performs the full Word32 compare only in
+ // those cases. Unfortunately, the solution is not complete because it might
+ // skip cases where a full Word32 compare is needed, so basically it is a
+ // hack.
+ // When calling a host function in the simulator, if the function returns an
+ // int32 value, the simulator does not sign-extend it to int64, because it
+ // cannot tell whether the function returns an int32 or an int64. So we need
+ // to do a full Word32 compare in that case as well.
+#ifndef USE_SIMULATOR
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+ node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+#endif
+ VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
+ } else {
+ VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
+ }
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kRiscvCmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ RiscvOperandGenerator g(selector);
+ selector->EmitWithContinuation(kRiscvCmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
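+ // Use unique registers for all operands: the code generator expands this
+ // into a multi-instruction sequence, so the inputs must not alias the
+ // output or the temporaries.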
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ RiscvOperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
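+ // If the value can be covered by this branch, try to combine it with the
+ // comparison or overflow operation that produced it.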
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation>) is either nullptr, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvAdd64, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvSub64, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvMulOvf32, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvAddOvf64, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kRiscvSubOvf64, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kRiscvTst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+ // Continuation could not be combined with a compare, emit compare against 0.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
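+ // Rough cost model: compare the estimated code-space and lookup-time costs
+ // of a jump table against a binary search over the cases.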
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kRiscvSub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvAdd64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvAdd64, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvSub64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvSub64, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvMulOvf32, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvMulOvf32, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvAddOvf64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvAddOvf64, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kRiscvSubOvf64, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kRiscvSubOvf64, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kRiscvFloat64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kRiscvFloat64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kRiscvFloat64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kRiscvFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kRiscvFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSync, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kWord32AtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kWord32AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kWord32AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kWord32AtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvWord64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvWord64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvWord64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvWord64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kRiscvWord64AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kRiscvWord64AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kRiscvWord64AtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kWord32AtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kWord32AtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kWord32AtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kWord32AtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kWord32AtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kRiscvWord64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kRiscvWord64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kRiscvWord64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kRiscvWord64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kWord32AtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kWord32AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kWord32AtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kWord32AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kWord32AtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kRiscvWord64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
+ kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kRiscvF64x2Abs) \
+ V(F64x2Neg, kRiscvF64x2Neg) \
+ V(F64x2Sqrt, kRiscvF64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4) \
+ V(F64x2Ceil, kRiscvF64x2Ceil) \
+ V(F64x2Floor, kRiscvF64x2Floor) \
+ V(F64x2Trunc, kRiscvF64x2Trunc) \
+ V(F64x2NearestInt, kRiscvF64x2NearestInt) \
+ V(I64x2Neg, kRiscvI64x2Neg) \
+ V(I64x2BitMask, kRiscvI64x2BitMask) \
+ V(I64x2Eq, kRiscvI64x2Eq) \
+ V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
+ V(F32x4Abs, kRiscvF32x4Abs) \
+ V(F32x4Neg, kRiscvF32x4Neg) \
+ V(F32x4Sqrt, kRiscvF32x4Sqrt) \
+ V(F32x4RecipApprox, kRiscvF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero) \
+ V(F32x4Ceil, kRiscvF32x4Ceil) \
+ V(F32x4Floor, kRiscvF32x4Floor) \
+ V(F32x4Trunc, kRiscvF32x4Trunc) \
+ V(F32x4NearestInt, kRiscvF32x4NearestInt) \
+ V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4) \
+ V(I32x4Neg, kRiscvI32x4Neg) \
+ V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High) \
+ V(I32x4Abs, kRiscvI32x4Abs) \
+ V(I32x4BitMask, kRiscvI32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kRiscvI16x8Neg) \
+ V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High) \
+ V(I16x8Abs, kRiscvI16x8Abs) \
+ V(I16x8BitMask, kRiscvI16x8BitMask) \
+ V(I8x16Neg, kRiscvI8x16Neg) \
+ V(I8x16Abs, kRiscvI8x16Abs) \
+ V(I8x16BitMask, kRiscvI8x16BitMask) \
+ V(I8x16Popcnt, kRiscvI8x16Popcnt) \
+ V(S128Not, kRiscvS128Not) \
+ V(V128AnyTrue, kRiscvV128AnyTrue) \
+ V(V32x4AllTrue, kRiscvV32x4AllTrue) \
+ V(V16x8AllTrue, kRiscvV16x8AllTrue) \
+ V(V8x16AllTrue, kRiscvV8x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kRiscvF64x2Add) \
+ V(F64x2Sub, kRiscvF64x2Sub) \
+ V(F64x2Mul, kRiscvF64x2Mul) \
+ V(F64x2Div, kRiscvF64x2Div) \
+ V(F64x2Min, kRiscvF64x2Min) \
+ V(F64x2Max, kRiscvF64x2Max) \
+ V(F64x2Eq, kRiscvF64x2Eq) \
+ V(F64x2Ne, kRiscvF64x2Ne) \
+ V(F64x2Lt, kRiscvF64x2Lt) \
+ V(F64x2Le, kRiscvF64x2Le) \
+ V(I64x2Add, kRiscvI64x2Add) \
+ V(I64x2Sub, kRiscvI64x2Sub) \
+ V(I64x2Mul, kRiscvI64x2Mul) \
+ V(F32x4Add, kRiscvF32x4Add) \
+ V(F32x4AddHoriz, kRiscvF32x4AddHoriz) \
+ V(F32x4Sub, kRiscvF32x4Sub) \
+ V(F32x4Mul, kRiscvF32x4Mul) \
+ V(F32x4Div, kRiscvF32x4Div) \
+ V(F32x4Max, kRiscvF32x4Max) \
+ V(F32x4Min, kRiscvF32x4Min) \
+ V(F32x4Eq, kRiscvF32x4Eq) \
+ V(F32x4Ne, kRiscvF32x4Ne) \
+ V(F32x4Lt, kRiscvF32x4Lt) \
+ V(F32x4Le, kRiscvF32x4Le) \
+ V(I32x4Add, kRiscvI32x4Add) \
+ V(I32x4AddHoriz, kRiscvI32x4AddHoriz) \
+ V(I32x4Sub, kRiscvI32x4Sub) \
+ V(I32x4Mul, kRiscvI32x4Mul) \
+ V(I32x4MaxS, kRiscvI32x4MaxS) \
+ V(I32x4MinS, kRiscvI32x4MinS) \
+ V(I32x4MaxU, kRiscvI32x4MaxU) \
+ V(I32x4MinU, kRiscvI32x4MinU) \
+ V(I32x4Eq, kRiscvI32x4Eq) \
+ V(I32x4Ne, kRiscvI32x4Ne) \
+ V(I32x4GtS, kRiscvI32x4GtS) \
+ V(I32x4GeS, kRiscvI32x4GeS) \
+ V(I32x4GtU, kRiscvI32x4GtU) \
+ V(I32x4GeU, kRiscvI32x4GeU) \
+ V(I32x4DotI16x8S, kRiscvI32x4DotI16x8S) \
+ V(I16x8Add, kRiscvI16x8Add) \
+ V(I16x8AddSatS, kRiscvI16x8AddSatS) \
+ V(I16x8AddSatU, kRiscvI16x8AddSatU) \
+ V(I16x8AddHoriz, kRiscvI16x8AddHoriz) \
+ V(I16x8Sub, kRiscvI16x8Sub) \
+ V(I16x8SubSatS, kRiscvI16x8SubSatS) \
+ V(I16x8SubSatU, kRiscvI16x8SubSatU) \
+ V(I16x8Mul, kRiscvI16x8Mul) \
+ V(I16x8MaxS, kRiscvI16x8MaxS) \
+ V(I16x8MinS, kRiscvI16x8MinS) \
+ V(I16x8MaxU, kRiscvI16x8MaxU) \
+ V(I16x8MinU, kRiscvI16x8MinU) \
+ V(I16x8Eq, kRiscvI16x8Eq) \
+ V(I16x8Ne, kRiscvI16x8Ne) \
+ V(I16x8GtS, kRiscvI16x8GtS) \
+ V(I16x8GeS, kRiscvI16x8GeS) \
+ V(I16x8GtU, kRiscvI16x8GtU) \
+ V(I16x8GeU, kRiscvI16x8GeU) \
+ V(I16x8RoundingAverageU, kRiscvI16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS, kRiscvI16x8Q15MulRSatS) \
+ V(I16x8SConvertI32x4, kRiscvI16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kRiscvI16x8UConvertI32x4) \
+ V(I8x16Add, kRiscvI8x16Add) \
+ V(I8x16AddSatS, kRiscvI8x16AddSatS) \
+ V(I8x16AddSatU, kRiscvI8x16AddSatU) \
+ V(I8x16Sub, kRiscvI8x16Sub) \
+ V(I8x16SubSatS, kRiscvI8x16SubSatS) \
+ V(I8x16SubSatU, kRiscvI8x16SubSatU) \
+ V(I8x16Mul, kRiscvI8x16Mul) \
+ V(I8x16MaxS, kRiscvI8x16MaxS) \
+ V(I8x16MinS, kRiscvI8x16MinS) \
+ V(I8x16MaxU, kRiscvI8x16MaxU) \
+ V(I8x16MinU, kRiscvI8x16MinU) \
+ V(I8x16Eq, kRiscvI8x16Eq) \
+ V(I8x16Ne, kRiscvI8x16Ne) \
+ V(I8x16GtS, kRiscvI8x16GtS) \
+ V(I8x16GeS, kRiscvI8x16GeS) \
+ V(I8x16GtU, kRiscvI8x16GtU) \
+ V(I8x16GeU, kRiscvI8x16GeU) \
+ V(I8x16RoundingAverageU, kRiscvI8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kRiscvI8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kRiscvI8x16UConvertI16x8) \
+ V(S128And, kRiscvS128And) \
+ V(S128Or, kRiscvS128Or) \
+ V(S128Xor, kRiscvS128Xor) \
+ V(S128AndNot, kRiscvS128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ RiscvOperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+ // If all bytes are zeros or ones, avoid emitting code for generic constants
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kRiscvS128Zero, dst);
+ } else if (all_ones) {
+ Emit(kRiscvS128AllOnes, dst);
+ } else {
+ Emit(kRiscvS128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvS128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kRiscv##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+SIMD_VISIT_SPLAT(F64x2)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kRiscv##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kRiscv##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+SIMD_VISIT_REPLACE_LANE(F64x2)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kRiscv##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kRiscvS128Select, node);
+}
+
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kRiscvS32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kRiscvS32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kRiscvS32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kRiscvS32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kRiscvS32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kRiscvS32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kRiscvS16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kRiscvS16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kRiscvS16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kRiscvS16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kRiscvS16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kRiscvS16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kRiscvS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kRiscvS16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kRiscvS8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kRiscvS8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kRiscvS8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kRiscvS8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kRiscvS8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kRiscvS8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kRiscvS8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ RiscvOperandGenerator g(this);
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+  // We don't want input 0 or input 1 to be the same as output, since we will
+  // modify output before doing the calculation.
+ Emit(kRiscvI8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ RiscvOperandGenerator g(this);
+ Emit(kRiscvShl32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kRiscvF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kRiscvF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kRiscvF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kRiscvF64x2Pmax, node);
+}
+
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) { \
+ UNREACHABLE(); \
+ } \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) { \
+ UNREACHABLE(); \
+ }
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
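
The riscv64 shuffle lowering above canonicalizes the 16-byte pattern and then scans a fixed table of architecture shuffles, comparing lanes under a mask that folds swizzle indices back onto the first operand. Below is a minimal standalone sketch of that matching step only; ShufflePattern, MatchShuffle and the opcode strings are illustrative stand-ins, not V8 types or APIs.

#include <array>
#include <cstdint>
#include <cstdio>

struct ShufflePattern {
  std::array<uint8_t, 16> lanes;
  const char* opcode;  // stand-in for the backend ArchOpcode
};

// A swizzle only reads the first operand, so lane indices are taken mod 16;
// a general shuffle may select from both operands (indices 0..31).
bool MatchShuffle(const uint8_t* shuffle, const ShufflePattern* table,
                  size_t num_entries, bool is_swizzle, const char** opcode) {
  uint8_t mask = is_swizzle ? 15 : 31;
  for (size_t i = 0; i < num_entries; ++i) {
    size_t j = 0;
    for (; j < 16; ++j) {
      if ((table[i].lanes[j] & mask) != (shuffle[j] & mask)) break;
    }
    if (j == 16) {  // every lane matched under the mask
      *opcode = table[i].opcode;
      return true;
    }
  }
  return false;
}

int main() {
  static const ShufflePattern kTable[] = {
      {{{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}},
       "S8x16PackEven"}};
  const uint8_t input[16] = {0,  2,  4,  6,  8,  10, 12, 14,
                             16, 18, 20, 22, 24, 26, 28, 30};
  const char* opcode = nullptr;
  if (MatchShuffle(input, kTable, 1, /*is_swizzle=*/false, &opcode)) {
    std::printf("matched %s\n", opcode);
  }
  return 0;
}

A pattern that matches none of the table entries falls through to the generic shuffle emission, as in VisitI8x16Shuffle above.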
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 9750b0d538..f7c5498e07 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1004,30 +1004,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ LoadU64(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CmpS64(scratch1,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&done);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ LoadU64(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void FlushPendingPushRegisters(TurboAssembler* tasm,
@@ -1077,8 +1053,7 @@ void AdjustStackPointerForTailCall(
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
S390OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
@@ -1235,13 +1210,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- if (opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -1358,7 +1327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1922,16 +1891,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
break;
case kS390_Cntlz32: {
- __ llgfr(i.OutputRegister(), i.InputRegister(0));
- __ flogr(r0, i.OutputRegister());
- __ AddS32(i.OutputRegister(), r0, Operand(-32));
- // No need to zero-ext b/c llgfr is done already
+ __ CountLeadingZerosU32(i.OutputRegister(), i.InputRegister(0), r0);
break;
}
#if V8_TARGET_ARCH_S390X
case kS390_Cntlz64: {
- __ flogr(r0, i.InputRegister(0));
- __ mov(i.OutputRegister(), r0);
+ __ CountLeadingZerosU64(i.OutputRegister(), i.InputRegister(0), r0);
break;
}
#endif
@@ -1991,42 +1956,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CanonicalizeNaN(result, value);
break;
}
- case kS390_StackClaim: {
- int num_slots = i.InputInt32(0);
- __ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
- frame_access_state()->IncreaseSPDelta(num_slots);
- break;
- }
- case kS390_Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- switch (op->representation()) {
- case MachineRepresentation::kFloat32:
- __ lay(sp, MemOperand(sp, -kSystemPointerSize));
- __ StoreF32(i.InputDoubleRegister(0), MemOperand(sp));
- break;
- case MachineRepresentation::kFloat64:
- __ lay(sp, MemOperand(sp, -kDoubleSize));
- __ StoreF64(i.InputDoubleRegister(0), MemOperand(sp));
- frame_access_state()->IncreaseSPDelta(kDoubleSize /
- kSystemPointerSize);
- break;
- case MachineRepresentation::kSimd128: {
- __ lay(sp, MemOperand(sp, -kSimd128Size));
- __ StoreV128(i.InputDoubleRegister(0), MemOperand(sp), kScratchReg);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- __ Push(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ case kS390_Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
+ MachineRepresentation rep = op->representation();
+ int pushed_slots = ElementSizeInPointers(rep);
+ // Slot-sized arguments are never padded but there may be a gap if
+ // the slot allocator reclaimed other padding slots. Adjust the stack
+ // here to skip any gap.
+ if (slots > pushed_slots) {
+ __ lay(sp,
+ MemOperand(sp, -((slots - pushed_slots) * kSystemPointerSize)));
}
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ __ StoreF32(i.InputDoubleRegister(1), MemOperand(sp));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ lay(sp, MemOperand(sp, -kDoubleSize));
+ __ StoreF64(i.InputDoubleRegister(1), MemOperand(sp));
+ break;
+ case MachineRepresentation::kSimd128:
+ __ lay(sp, MemOperand(sp, -kSimd128Size));
+ __ StoreV128(i.InputDoubleRegister(1), MemOperand(sp), kScratchReg);
+ break;
+ default:
+ __ Push(i.InputRegister(1));
+ break;
+ }
+ frame_access_state()->IncreaseSPDelta(slots);
break;
+ }
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
__ lay(sp, MemOperand(sp, -num_slots * kSystemPointerSize));
@@ -2335,7 +2297,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ lrvg(r0, operand);
__ lrvg(r1, MemOperand(operand.rx(), operand.rb(),
- operand.offset() + kBitsPerByte));
+ operand.offset() + kSystemPointerSize));
__ vlvgp(i.OutputSimd128Register(), r1, r0);
}
break;
@@ -2402,7 +2364,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(3));
__ strvg(r0, operand);
__ strvg(r1, MemOperand(operand.rx(), operand.rb(),
- operand.offset() + kBitsPerByte));
+ operand.offset() + kSystemPointerSize));
}
break;
}
@@ -2672,13 +2634,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_F32x4Splat: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(0),
Condition(2));
-#else
- __ vrep(i.OutputSimd128Register(), i.InputDoubleRegister(0), Operand(1),
- Condition(2));
-#endif
break;
}
case kS390_I64x2Splat: {
@@ -2707,84 +2664,44 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
// vector extract element
case kS390_F64x2ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
Operand(1 - i.InputInt8(1)), Condition(3));
-#else
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_F32x4ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
Operand(3 - i.InputInt8(1)), Condition(2));
-#else
- __ vrep(i.OutputDoubleRegister(), i.InputSimd128Register(0),
- Operand(i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I64x2ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_I32x4ExtractLane: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I16x8ExtractLaneU: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
break;
}
case kS390_I16x8ExtractLaneS: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
__ lghr(i.OutputRegister(), kScratchReg);
break;
}
case kS390_I8x16ExtractLaneU: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlgv(i.OutputRegister(), i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
break;
}
case kS390_I8x16ExtractLaneS: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputSimd128Register(0),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlgv(kScratchReg, i.InputSimd128Register(0),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
__ lgbr(i.OutputRegister(), kScratchReg);
break;
}
@@ -2795,13 +2712,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
__ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
Condition(3));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(kScratchDoubleReg, kScratchReg,
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
- Condition(3));
-#endif
__ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
break;
}
@@ -2809,17 +2721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 0),
Condition(2));
__ vlvg(kScratchDoubleReg, kScratchReg,
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlgv(kScratchReg, i.InputDoubleRegister(2), MemOperand(r0, 1),
- Condition(2));
- __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, i.InputInt8(1)),
- Condition(2));
-#endif
__ vlr(dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
break;
}
@@ -2829,13 +2734,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 1 - i.InputInt8(1)), Condition(3));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(3));
-#endif
break;
}
case kS390_I32x4ReplaceLane: {
@@ -2844,13 +2744,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 3 - i.InputInt8(1)), Condition(2));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(2));
-#endif
break;
}
case kS390_I16x8ReplaceLane: {
@@ -2859,13 +2754,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 7 - i.InputInt8(1)), Condition(1));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(1));
-#endif
break;
}
case kS390_I8x16ReplaceLane: {
@@ -2874,13 +2764,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src != dst) {
__ vlr(dst, src, Condition(0), Condition(0), Condition(0));
}
-#ifdef V8_TARGET_BIG_ENDIAN
__ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
MemOperand(r0, 15 - i.InputInt8(1)), Condition(0));
-#else
- __ vlvg(i.OutputSimd128Register(), i.InputRegister(2),
- MemOperand(r0, i.InputInt8(1)), Condition(0));
-#endif
break;
}
// vector binops
@@ -2942,13 +2827,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
-#define FLOAT_ADD_HORIZ(src0, src1, scratch0, scratch1, add0, add1) \
- __ vpk(dst, src0, src1, Condition(0), Condition(0), Condition(3)); \
- __ vesrl(scratch0, src0, MemOperand(r0, shift_bits), Condition(3)); \
- __ vesrl(scratch1, src1, MemOperand(r0, shift_bits), Condition(3)); \
- __ vpk(kScratchDoubleReg, scratch0, scratch1, Condition(0), Condition(0), \
- Condition(3)); \
- __ vfa(dst, add0, add1, Condition(0), Condition(0), Condition(2));
case kS390_F32x4AddHoriz: {
Simd128Register src0 = i.InputSimd128Register(0);
Simd128Register src1 = i.InputSimd128Register(1);
@@ -2956,14 +2834,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DoubleRegister tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
DoubleRegister tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
constexpr int shift_bits = 32;
-#ifdef V8_TARGET_BIG_ENDIAN
- FLOAT_ADD_HORIZ(src1, src0, tempFPReg2, tempFPReg1, kScratchDoubleReg,
- dst)
-#else
- FLOAT_ADD_HORIZ(src0, src1, tempFPReg1, tempFPReg2, dst,
- kScratchDoubleReg)
-#endif
-#undef FLOAT_ADD_HORIZ
+ __ vpk(dst, src1, src0, Condition(0), Condition(0), Condition(3));
+ __ vesrl(tempFPReg2, src1, MemOperand(r0, shift_bits), Condition(3));
+ __ vesrl(tempFPReg1, src0, MemOperand(r0, shift_bits), Condition(3));
+ __ vpk(kScratchDoubleReg, tempFPReg2, tempFPReg1, Condition(0),
+ Condition(0), Condition(3));
+ __ vfa(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
+ Condition(2));
break;
}
case kS390_F32x4Sub: {
@@ -3055,13 +2932,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ vsumg(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(2));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(3));
-#else
- __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(3));
-#endif
break;
}
case kS390_I32x4Sub: {
@@ -3092,13 +2964,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(1));
__ vsum(kScratchDoubleReg, src1, kScratchDoubleReg, Condition(0),
Condition(0), Condition(1));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpk(dst, kScratchDoubleReg, dst, Condition(0), Condition(0),
Condition(2));
-#else
- __ vpk(dst, dst, kScratchDoubleReg, Condition(0), Condition(0),
- Condition(2));
-#endif
break;
}
case kS390_I16x8Sub: {
@@ -3276,6 +3143,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
+ case kS390_I64x2Ne: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ i.OutputSimd128Register(), Condition(0), Condition(0),
+ Condition(3));
+ break;
+ }
case kS390_I32x4Ne: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3312,6 +3187,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
+ case kS390_I64x2GtS: {
+ __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(0), Condition(3));
+ break;
+ }
+ case kS390_I64x2GeS: {
+ // Compute !(B > A) which is equal to A >= B.
+ __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(3));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(3));
+ break;
+ }
case kS390_I32x4GtS: {
__ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3503,11 +3391,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_F32x4RecipApprox: {
__ mov(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
-#else
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
-#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg,
i.InputSimd128Register(0), Condition(0), Condition(0),
Condition(2));
@@ -3519,11 +3403,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
__ mov(kScratchReg, Operand(1));
__ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
-#ifdef V8_TARGET_BIG_ENDIAN
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
-#else
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(1), Condition(2));
-#endif
__ vfd(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(0), Condition(2));
break;
@@ -3554,10 +3434,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
break;
}
+ case kS390_I64x2Abs: {
+ __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(3));
+ break;
+ }
// vector boolean unops
- case kS390_V32x4AnyTrue:
- case kS390_V16x8AnyTrue:
- case kS390_V8x16AnyTrue: {
+ case kS390_V128AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
Register temp = i.TempRegister(0);
@@ -3580,6 +3463,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
+ case kS390_V64x2AllTrue: {
+ SIMD_ALL_TRUE(3)
+ break;
+ }
case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
@@ -3616,17 +3503,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_S128Const: {
-#ifdef V8_TARGET_BIG_ENDIAN
for (int index = 0, j = 0; index < 2; index++, j = +2) {
__ mov(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
__ iihf(index < 1 ? ip : r0, Operand(i.InputInt32(j + 1)));
}
-#else
- for (int index = 0, j = 0; index < 2; index++, j = +2) {
- __ mov(index < 1 ? r0 : ip, Operand(i.InputInt32(j)));
- __ iihf(index < 1 ? r0 : ip, Operand(i.InputInt32(j + 1)));
- }
-#endif
__ vlvgp(i.OutputSimd128Register(), r0, ip);
break;
}
@@ -3655,70 +3535,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector conversions
-#define CONVERT_FLOAT_TO_INT32(convert) \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, kScratchDoubleReg, MemOperand(r0, index), \
- Condition(2)); \
- __ MovIntToFloat(tempFPReg1, kScratchReg); \
- __ convert(kScratchReg, tempFPReg1, kRoundToZero); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
case kS390_I32x4SConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0
__ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
__ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(2));
__ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
Condition(0), Condition(0));
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToInt32)
+ __ vcgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
break;
}
case kS390_I32x4UConvertF32x4: {
Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// NaN to 0, negative to 0
__ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
__ vfmax(kScratchDoubleReg, src, kScratchDoubleReg, Condition(1),
Condition(0), Condition(2));
- CONVERT_FLOAT_TO_INT32(ConvertFloat32ToUnsignedInt32)
- break;
- }
-#undef CONVERT_FLOAT_TO_INT32
-#define CONVERT_INT32_TO_FLOAT(convert, double_index) \
- Simd128Register src = i.InputSimd128Register(0); \
- Simd128Register dst = i.OutputSimd128Register(); \
- for (int index = 0; index < 4; index++) { \
- __ vlgv(kScratchReg, src, MemOperand(r0, index), Condition(2)); \
- __ convert(kScratchDoubleReg, kScratchReg); \
- __ MovFloatToInt(kScratchReg, kScratchDoubleReg); \
- __ vlvg(dst, kScratchReg, MemOperand(r0, index), Condition(2)); \
- }
+ __ vclgd(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(2));
+ break;
+ }
case kS390_F32x4SConvertI32x4: {
-#ifdef V8_TARGET_BIG_ENDIAN
- CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 0)
-#else
- CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, 1)
-#endif
+ __ vcdg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
break;
}
case kS390_F32x4UConvertI32x4: {
-#ifdef V8_TARGET_BIG_ENDIAN
- CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 0)
-#else
- CONVERT_INT32_TO_FLOAT(ConvertUnsignedIntToFloat, 1)
-#endif
+ __ vcdlg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(4), Condition(0), Condition(2));
break;
}
-#undef CONVERT_INT32_TO_FLOAT
#define VECTOR_UNPACK(op, mode) \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0), \
Condition(0), Condition(mode));
+ case kS390_I64x2SConvertI32x4Low: {
+ VECTOR_UNPACK(vupl, 2)
+ break;
+ }
+ case kS390_I64x2SConvertI32x4High: {
+ VECTOR_UNPACK(vuph, 2)
+ break;
+ }
+ case kS390_I64x2UConvertI32x4Low: {
+ VECTOR_UNPACK(vupll, 2)
+ break;
+ }
+ case kS390_I64x2UConvertI32x4High: {
+ VECTOR_UNPACK(vuplh, 2)
+ break;
+ }
case kS390_I32x4SConvertI16x8Low: {
VECTOR_UNPACK(vupl, 1)
break;
@@ -3753,22 +3621,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef VECTOR_UNPACK
case kS390_I16x8SConvertI32x4:
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0), Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
-#endif
break;
case kS390_I8x16SConvertI16x8:
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(0), Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
-#endif
break;
#define VECTOR_PACK_UNSIGNED(mode) \
Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
@@ -3781,25 +3639,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_I16x8UConvertI32x4: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(2)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I8x16UConvertI16x8: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
#undef VECTOR_PACK_UNSIGNED
@@ -3822,35 +3670,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(mode + 1));
case kS390_I16x8AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 1)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I16x8SubSatU: {
@@ -3862,46 +3695,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(2));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(2));
-#endif
break;
}
case kS390_I8x16AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 0)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-#endif
break;
}
case kS390_I8x16SubSatU: {
@@ -3913,14 +3726,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(1));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(1));
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
-#else
- __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
- Condition(0), Condition(1));
-
-#endif
break;
}
#undef BINOP_EXTRACT
@@ -3932,13 +3739,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt32(4), i.InputInt32(5)};
// create 2 * 8 byte inputs indicating new indices
for (int i = 0, j = 0; i < 2; i++, j = +2) {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
__ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
-#else
- __ mov(i < 1 ? r0 : ip, Operand(k8x16_indices[j]));
- __ iihf(i < 1 ? r0 : ip, Operand(k8x16_indices[j + 1]));
-#endif
}
__ vlvgp(kScratchDoubleReg, r0, ip);
__ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
@@ -3954,7 +3756,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
__ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
Condition(0));
-#ifdef V8_TARGET_BIG_ENDIAN
// input needs to be reversed
__ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
__ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
@@ -3966,22 +3767,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(0));
__ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
Condition(0));
-#else
- __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
- Condition(0), Condition(0), Condition(0));
- __ vperm(dst, src0, kScratchDoubleReg, tempFPReg1, Condition(0),
- Condition(0));
-#endif
break;
}
case kS390_I64x2BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x80800040));
__ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
-#else
- __ mov(kScratchReg, Operand(0x80808080));
- __ iihf(kScratchReg, Operand(0x40008080));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -3990,13 +3780,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I32x4BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x204060));
__ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
-#else
- __ mov(kScratchReg, Operand(0x80808080));
- __ iihf(kScratchReg, Operand(0x60402000));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4005,13 +3790,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I16x8BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(kScratchReg, Operand(0x40506070));
__ iihf(kScratchReg, Operand(0x102030));
-#else
- __ mov(kScratchReg, Operand(0x30201000));
- __ iihf(kScratchReg, Operand(0x70605040));
-#endif
__ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4020,17 +3800,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I8x16BitMask: {
-#ifdef V8_TARGET_BIG_ENDIAN
__ mov(r0, Operand(0x60687078));
__ iihf(r0, Operand(0x40485058));
__ mov(ip, Operand(0x20283038));
__ iihf(ip, Operand(0x81018));
-#else
- __ mov(ip, Operand(0x58504840));
- __ iihf(ip, Operand(0x78706860));
- __ mov(r0, Operand(0x18100800));
- __ iihf(r0, Operand(0x38302820));
-#endif
__ vlvgp(kScratchDoubleReg, ip, r0);
__ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
@@ -4240,14 +4013,108 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrepi(tempFPReg2, Operand(0x4000), Condition(2));
Q15_MUL_ROAUND(kScratchDoubleReg, vupl)
Q15_MUL_ROAUND(dst, vuph)
-#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(2));
-#else
- __ vpks(dst, kScratchDoubleReg, dst, Condition(0), Condition(2));
-#endif
break;
}
#undef Q15_MUL_ROAUND
+#define SIGN_SELECT(mode) \
+ Simd128Register src0 = i.InputSimd128Register(0); \
+ Simd128Register src1 = i.InputSimd128Register(1); \
+ Simd128Register src2 = i.InputSimd128Register(2); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg, Condition(0), \
+ Condition(0), Condition(3)); \
+ __ vch(kScratchDoubleReg, kScratchDoubleReg, src2, Condition(0), \
+ Condition(mode)); \
+ __ vsel(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
+ case kS390_I8x16SignSelect: {
+ SIGN_SELECT(0)
+ break;
+ }
+ case kS390_I16x8SignSelect: {
+ SIGN_SELECT(1)
+ break;
+ }
+ case kS390_I32x4SignSelect: {
+ SIGN_SELECT(2)
+ break;
+ }
+ case kS390_I64x2SignSelect: {
+ SIGN_SELECT(3)
+ break;
+ }
+#undef SIGN_SELECT
+ case kS390_I8x16Popcnt: {
+ __ vpopct(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Condition(0), Condition(0), Condition(0));
+ break;
+ }
+ case kS390_F64x2ConvertLowI32x4S: {
+ __ vupl(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(2));
+ __ vcdg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2ConvertLowI32x4U: {
+ __ vupll(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0),
+ Condition(0), Condition(2));
+ __ vcdlg(i.OutputSimd128Register(), kScratchDoubleReg, Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2PromoteLowF32x4: {
+ Register holder = r1;
+ for (int index = 0; index < 2; ++index) {
+ __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index + 2),
+ Condition(2));
+ __ MovIntToFloat(kScratchDoubleReg, r0);
+ __ ldebr(kScratchDoubleReg, kScratchDoubleReg);
+ __ MovDoubleToInt64(holder, kScratchDoubleReg);
+ holder = ip;
+ }
+ __ vlvgp(i.OutputSimd128Register(), r1, ip);
+ break;
+ }
+ case kS390_F32x4DemoteF64x2Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Register holder = r1;
+ for (int index = 0; index < 2; ++index) {
+ __ vlgv(r0, i.InputSimd128Register(0), MemOperand(r0, index),
+ Condition(3));
+ __ MovInt64ToDouble(kScratchDoubleReg, r0);
+ __ ledbr(kScratchDoubleReg, kScratchDoubleReg);
+ __ MovFloatToInt(holder, kScratchDoubleReg);
+ holder = ip;
+ }
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vlvg(dst, r1, MemOperand(r0, 2), Condition(2));
+ __ vlvg(dst, ip, MemOperand(r0, 3), Condition(2));
+ break;
+ }
+ case kS390_I32x4TruncSatF64x2SZero: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ // NaN to 0
+ __ vlr(kScratchDoubleReg, src, Condition(0), Condition(0), Condition(0));
+ __ vfce(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(3));
+ __ vn(kScratchDoubleReg, src, kScratchDoubleReg, Condition(0),
+ Condition(0), Condition(0));
+ __ vcgd(kScratchDoubleReg, kScratchDoubleReg, Condition(5), Condition(0),
+ Condition(3));
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vpks(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ break;
+ }
+ case kS390_I32x4TruncSatF64x2UZero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vclgd(kScratchDoubleReg, i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(3));
+ __ vx(dst, dst, dst, Condition(0), Condition(0), Condition(2));
+ __ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
+ break;
+ }
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -4361,7 +4228,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4561,7 +4428,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
if (FLAG_debug_code) {
__ stop();
}
@@ -4637,7 +4504,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = r5;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4645,9 +4511,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
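
The reworked kS390_Push case above derives the slot count from the stack decrement supplied by the selector; any slots beyond what the pushed value itself occupies are treated as a padding gap and skipped with a single stack adjustment before the store. A small sketch of that arithmetic follows, under assumed names (not V8's); the real code emits the adjustment as a `lay` on sp followed by the store.

#include <cstdio>

constexpr int kSystemPointerSize = 8;

// Given the decrement requested for this argument and the byte size of the
// value actually pushed, return how many padding bytes to skip first.
int PaddingAdjustment(int stack_decrement, int value_size) {
  int slots = stack_decrement / kSystemPointerSize;
  int pushed_slots =
      (value_size + kSystemPointerSize - 1) / kSystemPointerSize;
  return (slots - pushed_slots) * kSystemPointerSize;  // 0 if there is no gap
}

int main() {
  // A 16-byte SIMD value pushed with a 24-byte decrement leaves one
  // 8-byte padding slot to skip.
  std::printf("gap = %d bytes\n", PaddingAdjustment(24, 16));
  return 0;
}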
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index bed16450be..8068894b6b 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -92,7 +92,6 @@ namespace compiler {
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
- V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
V(S390_SignExtendWord8ToInt32) \
V(S390_SignExtendWord16ToInt32) \
@@ -215,6 +214,9 @@ namespace compiler {
V(S390_F64x2Floor) \
V(S390_F64x2Trunc) \
V(S390_F64x2NearestInt) \
+ V(S390_F64x2ConvertLowI32x4S) \
+ V(S390_F64x2ConvertLowI32x4U) \
+ V(S390_F64x2PromoteLowF32x4) \
V(S390_F32x4Splat) \
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
@@ -244,6 +246,7 @@ namespace compiler {
V(S390_F32x4Floor) \
V(S390_F32x4Trunc) \
V(S390_F32x4NearestInt) \
+ V(S390_F32x4DemoteF64x2Zero) \
V(S390_I64x2Neg) \
V(S390_I64x2Add) \
V(S390_I64x2Sub) \
@@ -260,6 +263,15 @@ namespace compiler {
V(S390_I64x2ExtMulHighI32x4S) \
V(S390_I64x2ExtMulLowI32x4U) \
V(S390_I64x2ExtMulHighI32x4U) \
+ V(S390_I64x2SConvertI32x4Low) \
+ V(S390_I64x2SConvertI32x4High) \
+ V(S390_I64x2UConvertI32x4Low) \
+ V(S390_I64x2UConvertI32x4High) \
+ V(S390_I64x2SignSelect) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2Abs) \
V(S390_I32x4Splat) \
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
@@ -296,6 +308,9 @@ namespace compiler {
V(S390_I32x4ExtMulHighI16x8U) \
V(S390_I32x4ExtAddPairwiseI16x8S) \
V(S390_I32x4ExtAddPairwiseI16x8U) \
+ V(S390_I32x4SignSelect) \
+ V(S390_I32x4TruncSatF64x2SZero) \
+ V(S390_I32x4TruncSatF64x2UZero) \
V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
@@ -338,6 +353,7 @@ namespace compiler {
V(S390_I16x8ExtAddPairwiseI8x16S) \
V(S390_I16x8ExtAddPairwiseI8x16U) \
V(S390_I16x8Q15MulRSatS) \
+ V(S390_I16x8SignSelect) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -370,12 +386,13 @@ namespace compiler {
V(S390_I8x16BitMask) \
V(S390_I8x16Shuffle) \
V(S390_I8x16Swizzle) \
- V(S390_V32x4AnyTrue) \
- V(S390_V16x8AnyTrue) \
- V(S390_V8x16AnyTrue) \
+ V(S390_I8x16SignSelect) \
+ V(S390_I8x16Popcnt) \
+ V(S390_V64x2AllTrue) \
V(S390_V32x4AllTrue) \
V(S390_V16x8AllTrue) \
V(S390_V8x16AllTrue) \
+ V(S390_V128AnyTrue) \
V(S390_S128And) \
V(S390_S128Or) \
V(S390_S128Xor) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 8c1c804760..de6abc56a3 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -161,6 +161,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F64x2Floor:
case kS390_F64x2Trunc:
case kS390_F64x2NearestInt:
+ case kS390_F64x2ConvertLowI32x4S:
+ case kS390_F64x2ConvertLowI32x4U:
+ case kS390_F64x2PromoteLowF32x4:
case kS390_F32x4Splat:
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
@@ -190,6 +193,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4Floor:
case kS390_F32x4Trunc:
case kS390_F32x4NearestInt:
+ case kS390_F32x4DemoteF64x2Zero:
case kS390_I64x2Neg:
case kS390_I64x2Add:
case kS390_I64x2Sub:
@@ -206,6 +210,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I64x2ExtMulHighI32x4S:
case kS390_I64x2ExtMulLowI32x4U:
case kS390_I64x2ExtMulHighI32x4U:
+ case kS390_I64x2SConvertI32x4Low:
+ case kS390_I64x2SConvertI32x4High:
+ case kS390_I64x2UConvertI32x4Low:
+ case kS390_I64x2UConvertI32x4High:
+ case kS390_I64x2SignSelect:
+ case kS390_I64x2Ne:
+ case kS390_I64x2GtS:
+ case kS390_I64x2GeS:
+ case kS390_I64x2Abs:
case kS390_I32x4Splat:
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
@@ -242,6 +255,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4ExtMulHighI16x8U:
case kS390_I32x4ExtAddPairwiseI16x8S:
case kS390_I32x4ExtAddPairwiseI16x8U:
+ case kS390_I32x4SignSelect:
+ case kS390_I32x4TruncSatF64x2SZero:
+ case kS390_I32x4TruncSatF64x2UZero:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -284,6 +300,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8ExtAddPairwiseI8x16S:
case kS390_I16x8ExtAddPairwiseI8x16U:
case kS390_I16x8Q15MulRSatS:
+ case kS390_I16x8SignSelect:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -316,12 +333,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16BitMask:
case kS390_I8x16Shuffle:
case kS390_I8x16Swizzle:
- case kS390_V32x4AnyTrue:
- case kS390_V16x8AnyTrue:
- case kS390_V8x16AnyTrue:
+ case kS390_I8x16SignSelect:
+ case kS390_I8x16Popcnt:
+ case kS390_V64x2AllTrue:
case kS390_V32x4AllTrue:
case kS390_V16x8AllTrue:
case kS390_V8x16AllTrue:
+ case kS390_V128AnyTrue:
case kS390_S128And:
case kS390_S128Or:
case kS390_S128Xor:
@@ -367,7 +385,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
- case kS390_StackClaim:
return kHasSideEffect;
case kS390_Word64AtomicExchangeUint8:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index c2dd218fd6..972d268014 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -703,7 +703,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode |= AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -2119,36 +2119,15 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Push any stack arguments.
- int num_slots = 0;
- int slot = 0;
-
-#define INPUT_SWITCH(param) \
- switch (input.location.GetType().representation()) { \
- case MachineRepresentation::kSimd128: \
- param += kSimd128Size / kSystemPointerSize; \
- break; \
- case MachineRepresentation::kFloat64: \
- param += kDoubleSize / kSystemPointerSize; \
- break; \
- default: \
- param += 1; \
- break; \
- }
- for (PushParameter input : *arguments) {
- if (input.node == nullptr) continue;
- INPUT_SWITCH(num_slots)
- }
- Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
- for (PushParameter input : *arguments) {
+ int stack_decrement = 0;
+ for (PushParameter input : base::Reversed(*arguments)) {
+ stack_decrement += kSystemPointerSize;
// Skip any alignment holes in pushed nodes.
- if (input.node) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
- g.TempImmediate(slot));
- INPUT_SWITCH(slot)
- }
+ if (input.node == nullptr) continue;
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
+ Emit(kS390_Push, g.NoOutput(), decrement, g.UseRegister(input.node));
}
-#undef INPUT_SWITCH
- DCHECK(num_slots == slot);
}
}
@@ -2159,8 +2138,6 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2451,6 +2428,9 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2ExtMulLowI32x4U) \
V(I64x2ExtMulHighI32x4U) \
V(I16x8Q15MulRSatS) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2520,38 +2500,54 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(S128Xor) \
V(S128AndNot)
-#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(F32x4Sqrt) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt) \
- V(I64x2Neg) \
- V(I16x8Abs) \
- V(I32x4Neg) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I32x4Abs) \
- V(I16x8Neg) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
- V(I8x16Neg) \
- V(I8x16Abs) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(F32x4Sqrt) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
+ V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2Abs) \
+ V(I32x4Neg) \
+ V(I32x4Abs) \
+ V(I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
+ V(I16x8Neg) \
+ V(I16x8Abs) \
+ V(I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
+ V(I8x16Neg) \
+ V(I8x16Abs) \
+ V(I8x16Popcnt) \
V(S128Not)
#define SIMD_SHIFT_LIST(V) \
@@ -2569,9 +2565,8 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2723,7 +2718,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
S390OperandGenerator g(this);
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
-#ifdef V8_TARGET_BIG_ENDIAN
// Remap the shuffle indices to match IBM lane numbering.
int max_index = 15;
int total_lane_count = 2 * kSimd128Size;
@@ -2735,7 +2729,6 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
: total_lane_count - current_index + max_index);
}
shuffle_p = &shuffle_remapped[0];
-#endif
Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node),
g.UseUniqueRegister(input0), g.UseUniqueRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p)),
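Illustrative sketch (not part of the patch): the big-endian lane remapping in this hunk can be modeled with plain integers. The surrounding loop is only partly visible here, so the helper name and the small driver below are hypothetical; the formula mirrors the visible `total_lane_count - current_index + max_index` branch and assumes the usual 16-byte vectors.

#include <cstdio>

constexpr int kSimd128Size = 16;

// Indices 0..15 select input0, 16..31 select input1; each half is mirrored
// so that little-endian shuffle indices line up with IBM lane numbering.
int RemapLaneForBigEndian(int index) {
  const int max_index = kSimd128Size - 1;         // 15
  const int total_lane_count = 2 * kSimd128Size;  // 32
  return index < kSimd128Size ? max_index - index
                              : total_lane_count - index + max_index;
}

int main() {
  for (int i = 0; i < 2 * kSimd128Size; ++i)
    std::printf("%2d -> %2d\n", i, RemapLaneForBigEndian(i));
}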
@@ -2817,11 +2810,21 @@ void InstructionSelector::EmitPrepareResults(
}
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ // We should never reach here, see http://crrev.com/c/2577820
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitLoadTransform(Node* node) {
// We should never reach here, see http://crrev.com/c/2050811
UNREACHABLE();
}
+void InstructionSelector::VisitStoreLane(Node* node) {
+ // We should never reach here, see http://crrev.com/c/2577820
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
S390OperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index e905c7194f..0a3e065bbe 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -7,6 +7,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/external-reference.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
@@ -339,7 +340,7 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
gen_->zone()->New<ReferenceMap>(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -364,8 +365,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -374,8 +374,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->andq(value, kSpeculationPoisonRegister);
@@ -710,28 +709,6 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ SmiUntag(caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3);
- __ bind(&done);
-}
-
namespace {
void AdjustStackPointerForTailCall(Instruction* instr,
@@ -923,13 +900,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallCodeObjectFromJSFunction:
- if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
- V8_FALLTHROUGH;
case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
@@ -1058,7 +1028,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ bind(&return_location);
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
- RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map());
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
@@ -1077,7 +1047,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
}
- // TODO(tebbi): Do we need an lfence here?
+ // TODO(turbofan): Do we need an lfence here?
break;
}
case kArchJmp:
@@ -2154,8 +2124,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
if (access_mode == kMemoryAccessPoisoned) {
// If we have to poison the loaded value, we load into a general
// purpose register first, mask it with the poison, and move the
@@ -2174,7 +2143,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movdqu: {
- CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
__ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
@@ -2294,59 +2262,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Inc32:
__ incl(i.OutputRegister());
break;
- case kX64Push:
- if (HasAddressingMode(instr)) {
- size_t index = 0;
+ case kX64Push: {
+ int stack_decrement = i.InputInt32(0);
+ int slots = stack_decrement / kSystemPointerSize;
+ // Whenever codegen uses pushq, we need to check if stack_decrement
+ // contains any extra padding and adjust the stack before the pushq.
+ if (HasImmediateInput(instr, 1)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputImmediate(1));
+ } else if (HasAddressingMode(instr)) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ pushq(operand);
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (HasImmediateInput(instr, 0)) {
- __ pushq(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (HasRegisterInput(instr, 0)) {
- __ pushq(i.InputRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- } else if (instr->InputAt(0)->IsFloatRegister() ||
- instr->InputAt(0)->IsDoubleRegister()) {
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kDoubleSize);
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kDoubleSize);
- __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else if (instr->InputAt(0)->IsSimd128Register()) {
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kSimd128Size);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSimd128Size);
- __ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
- } else if (instr->InputAt(0)->IsStackSlot() ||
- instr->InputAt(0)->IsFloatStackSlot() ||
- instr->InputAt(0)->IsDoubleStackSlot()) {
- __ pushq(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
} else {
- DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
- __ Movups(kScratchDoubleReg, i.InputOperand(0));
- // TODO(titzer): use another machine instruction?
- __ AllocateStackSpace(kSimd128Size);
- frame_access_state()->IncreaseSPDelta(kSimd128Size /
- kSystemPointerSize);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSimd128Size);
- __ Movups(Operand(rsp, 0), kScratchDoubleReg);
+ InstructionOperand* input = instr->InputAt(1);
+ if (input->IsRegister()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputRegister(1));
+ } else if (input->IsFloatRegister() || input->IsDoubleRegister()) {
+ DCHECK_GE(stack_decrement, kSystemPointerSize);
+ __ AllocateStackSpace(stack_decrement);
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ } else if (input->IsSimd128Register()) {
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ __ AllocateStackSpace(stack_decrement);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(Operand(rsp, 0), i.InputSimd128Register(1));
+ } else if (input->IsStackSlot() || input->IsFloatStackSlot() ||
+ input->IsDoubleStackSlot()) {
+ __ AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ __ pushq(i.InputOperand(1));
+ } else {
+ DCHECK(input->IsSimd128StackSlot());
+ DCHECK_GE(stack_decrement, kSimd128Size);
+ // TODO(bbudge) Use Movaps when slots are aligned.
+ __ Movups(kScratchDoubleReg, i.InputOperand(1));
+ __ AllocateStackSpace(stack_decrement);
+ __ Movups(Operand(rsp, 0), kScratchDoubleReg);
+ }
}
+ frame_access_state()->IncreaseSPDelta(slots);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ stack_decrement);
break;
+ }
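Illustrative sketch (not part of the patch): the reworked kX64Push makes input 0 carry the total stack decrement for this argument, i.e. the argument's own slot plus any padding folded in by the instruction selector. Because pushq itself moves rsp down by one slot, the case above pre-allocates stack_decrement - kSystemPointerSize before a pushq but the full decrement before a plain store. The helper name below is hypothetical.

#include <cassert>

constexpr int kSystemPointerSize = 8;

// Bytes to AllocateStackSpace before the actual push/store, given the total
// decrement attached as input 0 of kX64Push.
int PreAllocationBytes(int stack_decrement, bool uses_pushq) {
  assert(stack_decrement >= kSystemPointerSize);
  assert(stack_decrement % kSystemPointerSize == 0);
  return uses_pushq ? stack_decrement - kSystemPointerSize : stack_decrement;
}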
case kX64Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -2513,6 +2473,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F64x2ConvertLowI32x4S: {
+ __ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F64x2ConvertLowI32x4U: {
+ __ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F64x2PromoteLowF32x4: {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4DemoteF64x2Zero: {
+ __ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4TruncSatF64x2SZero: {
+ __ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kX64I32x4TruncSatF64x2UZero: {
+ __ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ break;
+ }
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputDoubleRegister(0);
@@ -2589,11 +2576,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psrld(kScratchDoubleReg, byte{1});
- __ Andps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Andps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Psrld(dst, byte{1});
- __ Andps(dst, i.InputSimd128Register(0));
+ __ Andps(dst, src);
}
break;
}
@@ -2603,11 +2590,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Pslld(kScratchDoubleReg, byte{31});
- __ Xorps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Xorps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Pslld(dst, byte{31});
- __ Xorps(dst, i.InputSimd128Register(0));
+ __ Xorps(dst, src);
}
break;
}
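Aside, illustrative only: the two hunks above build the F32x4 abs/neg masks without a constant pool by materializing all-ones (pcmpeqd reg, reg) and then shifting. A scalar model of the resulting per-lane operation, with hypothetical helper names:

#include <cstdint>
#include <cstring>

constexpr uint32_t kAbsMask = ~uint32_t{0} >> 1;    // psrld by 1  -> 0x7fffffff
constexpr uint32_t kSignMask = ~uint32_t{0} << 31;  // pslld by 31 -> 0x80000000

float LaneAbs(float x) {  // Andps: clear the sign bit
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= kAbsMask;
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

float LaneNeg(float x) {  // Xorps: flip the sign bit
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= kSignMask;
  std::memcpy(&x, &bits, sizeof x);
  return x;
}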
@@ -2775,6 +2762,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
+ case kX64I64x2Abs: {
+ __ I64x2Abs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kX64I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2848,9 +2839,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Eq: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
ASSEMBLE_SIMD_BINOP(pcmpeqq);
break;
}
+ case kX64I64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister tmp = i.TempSimd128Register(0);
+ __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ Pcmpeqq(tmp, tmp);
+ __ Pxor(i.OutputSimd128Register(), tmp);
+ break;
+ }
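Illustrative sketch (not part of the patch): kX64I64x2Ne is expressed as equality followed by a bitwise NOT, since there is no pcmpneqq. Per 64-bit lane, the sequence above is equivalent to the following scalar model (hypothetical function name):

#include <cstdint>

uint64_t I64x2NeLane(uint64_t a, uint64_t b) {
  uint64_t eq = (a == b) ? ~uint64_t{0} : uint64_t{0};  // Pcmpeqq(dst, src1)
  uint64_t all_ones = ~uint64_t{0};                     // Pcmpeqq(tmp, tmp)
  return eq ^ all_ones;                                 // Pxor(dst, tmp)
}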
+ case kX64I64x2GtS: {
+ __ I64x2GtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2GeS: {
+ __ I64x2GeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kX64I64x2ShrU: {
// Take shift value modulo 2^6.
ASSEMBLE_SIMD_SHIFT(psrlq, 6);
@@ -2885,15 +2895,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2SConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpunpckhqdq(dst, src, src);
- } else {
- __ pshufd(dst, src, 0xEE);
- }
- __ Pmovsxdq(dst, dst);
+ __ I64x2SConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I64x2UConvertI32x4Low: {
@@ -2901,17 +2904,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2UConvertI32x4High: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpunpckhdq(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pshufd(dst, src, 0xEE);
- __ pmovzxdq(dst, dst);
- }
+ __ I64x2UConvertI32x4High(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I32x4Splat: {
@@ -3106,31 +3100,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ExtAddPairwiseI16x8S: {
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // kScratchDoubleReg = |1|1|1|1|1|1|1|1|
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlw(kScratchDoubleReg, byte{15});
- // pmaddwd multiplies signed words in kScratchDoubleReg and src, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
+ XMMRegister src1 = i.InputSimd128Register(0);
+ // pmaddwd multiplies signed words in src1 and src2, producing signed
+ // doublewords, then adds pairwise.
+ // src1 = |a|b|c|d|e|f|g|h|
+ // src2 = |1|1|1|1|1|1|1|1|
// dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- __ Pmaddwd(dst, src, kScratchDoubleReg);
+ Operand src2 = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001());
+ __ Pmaddwd(dst, src1, src2);
break;
}
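Illustrative sketch (not part of the patch): the pmaddwd trick above multiplies each signed 16-bit lane by 1 (the splat constant loaded via the external reference) and sums adjacent products, which is exactly a signed pairwise widening add. Scalar model with a hypothetical function name:

#include <array>
#include <cstdint>

std::array<int32_t, 4> ExtAddPairwiseI16x8S(const std::array<int16_t, 8>& src) {
  std::array<int32_t, 4> dst{};
  for (int i = 0; i < 4; ++i) {
    // pmaddwd with splat(1): dst[i] = src[2*i] * 1 + src[2*i + 1] * 1
    dst[i] = int32_t{src[2 * i]} + int32_t{src[2 * i + 1]};
  }
  return dst;
}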
case kX64I32x4ExtAddPairwiseI16x8U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
-
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- __ Pand(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- __ Psrld(dst, src, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- __ Paddd(dst, kScratchDoubleReg);
+ __ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64S128Const: {
@@ -3244,9 +3227,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqw(dst, i.InputSimd128Register(1));
__ Pcmpeqw(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I16x8GtS: {
@@ -3352,35 +3336,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8ExtAddPairwiseI8x16S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- DCHECK_NE(dst, src);
- // dst = i8x16.splat(1)
- __ Move(dst, uint32_t{0x01010101});
- __ Pshufd(dst, dst, byte{0});
- __ Pmaddubsw(dst, dst, src);
+ __ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // dst = i8x16.splat(1)
- __ Move(kScratchDoubleReg, uint32_t{0x01010101});
- __ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- __ Pmaddubsw(dst, src, kScratchDoubleReg);
+ XMMRegister src1 = i.InputSimd128Register(0);
+ Operand src2 = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01());
+ __ Pmaddubsw(dst, src1, src2);
break;
}
case kX64I16x8Q15MulRSatS: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- // k = i16x8.splat(0x8000)
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllw(kScratchDoubleReg, byte{15});
-
- __ Pmulhrsw(dst, src0, src1);
- __ Pcmpeqw(kScratchDoubleReg, dst);
- __ Pxor(dst, kScratchDoubleReg);
+ __ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kX64I8x16Splat: {
@@ -3586,9 +3556,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Ne: {
XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pcmpeqb(dst, i.InputSimd128Register(1));
__ Pcmpeqb(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
+ __ Pxor(dst, tmp);
break;
}
case kX64I8x16GtS: {
@@ -3758,14 +3729,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
- __ Movdqa(kScratchDoubleReg, dst);
- __ Pcmpeqd(dst, dst);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Pxor(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
__ Pxor(dst, src);
}
-
break;
}
case kX64S128Select: {
@@ -3782,16 +3751,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister mask = i.TempSimd128Register(0);
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, uint32_t{0x70707070});
- __ Pshufd(mask, mask, uint8_t{0x0});
- __ Paddusb(mask, i.InputSimd128Register(1));
- __ Pshufb(dst, mask);
+ __ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
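Illustrative sketch (not part of the patch): the removed sequence (now folded into the macro-assembler's I8x16Swizzle helper) zeroes out-of-range indices by adding 0x70 with unsigned saturation, so any index greater than 15 ends up with its top bit set and pshufb writes zero for that lane. Scalar model of one lane, hypothetical function name:

#include <cstdint>

uint8_t SwizzleLane(const uint8_t src[16], uint8_t index) {
  // paddusb with splat(0x70): saturating unsigned add.
  uint8_t adjusted =
      index > 0xFF - 0x70 ? 0xFF : static_cast<uint8_t>(index + 0x70);
  // pshufb: a set top bit in the mask byte selects zero.
  return (adjusted & 0x80) ? 0 : src[adjusted & 0x0F];
}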
case kX64I8x16Shuffle: {
@@ -3841,6 +3802,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16Popcnt: {
+ __ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.TempSimd128Register(0));
+ break;
+ }
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
XMMRegister dst = i.OutputSimd128Register();
@@ -3919,12 +3885,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t lane = i.InputUint8(index + 1);
- if (lane == 0) {
- __ Movss(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_GE(3, lane);
- __ Extractps(operand, i.InputSimd128Register(index), lane);
- }
+ __ S128Store32Lane(operand, i.InputSimd128Register(index), lane);
break;
}
case kX64S128Store64Lane: {
@@ -3932,12 +3893,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
uint8_t lane = i.InputUint8(index + 1);
- if (lane == 0) {
- __ Movlps(operand, i.InputSimd128Register(index));
- } else {
- DCHECK_EQ(1, lane);
- __ Movhps(operand, i.InputSimd128Register(index));
- }
+ __ S128Store64Lane(operand, i.InputSimd128Register(index), lane);
break;
}
case kX64Shufps: {
@@ -4156,9 +4112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64V32x4AnyTrue:
- case kX64V16x8AnyTrue:
- case kX64V8x16AnyTrue: {
+ case kX64V128AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
@@ -4171,6 +4125,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
+ case kX64V64x2AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
+ break;
+ }
case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
@@ -4183,6 +4141,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
+ case kX64Prefetch:
+ __ prefetch(i.MemoryOperand(), 1);
+ break;
+ case kX64PrefetchNta:
+ __ prefetch(i.MemoryOperand(), 0);
+ break;
case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
@@ -4333,14 +4297,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord8:
case kWord32AtomicStoreWord16:
case kWord32AtomicStoreWord32:
- case kX64Word64AtomicLoadUint8:
- case kX64Word64AtomicLoadUint16:
- case kX64Word64AtomicLoadUint32:
- case kX64Word64AtomicLoadUint64:
- case kX64Word64AtomicStoreWord8:
- case kX64Word64AtomicStoreWord16:
- case kX64Word64AtomicStoreWord32:
- case kX64Word64AtomicStoreWord64:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
@@ -4663,7 +4619,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ near_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
- RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -4758,7 +4714,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
Register argc_reg = rcx;
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
// kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
@@ -4766,9 +4721,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
const bool drop_jsargs = frame_access_state()->has_frame() &&
call_descriptor->IsJSFunctionCall() &&
parameter_count != 0;
-#else
- const bool drop_jsargs = false;
-#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 375a81d096..6c48a04ea1 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -174,6 +174,9 @@ namespace compiler {
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F64x2PromoteLowF32x4) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -200,8 +203,10 @@ namespace compiler {
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
V(X64I64x2Neg) \
V(X64I64x2BitMask) \
V(X64I64x2Shl) \
@@ -210,6 +215,9 @@ namespace compiler {
V(X64I64x2Sub) \
V(X64I64x2Mul) \
V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
V(X64I64x2ShrU) \
V(X64I64x2SignSelect) \
V(X64I64x2ExtMulLowI32x4S) \
@@ -256,6 +264,8 @@ namespace compiler {
V(X64I32x4ExtMulHighI16x8U) \
V(X64I32x4ExtAddPairwiseI16x8S) \
V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
@@ -343,6 +353,7 @@ namespace compiler {
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
V(X64S128Load8Splat) \
V(X64S128Load16Splat) \
V(X64S128Load32Splat) \
@@ -382,20 +393,13 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64V32x4AnyTrue) \
+ V(X64V128AnyTrue) \
+ V(X64V64x2AllTrue) \
V(X64V32x4AllTrue) \
- V(X64V16x8AnyTrue) \
V(X64V16x8AllTrue) \
- V(X64V8x16AnyTrue) \
V(X64V8x16AllTrue) \
- V(X64Word64AtomicLoadUint8) \
- V(X64Word64AtomicLoadUint16) \
- V(X64Word64AtomicLoadUint32) \
- V(X64Word64AtomicLoadUint64) \
- V(X64Word64AtomicStoreWord8) \
- V(X64Word64AtomicStoreWord16) \
- V(X64Word64AtomicStoreWord32) \
- V(X64Word64AtomicStoreWord64) \
+ V(X64Prefetch) \
+ V(X64PrefetchNta) \
V(X64Word64AtomicAddUint8) \
V(X64Word64AtomicAddUint16) \
V(X64Word64AtomicAddUint32) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index e9ed7c9e85..2ecbab8f50 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -150,6 +150,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Pmin:
case kX64F64x2Pmax:
case kX64F64x2Round:
+ case kX64F64x2ConvertLowI32x4S:
+ case kX64F64x2ConvertLowI32x4U:
+ case kX64F64x2PromoteLowF32x4:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -176,8 +179,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Pmin:
case kX64F32x4Pmax:
case kX64F32x4Round:
+ case kX64F32x4DemoteF64x2Zero:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
+ case kX64I64x2Abs:
case kX64I64x2Neg:
case kX64I64x2BitMask:
case kX64I64x2Shl:
@@ -186,6 +191,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I64x2Sub:
case kX64I64x2Mul:
case kX64I64x2Eq:
+ case kX64I64x2GtS:
+ case kX64I64x2GeS:
+ case kX64I64x2Ne:
case kX64I64x2ShrU:
case kX64I64x2SignSelect:
case kX64I64x2ExtMulLowI32x4S:
@@ -232,6 +240,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ExtMulHighI16x8U:
case kX64I32x4ExtAddPairwiseI16x8S:
case kX64I32x4ExtAddPairwiseI16x8U:
+ case kX64I32x4TruncSatF64x2SZero:
+ case kX64I32x4TruncSatF64x2UZero:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneS:
case kX64I16x8SConvertI8x16Low:
@@ -311,12 +321,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64V32x4AnyTrue:
+ case kX64V64x2AllTrue:
case kX64V32x4AllTrue:
- case kX64V16x8AnyTrue:
case kX64V16x8AllTrue:
case kX64I8x16Swizzle:
case kX64I8x16Shuffle:
+ case kX64I8x16Popcnt:
case kX64Shufps:
case kX64S32x4Rotate:
case kX64S32x4Swizzle:
@@ -344,7 +354,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x8Reverse:
case kX64S8x4Reverse:
case kX64S8x2Reverse:
- case kX64V8x16AnyTrue:
+ case kX64V128AnyTrue:
case kX64V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
@@ -417,18 +427,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64MFence:
case kX64LFence:
+ case kX64Prefetch:
+ case kX64PrefetchNta:
return kHasSideEffect;
- case kX64Word64AtomicLoadUint8:
- case kX64Word64AtomicLoadUint16:
- case kX64Word64AtomicLoadUint32:
- case kX64Word64AtomicLoadUint64:
- return kIsLoadOperation;
-
- case kX64Word64AtomicStoreWord8:
- case kX64Word64AtomicStoreWord16:
- case kX64Word64AtomicStoreWord32:
- case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint8:
case kX64Word64AtomicAddUint16:
case kX64Word64AtomicAddUint32:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index e2d8cf27bf..5508357675 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -10,7 +10,9 @@
#include "src/base/platform/wrappers.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/backend/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -383,7 +385,7 @@ void InstructionSelector::VisitLoadLane(Node* node) {
// x64 supports unaligned loads.
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= MiscField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
Emit(opcode, 1, outputs, input_count, inputs);
}
@@ -435,7 +437,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
InstructionCode code = opcode;
if (params.kind == MemoryAccessKind::kProtected) {
- code |= MiscField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtected);
}
VisitLoad(node, node, code);
}
@@ -450,10 +452,10 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
- code |= MiscField::encode(kMemoryAccessProtected);
+ code |= AccessModeField::encode(kMemoryAccessProtected);
} else if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= MiscField::encode(kMemoryAccessPoisoned);
+ code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
@@ -528,7 +530,7 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
- MiscField::encode(kMemoryAccessProtected);
+ AccessModeField::encode(kMemoryAccessProtected);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
@@ -565,7 +567,7 @@ void InstructionSelector::VisitStoreLane(Node* node) {
opcode |= AddressingModeField::encode(addressing_mode);
if (params.kind == MemoryAccessKind::kProtected) {
- opcode |= MiscField::encode(kMemoryAccessProtected);
+ opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
@@ -575,6 +577,30 @@ void InstructionSelector::VisitStoreLane(Node* node) {
Emit(opcode, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitPrefetchTemporal(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionCode opcode = kX64Prefetch;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
+void InstructionSelector::VisitPrefetchNonTemporal(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionCode opcode = kX64PrefetchNta;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ DCHECK_LE(input_count, 2);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -1795,29 +1821,33 @@ void InstructionSelector::EmitPrepareArguments(
} else {
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
+ int stack_decrement = 0;
for (PushParameter input : base::Reversed(*arguments)) {
- // Skip any alignment holes in pushed nodes. We may have one in case of a
- // Simd128 stack argument.
+ stack_decrement += kSystemPointerSize;
+ // Skip holes in the param array. These represent both extra slots for
+ // multi-slot values and padding slots for alignment.
if (input.node == nullptr) continue;
+ InstructionOperand decrement = g.UseImmediate(stack_decrement);
+ stack_decrement = 0;
if (g.CanBeImmediate(input.node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseRegister(input.node));
} else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
- InstructionOperand inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionCode opcode = kX64Push;
+ inputs[input_count++] = decrement;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count);
- opcode |= AddressingModeField::encode(mode);
+ InstructionCode opcode = kX64Push | AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.UseAny(input.node));
+ Emit(kX64Push, g.NoOutput(), decrement, g.UseAny(input.node));
}
}
}
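Illustrative sketch (not part of the patch): the selector now walks the arguments in reverse, accumulates one slot of pending decrement per entry, and attaches the accumulated value to the next real push; holes stand for padding or the extra slots of multi-slot values. A simplified model, with hypothetical types and names:

#include <optional>
#include <vector>

struct PushOp {
  int stack_decrement;  // bytes to drop before/while pushing this value
  int value;
};

std::vector<PushOp> PlanPushes(const std::vector<std::optional<int>>& args,
                               int slot_size = 8) {
  std::vector<PushOp> ops;
  int pending = 0;
  for (auto it = args.rbegin(); it != args.rend(); ++it) {  // base::Reversed
    pending += slot_size;
    if (!it->has_value()) continue;  // hole: fold its slot into the next push
    ops.push_back({pending, **it});
    pending = 0;  // the emitted push consumed the accumulated decrement
  }
  return ops;
}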
@@ -1850,8 +1880,6 @@ void InstructionSelector::EmitPrepareResults(
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-
namespace {
void VisitCompareWithMemoryOperand(InstructionSelector* selector,
@@ -2894,6 +2922,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16GeU)
#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+ V(I64x2Ne) \
V(I32x4Ne) \
V(I32x4GtU) \
V(I16x8Ne) \
@@ -2903,12 +2932,15 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Neg) \
V(I64x2BitMask) \
V(I64x2SConvertI32x4Low) \
@@ -2947,12 +2979,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16Shl) \
V(I8x16ShrU)
-#define SIMD_ANYTRUE_LIST(V) \
- V(V32x4AnyTrue) \
- V(V16x8AnyTrue) \
- V(V8x16AnyTrue)
-
#define SIMD_ALLTRUE_LIST(V) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -3142,15 +3170,11 @@ SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
#undef VISIT_SIMD_BINOP_ONE_TEMP
#undef SIMD_BINOP_ONE_TEMP_LIST
-#define VISIT_SIMD_ANYTRUE(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0))); \
- }
-SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
-#undef VISIT_SIMD_ANYTRUE
-#undef SIMD_ANYTRUE_LIST
+void InstructionSelector::VisitV128AnyTrue(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64V128AnyTrue, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+}
#define VISIT_SIMD_ALLTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3628,10 +3652,9 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64I8x16Swizzle, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
+ Emit(kX64I8x16Swizzle,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
namespace {
@@ -3693,6 +3716,85 @@ void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
Emit(kX64I16x8ExtAddPairwiseI8x16U, dst, g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+  InstructionOperand dst = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ Emit(kX64I8x16Popcnt, dst, g.UseUniqueRegister(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kX64F64x2ConvertLowI32x4U, dst, g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ // Requires dst != src.
+ Emit(kX64I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kX64I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
+}
+
+void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst = CpuFeatures::IsSupported(AVX)
+ ? g.DefineAsRegister(node)
+ : g.DefineSameAsFirst(node);
+ Emit(kX64I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitI64x2GtS(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2GtS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kX64I64x2GtS, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kX64I64x2GtS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2GeS(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ } else {
+ Emit(kX64I64x2GeS, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ }
+}
+
+void InstructionSelector::VisitI64x2Abs(Node* node) {
+ X64OperandGenerator g(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ Emit(kX64I64x2Abs, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)));
+ } else {
+ Emit(kX64I64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+ }
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 2d9b026dfa..86ec47979f 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -92,7 +92,8 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
} else {
counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
}
- Node* one = graph->NewNode(common.Float64Constant(1));
+ Node* zero = graph->NewNode(common.Int32Constant(0));
+ Node* one = graph->NewNode(common.Int32Constant(1));
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
@@ -104,26 +105,37 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
// It is unnecessary to wire effect and control deps for load and store
// since this happens after scheduling.
// Construct increment operation.
- int offset_to_counter_value = static_cast<int>(block_number) * kDoubleSize;
+ int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
if (on_heap_counters) {
offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
}
Node* offset_to_counter =
graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
Node* load =
- graph->NewNode(machine.Load(MachineType::Float64()), counters_array,
+ graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
offset_to_counter, graph->start(), graph->start());
- Node* inc = graph->NewNode(machine.Float64Add(), load, one);
- Node* store = graph->NewNode(
- machine.Store(StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier)),
- counters_array, offset_to_counter, inc, graph->start(), graph->start());
+ Node* inc = graph->NewNode(machine.Int32Add(), load, one);
+
+ // Branchless saturation, because we've already run the scheduler, so
+ // introducing extra control flow here would be surprising.
+ Node* overflow = graph->NewNode(machine.Uint32LessThan(), inc, load);
+ Node* overflow_mask = graph->NewNode(machine.Int32Sub(), zero, overflow);
+ Node* saturated_inc =
+ graph->NewNode(machine.Word32Or(), inc, overflow_mask);
+
+ Node* store =
+ graph->NewNode(machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ counters_array, offset_to_counter, saturated_inc,
+ graph->start(), graph->start());
// Insert the new nodes.
- static const int kArraySize = 6;
- Node* to_insert[kArraySize] = {counters_array, one, offset_to_counter,
- load, inc, store};
- // The first two Nodes are constant across all blocks.
- int insertion_start = block_number == 0 ? 0 : 2;
+ static const int kArraySize = 10;
+ Node* to_insert[kArraySize] = {
+ counters_array, zero, one, offset_to_counter,
+ load, inc, overflow, overflow_mask,
+ saturated_inc, store};
+ // The first three Nodes are constant across all blocks.
+ int insertion_start = block_number == 0 ? 0 : 3;
NodeVector::iterator insertion_point = FindInsertionPoint(block);
block->InsertNodes(insertion_point, &to_insert[insertion_start],
&to_insert[kArraySize]);
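Illustrative sketch (not part of the patch): the branchless saturation wired into the graph above has a direct scalar reading. If the 32-bit increment wraps, Uint32LessThan(inc, load) yields 1, subtracting that from zero gives an all-ones mask, and OR-ing pins the counter at UINT32_MAX with no extra control flow:

#include <cstdint>

uint32_t SaturatingIncrement(uint32_t counter) {
  uint32_t inc = counter + 1;                   // Int32Add(load, one)
  uint32_t overflow = inc < counter ? 1u : 0u;  // Uint32LessThan(inc, load)
  uint32_t mask = 0u - overflow;                // Int32Sub(zero, overflow)
  return inc | mask;                            // Word32Or(inc, overflow_mask)
}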
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index e76189a31f..8489a72658 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,7 +79,7 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
}
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
- Zone* zone, BailoutId osr_bailout_id,
+ Zone* zone, BytecodeOffset osr_bailout_id,
bool analyze_liveness)
: bytecode_array_(bytecode_array),
zone_(zone),
@@ -166,6 +166,11 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState* in_liveness,
}
}
+ if (Bytecodes::WritesImplicitRegister(bytecode)) {
+ in_liveness->MarkRegisterDead(
+ interpreter::Register::FromShortStar(bytecode).index());
+ }
+
if (Bytecodes::ReadsAccumulator(bytecode)) {
in_liveness->MarkAccumulatorLive();
}
@@ -308,6 +313,10 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments* assignments,
break;
}
}
+
+ if (Bytecodes::WritesImplicitRegister(bytecode)) {
+ assignments->Add(interpreter::Register::FromShortStar(bytecode));
+ }
}
} // namespace
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index a05194f832..0e9043a16a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -99,7 +99,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
- BailoutId osr_bailout_id, bool analyze_liveness);
+ BytecodeOffset osr_bailout_id, bool analyze_liveness);
BytecodeAnalysis(const BytecodeAnalysis&) = delete;
BytecodeAnalysis& operator=(const BytecodeAnalysis&) = delete;
@@ -128,7 +128,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
return osr_entry_point_;
}
// Return the osr_bailout_id (for verification purposes).
- BailoutId osr_bailout_id() const { return osr_bailout_id_; }
+ BytecodeOffset osr_bailout_id() const { return osr_bailout_id_; }
// Return whether liveness analysis was performed (for verification purposes).
bool liveness_analyzed() const { return analyze_liveness_; }
@@ -167,7 +167,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
Handle<BytecodeArray> const bytecode_array_;
Zone* const zone_;
- BailoutId const osr_bailout_id_;
+ BytecodeOffset const osr_bailout_id_;
bool const analyze_liveness_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 52acfc847e..54996bb475 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -15,6 +15,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
@@ -38,11 +39,12 @@ class BytecodeGraphBuilder {
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
CodeKind code_kind, BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter);
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info);
BytecodeGraphBuilder(const BytecodeGraphBuilder&) = delete;
BytecodeGraphBuilder& operator=(const BytecodeGraphBuilder&) = delete;
@@ -207,10 +209,54 @@ class BytecodeGraphBuilder {
// Prepare information for lazy deoptimization. This information is attached
// to the given node and the output value produced by the node is combined.
- // Conceptually this frame state is "after" a given operation.
- void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+ //
+ // The low-level chokepoint - use the variants below instead.
void PrepareFrameState(Node* node, OutputFrameStateCombine combine,
- BailoutId bailout_id);
+ BytecodeOffset bailout_id,
+ const BytecodeLivenessState* liveness);
+
+ // In the common case, frame states are conceptually "after" a given
+ // operation and at the current bytecode offset.
+ void PrepareFrameState(Node* node, OutputFrameStateCombine combine) {
+ if (!OperatorProperties::HasFrameStateInput(node->op())) return;
+ const int offset = bytecode_iterator().current_offset();
+ return PrepareFrameState(node, combine, BytecodeOffset(offset),
+ bytecode_analysis().GetOutLivenessFor(offset));
+ }
+
+ // For function-entry stack checks, they're conceptually "before" the first
+ // bytecode and at a special marker bytecode offset.
+ // In the case of FE stack checks, the current bytecode is also the first
+ // bytecode, so we use a special marker bytecode offset to signify a virtual
+ // bytecode before the first physical bytecode.
+ void PrepareFrameStateForFunctionEntryStackCheck(Node* node) {
+ DCHECK_EQ(bytecode_iterator().current_offset(), 0);
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ DCHECK(node->opcode() == IrOpcode::kJSStackCheck);
+ return PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
+ BytecodeOffset(kFunctionEntryBytecodeOffset),
+ bytecode_analysis().GetInLivenessFor(0));
+ }
+
+ // For OSR-entry stack checks, they're conceptually "before" the first
+ // bytecode of the current loop. We implement this in a similar manner to
+ // function-entry (FE) stack checks above, i.e. we deopt at the predecessor
+ // of the current bytecode.
+ // In the case of OSR-entry stack checks, a physical predecessor bytecode
+ // exists: the JumpLoop bytecode. We attach to JumpLoop by using
+ // `bytecode_analysis().osr_bailout_id()` instead of current_offset (the
+ // former points at JumpLoop, the latter at the loop header, i.e. the target
+ // of JumpLoop).
+ void PrepareFrameStateForOSREntryStackCheck(Node* node) {
+ DCHECK_EQ(bytecode_iterator().current_offset(),
+ bytecode_analysis().osr_entry_point());
+ DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+ DCHECK(node->opcode() == IrOpcode::kJSStackCheck);
+ const int offset = bytecode_analysis().osr_bailout_id().ToInt();
+ return PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
+ BytecodeOffset(offset),
+ bytecode_analysis().GetOutLivenessFor(offset));
+ }
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(NameRef name, uint32_t feedback_slot_index,
@@ -304,6 +350,7 @@ class BytecodeGraphBuilder {
// StackChecks.
void BuildFunctionEntryStackCheck();
void BuildIterationBodyStackCheck();
+ void MaybeBuildOSREntryStackCheck();
// Control flow plumbing.
void BuildJump();
@@ -365,6 +412,12 @@ class BytecodeGraphBuilder {
int context_register_; // Index of register holding handler context.
};
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+ return broker_->CanonicalPersistentHandle(
+ bytecode_iterator().GetConstantForIndexOperand(operand_index,
+ local_isolate_));
+ }
+
Graph* graph() const { return jsgraph_->graph(); }
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
Zone* graph_zone() const { return graph()->zone(); }
@@ -388,6 +441,9 @@ class BytecodeGraphBuilder {
SourcePositionTableIterator& source_position_iterator() {
return *source_position_iterator_.get();
}
+ interpreter::BytecodeArrayIterator const& bytecode_iterator() const {
+ return bytecode_iterator_;
+ }
interpreter::BytecodeArrayIterator& bytecode_iterator() {
return bytecode_iterator_;
}
@@ -418,6 +474,7 @@ class BytecodeGraphBuilder {
#undef DECLARE_VISIT_BYTECODE
JSHeapBroker* const broker_;
+ LocalIsolate* const local_isolate_;
Zone* const local_zone_;
JSGraph* const jsgraph_;
// The native context for which we optimize.
@@ -434,6 +491,7 @@ class BytecodeGraphBuilder {
Environment* environment_;
bool const osr_;
int currently_peeled_loop_offset_;
+ bool is_osr_entry_stack_check_pending_;
const bool skip_first_stack_check_;
@@ -484,6 +542,8 @@ class BytecodeGraphBuilder {
TickCounter* const tick_counter_;
+ ObserveNodeInfo const observe_node_info_;
+
static constexpr int kBinaryOperationHintIndex = 1;
static constexpr int kBinaryOperationSmiHintIndex = 1;
static constexpr int kCompareOperationHintIndex = 1;
@@ -532,7 +592,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
+ Node* Checkpoint(BytecodeOffset bytecode_offset,
+ OutputFrameStateCombine combine,
const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
@@ -955,7 +1016,7 @@ Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
}
Node* BytecodeGraphBuilder::Environment::Checkpoint(
- BailoutId bailout_id, OutputFrameStateCombine combine,
+ BytecodeOffset bailout_id, OutputFrameStateCombine combine,
const BytecodeLivenessState* liveness) {
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
@@ -991,17 +1052,21 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackCellRef const& feedback_cell, BailoutId osr_offset,
+ FeedbackCellRef const& feedback_cell, BytecodeOffset osr_offset,
JSGraph* jsgraph, CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
- BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
+ BytecodeGraphBuilderFlags flags, TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info)
: broker_(broker),
+ local_isolate_(broker_->local_isolate()
+ ? broker_->local_isolate()
+ : broker_->isolate()->AsLocalIsolate()),
local_zone_(local_zone),
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().AsFeedbackVector()),
+ feedback_vector_(feedback_cell.value()->AsFeedbackVector()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -1009,19 +1074,19 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
- FrameStateType::kInterpretedFunction,
+ FrameStateType::kUnoptimizedFunction,
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
source_position_iterator_(std::make_unique<SourcePositionTableIterator>(
bytecode_array().SourcePositionTable())),
- bytecode_iterator_(
- std::make_unique<OffHeapBytecodeArray>(bytecode_array())),
+ bytecode_iterator_(bytecode_array().object()),
bytecode_analysis_(
bytecode_array().object(), local_zone, osr_offset,
flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
environment_(nullptr),
osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
+ is_osr_entry_stack_check_pending_(osr_),
skip_first_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
merge_environments_(local_zone),
@@ -1039,7 +1104,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
state_values_cache_(jsgraph),
source_positions_(source_positions),
start_position_(shared_info.StartPosition(), inlining_id),
- tick_counter_(tick_counter) {}
+ tick_counter_(tick_counter),
+ observe_node_info_(observe_node_info) {}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -1213,7 +1279,7 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- BailoutId bailout_id(bytecode_iterator().current_offset());
+ BytecodeOffset bailout_id(bytecode_iterator().current_offset());
const BytecodeLivenessState* liveness_before =
bytecode_analysis().GetInLivenessFor(
@@ -1239,36 +1305,18 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
#endif // DEBUG
}
-void BytecodeGraphBuilder::PrepareFrameState(Node* node,
- OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- PrepareFrameState(node, combine,
- BailoutId(bytecode_iterator().current_offset()));
- }
-}
-
-void BytecodeGraphBuilder::PrepareFrameState(Node* node,
- OutputFrameStateCombine combine,
- BailoutId bailout_id) {
+void BytecodeGraphBuilder::PrepareFrameState(
+ Node* node, OutputFrameStateCombine combine, BytecodeOffset bailout_id,
+ const BytecodeLivenessState* liveness) {
if (OperatorProperties::HasFrameStateInput(node->op())) {
// Add the frame state for after the operation. The node in question has
// already been created and had a {Dead} frame state input up until now.
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
- DCHECK_IMPLIES(bailout_id.ToInt() == kFunctionEntryBytecodeOffset,
- bytecode_iterator().current_offset() == 0);
-
- // If we have kFunctionEntryBytecodeOffset as the bailout_id, we want to get
- // the liveness at the moment of function entry. This is the same as the IN
- // liveness of the first actual bytecode.
- const BytecodeLivenessState* liveness_after =
- bailout_id.ToInt() == kFunctionEntryBytecodeOffset
- ? bytecode_analysis().GetInLivenessFor(0)
- : bytecode_analysis().GetOutLivenessFor(bailout_id.ToInt());
Node* frame_state_after =
- environment()->Checkpoint(bailout_id, combine, liveness_after);
+ environment()->Checkpoint(bailout_id, combine, liveness);
NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
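// A minimal standalone sketch of the pattern in PrepareFrameState above:
// nodes are created with a {Dead} placeholder in their frame-state input slot
// and patched once the "after" state is known. Types and names here are
// hypothetical stand-ins, not V8's Node API.
#include <cassert>
#include <vector>

struct SketchNode {
  std::vector<SketchNode*> inputs;
  bool is_dead_sentinel = false;
};

SketchNode* SketchNewNodeWithPendingFrameState(SketchNode* dead_sentinel) {
  // The frame-state slot is filled with a sentinel at creation time.
  return new SketchNode{{dead_sentinel}, false};
}

void SketchAttachFrameState(SketchNode* node, SketchNode* frame_state_after) {
  // Only the sentinel may be overwritten; a real frame state is never
  // replaced twice.
  assert(node->inputs[0]->is_dead_sentinel);
  node->inputs[0] = frame_state_after;
}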
@@ -1378,8 +1426,7 @@ void BytecodeGraphBuilder::BuildFunctionEntryStackCheck() {
if (!skip_first_stack_check()) {
Node* node =
NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
- PrepareFrameState(node, OutputFrameStateCombine::Ignore(),
- BailoutId(kFunctionEntryBytecodeOffset));
+ PrepareFrameStateForFunctionEntryStackCheck(node);
}
}
@@ -1389,6 +1436,15 @@ void BytecodeGraphBuilder::BuildIterationBodyStackCheck() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::MaybeBuildOSREntryStackCheck() {
+ if (V8_UNLIKELY(is_osr_entry_stack_check_pending_)) {
+ is_osr_entry_stack_check_pending_ = false;
+ Node* node =
+ NewNode(javascript()->StackCheck(StackCheckKind::kJSFunctionEntry));
+ PrepareFrameStateForOSREntryStackCheck(node);
+ }
+}
+
// We will iterate through the OSR loop, then its parent, and so on
// until we have reached the outermost loop containing the OSR loop. We do
// not generate nodes for anything before the outermost loop.
@@ -1469,6 +1525,13 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
+
+ // The OSR-entry stack check must be emitted during the first call to
+ // VisitSingleBytecode in an OSR'd function. We don't know if that call
+ // will be made from AdvanceToOsrEntryAndPeelLoops or from VisitBytecodes,
+ // therefore we insert the logic here inside VisitSingleBytecode itself.
+ MaybeBuildOSREntryStackCheck();
+
switch (bytecode_iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
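// A minimal sketch of the one-shot check that MaybeBuildOSREntryStackCheck
// implements above: a pending flag is set for OSR compilations and consumed on
// the first bytecode visit, whichever code path gets there first. Hypothetical
// names; the real emitter lives in the graph builder.
class SketchOsrEntryCheck {
 public:
  explicit SketchOsrEntryCheck(bool is_osr) : pending_(is_osr) {}

  // Called from every bytecode visit; emits at most once.
  void MaybeEmitStackCheck() {
    if (pending_) {
      pending_ = false;
      EmitStackCheck();
    }
  }

 private:
  void EmitStackCheck() { /* emit the OSR-entry stack check here */ }
  bool pending_;
};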
@@ -1526,8 +1589,8 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
}
void BytecodeGraphBuilder::VisitLdaConstant() {
- ObjectRef object(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ObjectRef object(broker(), GetConstantForIndexOperand(0),
+ ObjectRef::BackgroundSerialization::kAllowed);
Node* node = jsgraph()->Constant(object);
environment()->BindAccumulator(node);
}
@@ -1568,6 +1631,16 @@ void BytecodeGraphBuilder::VisitStar() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value);
}
+#define SHORT_STAR_VISITOR(Name, ...) \
+ void BytecodeGraphBuilder::Visit##Name() { \
+ Node* value = environment()->LookupAccumulator(); \
+ environment()->BindRegister( \
+ interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
+ value); \
+ }
+SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
+#undef SHORT_STAR_VISITOR
+
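// The SHORT_STAR_VISITOR block above is an X-macro expansion: one visitor is
// stamped out per entry of SHORT_STAR_BYTECODE_LIST. A self-contained sketch
// of the same pattern with a hypothetical three-entry list:
#include <cstdio>

enum SketchShortStarRegister { kStar0 = 0, kStar1 = 1, kStar2 = 2 };

#define SKETCH_SHORT_STAR_LIST(V) \
  V(Star0)                        \
  V(Star1)                        \
  V(Star2)

#define SKETCH_VISITOR(Name) \
  void SketchVisit##Name() { std::printf("store accumulator to r%d\n", k##Name); }
SKETCH_SHORT_STAR_LIST(SKETCH_VISITOR)
#undef SKETCH_VISITOR
#undef SKETCH_SHORT_STAR_LIST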
void BytecodeGraphBuilder::VisitMov() {
Node* value =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1587,8 +1660,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1597,8 +1669,7 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1607,8 +1678,7 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1749,8 +1819,8 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1902,9 +1972,8 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name = jsgraph()->Constant(
+ ObjectRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1939,8 +2008,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(0));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1955,9 +2023,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(NameRef(
- broker(),
- bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(NameRef(broker(), GetConstantForIndexOperand(0)));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1986,8 +2053,8 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -2011,8 +2078,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name.object(), feedback);
@@ -2036,8 +2102,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource());
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, object, feedback_vector_node());
@@ -2049,8 +2114,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* home_object = environment()->LookupAccumulator();
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2104,8 +2168,7 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2146,8 +2209,7 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(),
- bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ NameRef name(broker(), GetConstantForIndexOperand(1));
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -2226,8 +2288,7 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- SharedFunctionInfoRef shared_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ SharedFunctionInfoRef shared_info(broker(), GetConstantForIndexOperand(0));
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -2244,16 +2305,14 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
const Operator* op = javascript()->CreateBlockContext(scope_info.object());
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(
scope_info.object(), slots, FUNCTION_SCOPE);
@@ -2262,8 +2321,7 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(scope_info.object(),
slots, EVAL_SCOPE);
@@ -2274,8 +2332,7 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->CreateCatchContext(scope_info.object());
Node* context = NewNode(op, exception);
@@ -2285,8 +2342,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- ScopeInfoRef scope_info(
- broker(), bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
const Operator* op = javascript()->CreateWithContext(scope_info.object());
Node* context = NewNode(op, object);
@@ -2312,8 +2368,7 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- StringRef constant_pattern(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ StringRef constant_pattern(broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2327,7 +2382,7 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
ArrayBoilerplateDescriptionRef array_boilerplate_description(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2366,7 +2421,7 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
ObjectBoilerplateDescriptionRef constant_properties(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ broker(), GetConstantForIndexOperand(0));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2404,8 +2459,8 @@ void BytecodeGraphBuilder::VisitCloneObject() {
void BytecodeGraphBuilder::VisitGetTemplateObject() {
FeedbackSource source =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
- TemplateObjectDescriptionRef description(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ TemplateObjectDescriptionRef description(broker(),
+ GetConstantForIndexOperand(0));
STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
const Operator* op = javascript()->GetTemplateObject(
description.object(), shared_info().object(), source);
@@ -2696,16 +2751,25 @@ void BytecodeGraphBuilder::VisitCallRuntime() {
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
- // Create node to perform the runtime call.
- const Operator* call = javascript()->CallRuntime(function_id, reg_count);
- Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
-
- // Connect to the end if {function_id} is non-returning.
- if (Runtime::IsNonReturning(function_id)) {
- // TODO(7099): Investigate if we need LoopExit node here.
- Node* control = NewNode(common()->Throw());
- MergeControlToLeaveFunction(control);
+ // Handle %ObserveNode here (rather than in JSIntrinsicLowering) to observe
+ // the node as early as possible.
+ if (function_id == Runtime::FunctionId::kObserveNode) {
+ DCHECK_EQ(1, reg_count);
+ Node* value = environment()->LookupRegister(receiver);
+ observe_node_info_.StartObserving(value);
+ environment()->BindAccumulator(value);
+ } else {
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(function_id, reg_count);
+ Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+
+ // Connect to the end if {function_id} is non-returning.
+ if (Runtime::IsNonReturning(function_id)) {
+ // TODO(7099): Investigate if we need LoopExit node here.
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
}
}
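// A sketch of the dispatch shape introduced above: one intrinsic id is
// intercepted before the generic runtime-call path, and %ObserveNode behaves
// as an identity whose argument is merely registered with an observer.
// Hypothetical types; not V8's Runtime/Node API.
#include <functional>

enum class SketchRuntimeId { kObserveNode, kOther };

int SketchHandleRuntimeCall(SketchRuntimeId id, int arg,
                            const std::function<void(int)>& start_observing,
                            const std::function<int(int)>& generic_call) {
  if (id == SketchRuntimeId::kObserveNode) {
    start_observing(arg);  // observe as early as possible
    return arg;            // the value flows through unchanged
  }
  return generic_call(arg);  // ordinary runtime call
}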
@@ -2891,8 +2955,8 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* accumulator = environment()->LookupAccumulator();
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
- Node* name = jsgraph()->Constant(ObjectRef(
- broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())));
+ Node* name =
+ jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -4508,18 +4572,21 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter) {
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info) {
DCHECK(broker->IsSerializedForCompilation(
- shared_info, feedback_cell.value().AsFeedbackVector()));
+ shared_info, feedback_cell.value()->AsFeedbackVector()));
+ DCHECK(feedback_cell.value()->AsFeedbackVector().serialized());
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
feedback_cell, osr_offset, jsgraph, invocation_frequency,
- source_positions, inlining_id, code_kind, flags, tick_counter);
+ source_positions, inlining_id, code_kind, flags, tick_counter,
+ observe_node_info);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 501451ec55..6870f266be 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -7,6 +7,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/js-type-hint-lowering.h"
+#include "src/compiler/node-observer.h"
#include "src/handles/handles.h"
#include "src/objects/code-kind.h"
#include "src/utils/utils.h"
@@ -25,6 +26,7 @@ class Zone;
namespace compiler {
class JSGraph;
+class NodeObserver;
class SourcePositionTable;
enum class BytecodeGraphBuilderFlag : uint8_t {
@@ -42,12 +44,13 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
FeedbackCellRef const& feedback_cell,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeOffset osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
- TickCounter* tick_counter);
+ TickCounter* tick_counter,
+ ObserveNodeInfo const& observe_node_info = {});
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 2c5338b0d7..5950541111 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -31,6 +31,7 @@ namespace {
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS rcx, rdx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3
+#define FP_RETURN_REGISTER xmm0
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
r15.bit()
@@ -43,6 +44,7 @@ namespace {
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
+#define FP_RETURN_REGISTER xmm0
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif // V8_TARGET_OS_WIN
@@ -59,7 +61,6 @@ namespace {
(1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
(1 << d14.code()) | (1 << d15.code())
-
#elif V8_TARGET_ARCH_ARM64
// ===========================================================================
// == arm64 ====================================================================
@@ -130,6 +131,19 @@ namespace {
d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
d14.bit() | d15.bit()
+#elif V8_TARGET_ARCH_RISCV64
+// ===========================================================================
+// == riscv64 =================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+// fp is not part of CALLEE_SAVE_REGISTERS (similar to how MIPS64 or PPC defines
+// it)
+#define CALLEE_SAVE_REGISTERS \
+ s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | \
+ s8.bit() | s9.bit() | s10.bit() | s11.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ fs0.bit() | fs1.bit() | fs2.bit() | fs3.bit() | fs4.bit() | fs5.bit() | \
+ fs6.bit() | fs7.bit() | fs8.bit() | fs9.bit() | fs10.bit() | fs11.bit()
#else
// ===========================================================================
// == unknown ================================================================
@@ -236,24 +250,36 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
// Check the types of the signature.
for (size_t i = 0; i < msig->parameter_count(); i++) {
- MachineRepresentation rep = msig->GetParam(i).representation();
- CHECK_NE(MachineRepresentation::kFloat32, rep);
- CHECK_NE(MachineRepresentation::kFloat64, rep);
+ MachineType type = msig->GetParam(i);
+ CHECK(!IsFloatingPoint(type.representation()));
}
-#endif
- // Add return location(s). We don't support FP returns for now.
+ // Check the return types.
for (size_t i = 0; i < locations.return_count_; i++) {
MachineType type = msig->GetReturn(i);
CHECK(!IsFloatingPoint(type.representation()));
}
+#endif
CHECK_GE(2, locations.return_count_);
if (locations.return_count_ > 0) {
- locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
- msig->GetReturn(0)));
+#ifdef FP_RETURN_REGISTER
+ const v8::internal::DoubleRegister kFPReturnRegister = FP_RETURN_REGISTER;
+ auto reg = IsFloatingPoint(msig->GetReturn(0).representation())
+ ? kFPReturnRegister.code()
+ : kReturnRegister0.code();
+#else
+ auto reg = kReturnRegister0.code();
+#endif
+ // TODO(chromium:1052746): Use the correctly sized register here (e.g. "al"
+ // if the return type is kBit), so we don't have to use a hacky bitwise AND
+ // elsewhere.
+ locations.AddReturn(LinkageLocation::ForRegister(reg, msig->GetReturn(0)));
}
+
if (locations.return_count_ > 1) {
+ DCHECK(!IsFloatingPoint(msig->GetReturn(0).representation()));
+
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister1.code(),
msig->GetReturn(1)));
}
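// A sketch of the return-location choice above: when a port defines an FP
// return register, floating-point C returns use it, everything else uses the
// GP return register. The register codes below are illustrative assumptions.
enum class SketchRep { kWord32, kWord64, kFloat32, kFloat64 };

constexpr bool SketchIsFloatingPoint(SketchRep rep) {
  return rep == SketchRep::kFloat32 || rep == SketchRep::kFloat64;
}

constexpr int kSketchGPReturnReg = 0;   // e.g. the rax code on x64 (assumed)
constexpr int kSketchFPReturnReg = 16;  // e.g. the xmm0 code on x64 (assumed)

constexpr int SketchReturnRegisterFor(SketchRep rep) {
  return SketchIsFloatingPoint(rep) ? kSketchFPReturnReg : kSketchGPReturnReg;
}

static_assert(SketchReturnRegisterFor(SketchRep::kFloat64) == kSketchFPReturnReg,
              "FP returns land in the FP return register");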
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 0344a1916f..0361a2ada0 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
-#include "src/codegen/code-stub-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
@@ -562,23 +561,23 @@ TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
-TNode<WordT> CodeAssembler::WordShl(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordShl(TNode<WordT> value, int shift) {
return (shift != 0) ? WordShl(value, IntPtrConstant(shift)) : value;
}
-TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordShr(TNode<WordT> value, int shift) {
return (shift != 0) ? WordShr(value, IntPtrConstant(shift)) : value;
}
-TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> value, int shift) {
+TNode<WordT> CodeAssembler::WordSar(TNode<WordT> value, int shift) {
return (shift != 0) ? WordSar(value, IntPtrConstant(shift)) : value;
}
-TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) {
+TNode<Word32T> CodeAssembler::Word32Shr(TNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
}
-TNode<Word32T> CodeAssembler::Word32Sar(SloppyTNode<Word32T> value, int shift) {
+TNode<Word32T> CodeAssembler::Word32Sar(TNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Sar(value, Int32Constant(shift)) : value;
}
@@ -647,8 +646,7 @@ TNode<Float64T> CodeAssembler::RoundIntPtrToFloat64(Node* value) {
return UncheckedCast<Float64T>(raw_assembler()->ChangeInt32ToFloat64(value));
}
-TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(
- SloppyTNode<Float32T> value) {
+TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
return UncheckedCast<Int32T>(raw_assembler()->TruncateFloat32ToInt32(
value, TruncateKind::kSetOverflowToMin));
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 70991d8c7b..263ed37536 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -742,7 +742,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Load(MachineTypeOf<Type>::value, base, needs_poisoning));
}
template <class Type>
- TNode<Type> Load(Node* base, SloppyTNode<WordT> offset,
+ TNode<Type> Load(Node* base, TNode<WordT> offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return UncheckedCast<Type>(
Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
@@ -996,17 +996,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
IntPtrSub(static_cast<Node*>(left), static_cast<Node*>(right)));
}
- TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
- TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
- TNode<WordT> WordSar(SloppyTNode<WordT> value, int shift);
+ TNode<WordT> WordShl(TNode<WordT> value, int shift);
+ TNode<WordT> WordShr(TNode<WordT> value, int shift);
+ TNode<WordT> WordSar(TNode<WordT> value, int shift);
TNode<IntPtrT> WordShr(TNode<IntPtrT> value, int shift) {
- return UncheckedCast<IntPtrT>(WordShr(static_cast<Node*>(value), shift));
+ return UncheckedCast<IntPtrT>(WordShr(TNode<WordT>(value), shift));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> value, int shift) {
- return UncheckedCast<IntPtrT>(WordSar(static_cast<Node*>(value), shift));
+ return UncheckedCast<IntPtrT>(WordSar(TNode<WordT>(value), shift));
}
- TNode<Word32T> Word32Shr(SloppyTNode<Word32T> value, int shift);
- TNode<Word32T> Word32Sar(SloppyTNode<Word32T> value, int shift);
+ TNode<Word32T> Word32Shr(TNode<Word32T> value, int shift);
+ TNode<Word32T> Word32Sar(TNode<Word32T> value, int shift);
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
@@ -1040,7 +1040,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// range, make sure that overflow detection is easy. In particular, return
// int_min instead of int_max on arm platforms by using parameter
// kSetOverflowToMin.
- TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
+ TNode<Int32T> TruncateFloat32ToInt32(TNode<Float32T> value);
// Projections
template <int index, class T1, class T2>
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 8b3826424f..73aca646ce 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1528,7 +1528,7 @@ MachineRepresentation DeadValueRepresentationOf(Operator const* op) {
}
const Operator* CommonOperatorBuilder::FrameState(
- BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ BytecodeOffset bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
FrameStateInfo state_info(bailout_id, state_combine, function_info);
return zone()->New<Operator1<FrameStateInfo>>( // --
@@ -1625,6 +1625,17 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
shared_info);
}
+const FrameStateFunctionInfo*
+CommonOperatorBuilder::CreateJSToWasmFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature) {
+ DCHECK_EQ(type, FrameStateType::kJSToWasmBuiltinContinuation);
+ DCHECK_NOT_NULL(signature);
+ return zone()->New<JSToWasmFrameStateFunctionInfo>(
+ type, parameter_count, local_count, shared_info, signature);
+}
+
const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
return zone()->New<Operator1<MachineRepresentation>>( // --
IrOpcode::kDeadValue, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index d2768a9cf4..bf0e3a7bab 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -543,7 +543,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* ObjectState(uint32_t object_id, int pointer_slots);
const Operator* TypedObjectState(uint32_t object_id,
const ZoneVector<MachineType>* types);
- const Operator* FrameState(BailoutId bailout_id,
+ const Operator* FrameState(BytecodeOffset bailout_id,
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
const Operator* Call(const CallDescriptor* call_descriptor);
@@ -561,6 +561,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info);
+ const FrameStateFunctionInfo* CreateJSToWasmFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature);
const Operator* MarkAsSafetyCheck(const Operator* op,
IsSafetyCheck safety_check);
@@ -600,6 +604,65 @@ class CommonNodeWrapperBase : public NodeWrapper {
NodeProperties::GetValueInput(node(), TheIndex)); \
}
+// TODO(jgruber): This class doesn't match the usual OpcodeNode naming
+// convention for historical reasons (it was originally a very basic typed node
+// wrapper similar to Effect and Control). Consider updating the name, with low
+// priority.
+class FrameState : public CommonNodeWrapperBase {
+ public:
+ explicit constexpr FrameState(Node* node) : CommonNodeWrapperBase(node) {
+ // TODO(jgruber): Disallow kStart (needed for PromiseConstructorBasic unit
+ // test, among others). Also, outer_frame_state points at the start node
+ // for non-inlined functions. This could be avoided by checking
+ // has_outer_frame_state() before casting to FrameState.
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFrameState ||
+ node->opcode() == IrOpcode::kStart);
+ }
+
+ FrameStateInfo frame_state_info() const {
+ return FrameStateInfoOf(node()->op());
+ }
+
+ static constexpr int kFrameStateParametersInput = 0;
+ static constexpr int kFrameStateLocalsInput = 1;
+ static constexpr int kFrameStateStackInput = 2;
+ static constexpr int kFrameStateContextInput = 3;
+ static constexpr int kFrameStateFunctionInput = 4;
+ static constexpr int kFrameStateOuterStateInput = 5;
+ static constexpr int kFrameStateInputCount = 6;
+
+ // Note: The parameters should be accessed through StateValuesAccess.
+ Node* parameters() const {
+ Node* n = node()->InputAt(kFrameStateParametersInput);
+ DCHECK(n->opcode() == IrOpcode::kStateValues ||
+ n->opcode() == IrOpcode::kTypedStateValues);
+ return n;
+ }
+ Node* locals() const {
+ Node* n = node()->InputAt(kFrameStateLocalsInput);
+ DCHECK(n->opcode() == IrOpcode::kStateValues ||
+ n->opcode() == IrOpcode::kTypedStateValues);
+ return n;
+ }
+ // TODO(jgruber): Consider renaming this to the more meaningful
+ // 'accumulator'.
+ Node* stack() const { return node()->InputAt(kFrameStateStackInput); }
+ Node* context() const { return node()->InputAt(kFrameStateContextInput); }
+ Node* function() const { return node()->InputAt(kFrameStateFunctionInput); }
+
+ // An outer frame state exists for inlined functions; otherwise it points at
+ // the start node.
+ bool has_outer_frame_state() const {
+ Node* maybe_outer_frame_state = node()->InputAt(kFrameStateOuterStateInput);
+ DCHECK(maybe_outer_frame_state->opcode() == IrOpcode::kFrameState ||
+ maybe_outer_frame_state->opcode() == IrOpcode::kStart);
+ return maybe_outer_frame_state->opcode() == IrOpcode::kFrameState;
+ }
+ FrameState outer_frame_state() const {
+ return FrameState{node()->InputAt(kFrameStateOuterStateInput)};
+ }
+};
+
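// A sketch of the wrapper idiom used by FrameState above: fixed input indices
// get named accessors, and the outer-state input distinguishes an inlined
// frame (another frame state) from a top-level one (the start node). Types
// here are hypothetical stand-ins for V8's Node.
#include <array>

struct SketchIrNode {
  bool is_frame_state = false;
  std::array<SketchIrNode*, 6> inputs{};
};

class SketchFrameState {
 public:
  static constexpr int kParameters = 0, kLocals = 1, kStack = 2, kContext = 3,
                       kFunction = 4, kOuterState = 5;

  explicit SketchFrameState(SketchIrNode* node) : node_(node) {}

  SketchIrNode* parameters() const { return node_->inputs[kParameters]; }
  SketchIrNode* context() const { return node_->inputs[kContext]; }

  bool has_outer_frame_state() const {
    return node_->inputs[kOuterState]->is_frame_state;
  }
  SketchFrameState outer_frame_state() const {
    return SketchFrameState(node_->inputs[kOuterState]);
  }

 private:
  SketchIrNode* node_;
};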
class StartNode final : public CommonNodeWrapperBase {
public:
explicit constexpr StartNode(Node* node) : CommonNodeWrapperBase(node) {
@@ -641,6 +704,67 @@ class StartNode final : public CommonNodeWrapperBase {
return node()->op()->ValueOutputCount() - kExtraOutputCount -
kReceiverOutputCount;
}
+
+ // Note these functions don't return the index of the Start output; instead
+ // they return the index assigned to the Parameter node.
+ // TODO(jgruber): Consider unifying the two.
+ int NewTargetParameterIndex() const {
+ return Linkage::GetJSCallNewTargetParamIndex(FormalParameterCount());
+ }
+ int ArgCountParameterIndex() const {
+ return Linkage::GetJSCallArgCountParamIndex(FormalParameterCount());
+ }
+ int ContextParameterIndex() const {
+ return Linkage::GetJSCallContextParamIndex(FormalParameterCount());
+ }
+
+ // TODO(jgruber): Remove this function and use
+ // Linkage::GetJSCallContextParamIndex instead. This currently doesn't work
+ // because tests don't create valid Start nodes - for example, they may add
+ // only two context outputs (and not the closure, new target, argc). Once
+ // tests are fixed, remove this function.
+ int ContextParameterIndex_MaybeNonStandardLayout() const {
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ //
+ // TODO(jgruber): This function is called from spots that operate on
+ // CSA/Torque graphs; Start node layout appears to be different there.
+ // These should be unified to avoid confusion. Once done, enable this
+ // DCHECK: DCHECK_EQ(LastOutputIndex(), ContextOutputIndex());
+ return node()->op()->ValueOutputCount() - 2;
+ }
+ int LastParameterIndex_MaybeNonStandardLayout() const {
+ return ContextParameterIndex_MaybeNonStandardLayout();
+ }
+
+ // Unlike ContextParameterIndex_MaybeNonStandardLayout above, these return
+ // output indices (and not the index assigned to a Parameter).
+ int NewTargetOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameters indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallNewTargetParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 3);
+ return node()->op()->ValueOutputCount() - 3;
+ }
+ int ArgCountOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameters indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallArgCountParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 2);
+ return node()->op()->ValueOutputCount() - 2;
+ }
+ int ContextOutputIndex() const {
+ // Indices assigned to parameters are off-by-one (Parameters indices start
+ // at -1).
+ // TODO(jgruber): Consider starting at 0.
+ DCHECK_EQ(Linkage::GetJSCallContextParamIndex(FormalParameterCount()) + 1,
+ node()->op()->ValueOutputCount() - 1);
+ return node()->op()->ValueOutputCount() - 1;
+ }
+ int LastOutputIndex() const { return ContextOutputIndex(); }
};
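// The output-index helpers above are pure arithmetic over the tail of the
// Start node's value outputs (..., new_target, argc, context). The same
// arithmetic in isolation, given only the value output count (hypothetical
// helper names):
constexpr int SketchNewTargetOutputIndex(int value_output_count) {
  return value_output_count - 3;
}
constexpr int SketchArgCountOutputIndex(int value_output_count) {
  return value_output_count - 2;
}
constexpr int SketchContextOutputIndex(int value_output_count) {
  return value_output_count - 1;
}

// For example, with 7 value outputs the context is the last one:
static_assert(SketchContextOutputIndex(7) == 6, "context is the last output");
static_assert(SketchArgCountOutputIndex(7) == 5, "argc precedes context");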
class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index be503aa73f..2628575e4d 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -256,8 +256,6 @@ class FieldConstnessDependency final : public CompilationDependency {
class GlobalPropertyDependency final : public CompilationDependency {
public:
- // TODO(neis): Once the concurrent compiler frontend is always-on, we no
- // longer need to explicitly store the type and the read_only flag.
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
: cell_(cell), type_(type), read_only_(read_only) {
@@ -404,10 +402,6 @@ void CompilationDependencies::DependOnStableMap(const MapRef& map) {
}
}
-void CompilationDependencies::DependOnTransition(const MapRef& target_map) {
- RecordDependency(TransitionDependencyOffTheRecord(target_map));
-}
-
AllocationType CompilationDependencies::DependOnPretenureMode(
const AllocationSiteRef& site) {
DCHECK(!site.IsNeverSerializedHeapObject());
@@ -441,26 +435,15 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
return PropertyConstness::kConst;
}
-void CompilationDependencies::DependOnFieldRepresentation(
- const MapRef& map, InternalIndex descriptor) {
- RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor));
-}
-
-void CompilationDependencies::DependOnFieldType(const MapRef& map,
- InternalIndex descriptor) {
- RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor));
-}
-
void CompilationDependencies::DependOnGlobalProperty(
const PropertyCellRef& cell) {
- DCHECK(!cell.IsNeverSerializedHeapObject());
PropertyCellType type = cell.property_details().cell_type();
bool read_only = cell.property_details().IsReadOnly();
RecordDependency(zone_->New<GlobalPropertyDependency>(cell, type, read_only));
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
- DCHECK(!cell.IsNeverSerializedHeapObject());
+ cell.SerializeAsProtector();
if (cell.value().AsSmi() != Protectors::kProtectorValid) return false;
RecordDependency(zone_->New<ProtectorDependency>(cell));
return true;
@@ -514,13 +497,6 @@ void CompilationDependencies::DependOnElementsKind(
}
}
-bool CompilationDependencies::AreValid() const {
- for (auto dep : dependencies_) {
- if (!dep->IsValid()) return false;
- }
- return true;
-}
-
bool CompilationDependencies::Commit(Handle<Code> code) {
// Dependencies are context-dependent. In the future it may be possible to
// restore them in the consumer native context, but for now they are
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 0b1612487e..bcf619ea09 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -45,22 +45,10 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// Record the assumption that {map} stays stable.
void DependOnStableMap(const MapRef& map);
- // Record the assumption that {target_map} can be transitioned to, i.e., that
- // it does not become deprecated.
- void DependOnTransition(const MapRef& target_map);
-
// Return the pretenure mode of {site} and record the assumption that it does
// not change.
AllocationType DependOnPretenureMode(const AllocationSiteRef& site);
- // Record the assumption that the field representation of a field does not
- // change. The field is identified by the arguments.
- void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor);
-
- // Record the assumption that the field type of a field does not change. The
- // field is identified by the arguments.
- void DependOnFieldType(const MapRef& map, InternalIndex descriptor);
-
// Return a field's constness and, if kConst, record the assumption that it
// remains kConst. The field is identified by the arguments.
//
@@ -110,23 +98,28 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(
const JSFunctionRef& function);
- // The methods below allow for gathering dependencies without actually
- // recording them. They can be recorded at a later time (or they can be
- // ignored). For example,
- // DependOnTransition(map);
- // is equivalent to:
- // RecordDependency(TransitionDependencyOffTheRecord(map));
+ // Records {dependency} if not null.
void RecordDependency(CompilationDependency const* dependency);
+
+ // The methods below allow for gathering dependencies without actually
+ // recording them. They can be recorded at a later time via RecordDependency
+ // (or they can be ignored).
+
+ // Gather the assumption that {target_map} can be transitioned to, i.e., that
+ // it does not become deprecated.
CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
+
+ // Gather the assumption that the field representation of a field does not
+ // change. The field is identified by the arguments.
CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
const MapRef& map, InternalIndex descriptor) const;
+
+ // Gather the assumption that the field type of a field does not change. The
+ // field is identified by the arguments.
CompilationDependency const* FieldTypeDependencyOffTheRecord(
const MapRef& map, InternalIndex descriptor) const;
- // Exposed only for testing purposes.
- bool AreValid() const;
-
private:
Zone* const zone_;
JSHeapBroker* const broker_;
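// A sketch of the "off the record" split documented above: gathering an
// assumption is separated from committing to it, and RecordDependency ignores
// null. Hypothetical types; zone/lifetime management is simplified to
// unique_ptr here.
#include <memory>
#include <vector>

struct SketchDependency {
  // In the real compiler this would validate against the current heap state.
  bool IsValid() const { return true; }
};

class SketchDependencies {
 public:
  // Gathers a dependency without recording it; the caller may record it later
  // or simply drop it when a speculative optimization is abandoned.
  std::unique_ptr<SketchDependency> TransitionDependencyOffTheRecord() const {
    return std::make_unique<SketchDependency>();
  }

  // Records {dependency} if not null.
  void RecordDependency(std::unique_ptr<SketchDependency> dependency) {
    if (dependency != nullptr) recorded_.push_back(std::move(dependency));
  }

 private:
  std::vector<std::unique_ptr<SketchDependency>> recorded_;
};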
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 5fe983213c..d64c3c80e5 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -148,7 +148,6 @@ class EffectControlLinearizer {
Node* LowerObjectIsInteger(Node* node);
Node* LowerNumberIsSafeInteger(Node* node);
Node* LowerObjectIsSafeInteger(Node* node);
- Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerRestLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
@@ -251,6 +250,8 @@ class EffectControlLinearizer {
Node* CallBuiltin(Builtins::Name builtin, Operator::Properties properties,
Args...);
+ Node* ChangeBitToTagged(Node* value);
+ Node* ChangeFloat64ToTagged(Node* value, CheckForMinusZeroMode mode);
Node* ChangeInt32ToSmi(Node* value);
// In pointer compression, we smi-corrupt. This means the upper bits of a Smi
// are not important. ChangeTaggedInt32ToSmi has a known tagged int32 as input
@@ -259,11 +260,13 @@ class EffectControlLinearizer {
// In non pointer compression, it behaves like ChangeInt32ToSmi.
Node* ChangeTaggedInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
+ Node* ChangeInt32ToTagged(Node* value);
Node* ChangeInt64ToSmi(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
Node* ChangeIntPtrToSmi(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeUint32ToTagged(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
@@ -1125,9 +1128,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kObjectIsUndetectable:
result = LowerObjectIsUndetectable(node);
break;
- case IrOpcode::kArgumentsFrame:
- result = LowerArgumentsFrame(node);
- break;
case IrOpcode::kArgumentsLength:
result = LowerArgumentsLength(node);
break;
@@ -1382,7 +1382,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
+ return ChangeFloat64ToTagged(value, mode);
+}
+Node* EffectControlLinearizer::ChangeFloat64ToTagged(
+ Node* value, CheckForMinusZeroMode mode) {
auto done = __ MakeLabel(MachineRepresentation::kTagged);
auto if_heapnumber = __ MakeDeferredLabel();
auto if_int32 = __ MakeLabel();
@@ -1438,7 +1442,10 @@ Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeBitToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeBitToTagged(Node* value) {
auto if_true = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -1459,7 +1466,10 @@ Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeInt32ToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeInt32ToTagged(Node* value) {
if (SmiValuesAre32Bits()) {
return ChangeInt32ToSmi(value);
}
@@ -1505,7 +1515,10 @@ Node* EffectControlLinearizer::LowerChangeInt64ToTagged(Node* node) {
Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
Node* value = node->InputAt(0);
+ return ChangeUint32ToTagged(value);
+}
+Node* EffectControlLinearizer::ChangeUint32ToTagged(Node* value) {
auto if_not_in_smi_range = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
@@ -1773,8 +1786,10 @@ Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check_instance_type =
- __ Word32Equal(value_instance_type, __ Int32Constant(JS_FUNCTION_TYPE));
+ Node* check_instance_type = __ Uint32LessThanOrEqual(
+ __ Int32Sub(value_instance_type,
+ __ Int32Constant(FIRST_JS_FUNCTION_TYPE)),
+ __ Int32Constant(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
__ DeoptimizeIfNot(DeoptimizeReason::kWrongCallTarget, FeedbackSource(),
check_instance_type, frame_state);
@@ -3676,34 +3691,9 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
return ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
-#else
- auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
- Node* frame = __ LoadFramePointer();
-
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
- int formal_parameter_count = FormalParameterCountOf(node->op());
- DCHECK_LE(0, formal_parameter_count);
-
- // The ArgumentsLength node is computing the actual number of arguments.
- // We have to distinguish the case when there is an arguments adaptor frame
- // (i.e., arguments_frame != LoadFramePointer()).
- auto if_adaptor_frame = __ MakeLabel();
- __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done,
- __ SmiConstant(formal_parameter_count));
- __ Goto(&if_adaptor_frame);
-
- __ Bind(&if_adaptor_frame);
- Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
- MachineType::Pointer(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
- __ Goto(&done, arguments_length);
- __ Bind(&done);
- return done.PhiAt(0);
-#endif
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3713,27 +3703,9 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
Node* frame = __ LoadFramePointer();
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
-#else
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
-
- // The RestLength node is computing the number of rest parameters,
- // which is max(0, actual_parameter_count - formal_parameter_count).
- // We have to distinguish the case, when there is an arguments adaptor frame
- // (i.e., arguments_frame != LoadFramePointer()).
- auto if_adaptor_frame = __ MakeLabel();
- __ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(0));
- __ Goto(&if_adaptor_frame);
-
- __ Bind(&if_adaptor_frame);
- Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
- MachineType::Pointer(), arguments_frame,
- __ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
-#endif
-
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -3744,27 +3716,6 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
return done.PhiAt(0);
}
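// With the arguments-adaptor frame removed, the rest-length lowering above
// reduces to reading the actual argument count from the frame and clamping.
// The arithmetic it emits, as plain C++ (hypothetical helper name):
constexpr int SketchRestLength(int actual_argument_count,
                               int formal_parameter_count) {
  const int rest = actual_argument_count - formal_parameter_count;
  return rest < 0 ? 0 : rest;
}

static_assert(SketchRestLength(5, 2) == 3, "three rest arguments");
static_assert(SketchRestLength(1, 2) == 0, "never negative");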
-Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
- auto done = __ MakeLabel(MachineType::PointerRepresentation());
-
- Node* frame = __ LoadFramePointer();
- Node* parent_frame =
- __ Load(MachineType::Pointer(), frame,
- __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
- Node* parent_frame_type = __ Load(
- MachineType::IntPtr(), parent_frame,
- __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
-
- __ GotoIf(__ IntPtrEqual(parent_frame_type,
- __ IntPtrConstant(StackFrame::TypeToMarker(
- StackFrame::ARGUMENTS_ADAPTOR))),
- &done, parent_frame);
- __ Goto(&done, frame);
-
- __ Bind(&done);
- return done.PhiAt(0);
-}
-
Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) {
AllocationType const allocation = AllocationTypeOf(node->op());
Node* length = node->InputAt(0);
@@ -3864,8 +3815,8 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
CreateArgumentsType type = parameters.arguments_type();
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- Node* frame = NodeProperties::GetValueInput(node, 0);
- Node* arguments_count = NodeProperties::GetValueInput(node, 1);
+ Node* frame = __ LoadFramePointer();
+ Node* arguments_count = NodeProperties::GetValueInput(node, 0);
Builtins::Name builtin_name;
switch (type) {
case CreateArgumentsType::kMappedArguments:
@@ -5026,7 +4977,6 @@ void EffectControlLinearizer::LowerStoreMessage(Node* node) {
__ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
}
-// TODO(mslekova): Avoid code duplication with simplified lowering.
static MachineType MachineTypeFor(CTypeInfo::Type type) {
switch (type) {
case CTypeInfo::Type::kVoid:
@@ -5062,23 +5012,33 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
value_input_count);
if (fast_api_call_stack_slot_ == nullptr) {
- // Add the { fallback } output parameter.
- int kAlign = 4;
+ int kAlign = alignof(v8::FastApiCallbackOptions);
int kSize = sizeof(v8::FastApiCallbackOptions);
- // If this check fails, probably you've added new fields to
+ // If this check fails, you've probably added new fields to
// v8::FastApiCallbackOptions, which means you'll need to write code
// that initializes and reads from them too (see the Store and Load to
// fast_api_call_stack_slot_ below).
- CHECK_EQ(kSize, 1);
+ CHECK_EQ(kSize, sizeof(uintptr_t) * 2);
fast_api_call_stack_slot_ = __ StackSlot(kSize, kAlign);
}
- // Generate the store to `fast_api_call_stack_slot_`.
- __ Store(StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- fast_api_call_stack_slot_, 0, jsgraph()->ZeroConstant());
+ // Leave the slot uninit if the callback doesn't use it.
+ if (c_signature->HasOptions()) {
+ // Generate the stores to `fast_api_call_stack_slot_`.
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
+ jsgraph()->ZeroConstant());
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, data)),
+ n.SlowCallArgument(FastApiCallNode::kSlowCallDataArgumentIndex));
+ }
MachineSignature::Builder builder(
- graph()->zone(), 1, c_arg_count + FastApiCallNode::kHasErrorInputCount);
+ graph()->zone(), 1, c_arg_count + (c_signature->HasOptions() ? 1 : 0));
MachineType return_type = MachineTypeFor(c_signature->ReturnInfo().GetType());
builder.AddReturn(return_type);
for (int i = 0; i < c_arg_count; ++i) {
@@ -5086,7 +5046,9 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
MachineTypeFor(c_signature->ArgumentInfo(i).GetType());
builder.AddParam(machine_type);
}
- builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
+ if (c_signature->HasOptions()) {
+ builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
+ }
CallDescriptor* call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
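// A sketch of the conditional options handling above: the out-of-line
// FastApiCallbackOptions slot (a fallback flag plus a data pointer) is only
// initialized, and the extra pointer parameter only appended, when the C
// signature declares options. Types and names below are illustrative.
#include <cstdint>
#include <vector>

struct SketchCallbackOptions {
  int32_t fallback;  // set by the fast callback when it wants the slow path
  void* data;        // callback data, mirrored from the slow call's argument
};

std::vector<const void*> SketchBuildCArguments(std::vector<const void*> args,
                                               bool has_options,
                                               SketchCallbackOptions* slot,
                                               void* callback_data) {
  if (has_options) {
    slot->fallback = 0;          // cleared before the call
    slot->data = callback_data;  // lets the fast callback reach its data
    args.push_back(slot);        // trailing options pointer parameter
  }
  // Without options the slot is neither initialized nor passed.
  return args;
}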
@@ -5101,7 +5063,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
target_address, 0, n.target());
Node** const inputs = graph()->zone()->NewArray<Node*>(
- c_arg_count + FastApiCallNode::kFastCallExtraInputCount);
+ c_arg_count + n.FastCallExtraInputCount());
inputs[0] = n.target();
for (int i = FastApiCallNode::kFastTargetInputCount;
i < c_arg_count + FastApiCallNode::kFastTargetInputCount; ++i) {
@@ -5113,37 +5075,74 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
inputs[i] = NodeProperties::GetValueInput(node, i);
}
}
- inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
-
- inputs[c_arg_count + 2] = __ effect();
- inputs[c_arg_count + 3] = __ control();
+ if (c_signature->HasOptions()) {
+ inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
+ inputs[c_arg_count + 2] = __ effect();
+ inputs[c_arg_count + 3] = __ control();
+ } else {
+ inputs[c_arg_count + 1] = __ effect();
+ inputs[c_arg_count + 2] = __ control();
+ }
- __ Call(call_descriptor,
- c_arg_count + FastApiCallNode::kFastCallExtraInputCount, inputs);
+ Node* c_call_result = __ Call(
+ call_descriptor, c_arg_count + n.FastCallExtraInputCount(), inputs);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
target_address, 0, __ IntPtrConstant(0));
+ Node* fast_call_result;
+ switch (c_signature->ReturnInfo().GetType()) {
+ case CTypeInfo::Type::kVoid:
+ fast_call_result = __ UndefinedConstant();
+ break;
+ case CTypeInfo::Type::kBool:
+ static_assert(sizeof(bool) == 1, "unsupported bool size");
+ fast_call_result = ChangeBitToTagged(
+ __ Word32And(c_call_result, __ Int32Constant(0xFF)));
+ break;
+ case CTypeInfo::Type::kInt32:
+ fast_call_result = ChangeInt32ToTagged(c_call_result);
+ break;
+ case CTypeInfo::Type::kUint32:
+ fast_call_result = ChangeUint32ToTagged(c_call_result);
+ break;
+ case CTypeInfo::Type::kInt64:
+ case CTypeInfo::Type::kUint64:
+ UNREACHABLE();
+ case CTypeInfo::Type::kFloat32:
+ fast_call_result =
+ ChangeFloat64ToTagged(__ ChangeFloat32ToFloat64(c_call_result),
+ CheckForMinusZeroMode::kCheckForMinusZero);
+ break;
+ case CTypeInfo::Type::kFloat64:
+ fast_call_result = ChangeFloat64ToTagged(
+ c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
+ break;
+ case CTypeInfo::Type::kV8Value:
+ UNREACHABLE();
+ }
+
+ if (!c_signature->HasOptions()) return fast_call_result;
+
// Generate the load from `fast_api_call_stack_slot_`.
- Node* load = __ Load(MachineType::Int32(), fast_api_call_stack_slot_, 0);
+ Node* load =
+ __ Load(MachineType::Int32(), fast_api_call_stack_slot_,
+ static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)));
- TNode<Boolean> cond =
- TNode<Boolean>::UncheckedCast(__ Word32Equal(load, __ Int32Constant(0)));
+ Node* is_zero = __ Word32Equal(load, __ Int32Constant(0));
// Hint to true.
auto if_success = __ MakeLabel();
auto if_error = __ MakeDeferredLabel();
auto merge = __ MakeLabel(MachineRepresentation::kTagged);
- __ Branch(cond, &if_success, &if_error);
+ __ Branch(is_zero, &if_success, &if_error);
- // Generate fast call.
__ Bind(&if_success);
- Node* then_result = [&]() { return __ UndefinedConstant(); }();
- __ Goto(&merge, then_result);
+ __ Goto(&merge, fast_call_result);
// Generate direct slow call.
__ Bind(&if_error);
- Node* else_result = [&]() {
+ {
Node** const slow_inputs = graph()->zone()->NewArray<Node*>(
n.SlowCallArgumentCount() +
FastApiCallNode::kEffectAndControlInputCount);
@@ -5157,12 +5156,11 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
slow_inputs[index] = __ effect();
slow_inputs[index + 1] = __ control();
- Node* slow_call = __ Call(
+ Node* slow_call_result = __ Call(
params.descriptor(),
index + FastApiCallNode::kEffectAndControlInputCount, slow_inputs);
- return slow_call;
- }();
- __ Goto(&merge, else_result);
+ __ Goto(&merge, slow_call_result);
+ }
__ Bind(&merge);
return merge.PhiAt(0);
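// A sketch of the return-value handling above: the raw C result is converted
// per declared return type (bool keeps only its low byte because the C
// descriptor hands back a full GP register; float32 is widened to float64),
// and 64-bit integer returns are unsupported. Simplified to plain doubles
// instead of tagged values; names are hypothetical.
#include <cstdint>

enum class SketchCType { kVoid, kBool, kInt32, kUint32, kFloat32, kFloat64 };

double SketchConvertFastCallResult(SketchCType type, uint64_t gp_bits,
                                   double fp_value) {
  switch (type) {
    case SketchCType::kVoid:
      return 0.0;  // stands in for `undefined`
    case SketchCType::kBool:
      return (gp_bits & 0xFF) ? 1.0 : 0.0;  // only the low byte is meaningful
    case SketchCType::kInt32:
      return static_cast<int32_t>(gp_bits);
    case SketchCType::kUint32:
      return static_cast<uint32_t>(gp_bits);
    case SketchCType::kFloat32:
    case SketchCType::kFloat64:
      return fp_value;  // float32 results are widened to double
  }
  return 0.0;
}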
@@ -5235,13 +5233,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* offset =
__ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
- if (FLAG_unbox_double_fields) {
- Node* result = __ Load(MachineType::Float64(), object, offset);
- __ Goto(&done_double, result);
- } else {
- Node* field = __ Load(MachineType::AnyTagged(), object, offset);
- __ Goto(&loaded_field, field);
- }
+ Node* field = __ Load(MachineType::AnyTagged(), object, offset);
+ __ Goto(&loaded_field, field);
}
__ Bind(&if_outofobject);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index f4ab1c9709..97b22d8875 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -159,9 +159,12 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
// This input order is important to match the DFS traversal used in the
// instruction selector. Otherwise, the instruction selector might find a
// duplicate node before the original one.
- for (int input_id : {kFrameStateOuterStateInput, kFrameStateFunctionInput,
- kFrameStateParametersInput, kFrameStateContextInput,
- kFrameStateLocalsInput, kFrameStateStackInput}) {
+ for (int input_id : {FrameState::kFrameStateOuterStateInput,
+ FrameState::kFrameStateFunctionInput,
+ FrameState::kFrameStateParametersInput,
+ FrameState::kFrameStateContextInput,
+ FrameState::kFrameStateLocalsInput,
+ FrameState::kFrameStateStackInput}) {
Node* input = node->InputAt(input_id);
new_node.ReplaceInput(ReduceDeoptState(input, effect, deduplicator),
input_id);
@@ -226,9 +229,7 @@ void EscapeAnalysisReducer::Finalize() {
? params.formal_parameter_count()
: 0;
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
- if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
- Node* arguments_length = NodeProperties::GetValueInput(node, 1);
+ Node* arguments_length = NodeProperties::GetValueInput(node, 0);
if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;
Node* arguments_length_state = nullptr;
@@ -328,7 +329,10 @@ void EscapeAnalysisReducer::Finalize() {
}
NodeProperties::SetType(offset,
TypeCache::Get()->kArgumentsLengthType);
- NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
+ Node* frame = jsgraph()->graph()->NewNode(
+ jsgraph()->machine()->LoadFramePointer());
+ NodeProperties::SetType(frame, Type::ExternalPointer());
+ NodeProperties::ReplaceValueInput(load, frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
NodeProperties::ChangeOp(
load, jsgraph()->simplified()->LoadStackArgument());
@@ -337,7 +341,7 @@ void EscapeAnalysisReducer::Finalize() {
case IrOpcode::kLoadField: {
DCHECK_EQ(FieldAccessOf(load->op()).offset,
FixedArray::kLengthOffset);
- Node* length = NodeProperties::GetValueInput(node, 1);
+ Node* length = NodeProperties::GetValueInput(node, 0);
ReplaceWithValue(load, length);
break;
}
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index c27bf8551c..7ff6ab684f 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -133,7 +133,7 @@ class VariableTracker {
Maybe<Node*> Get(Variable var) {
Node* node = current_state_.Get(var);
if (node && node->opcode() == IrOpcode::kDead) {
- // TODO(tebbi): We use {Dead} as a sentinel for uninitialized memory.
+ // TODO(turbofan): We use {Dead} as a sentinel for uninitialized memory.
// Reading uninitialized memory can only happen in unreachable code. In
// this case, we have to mark the object as escaping to avoid dead nodes
// in the graph. This is a workaround that should be removed once we can
@@ -479,8 +479,8 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
Node* phi = graph_->graph()->NewNode(
graph_->common()->Phi(MachineRepresentation::kTagged, arity),
arity + 1, &buffer_.front());
- // TODO(tebbi): Computing precise types here is tricky, because of
- // the necessary revisitations. If we really need this, we should
+ // TODO(turbofan): Computing precise types here is tricky, because
+ // of the necessary revisitations. If we really need this, we should
// probably do it afterwards.
NodeProperties::SetType(phi, Type::Any());
reducer_->AddRoot(phi);
@@ -711,7 +711,7 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
} else if (right_object && !right_object->HasEscaped()) {
replacement = jsgraph->FalseConstant();
}
- // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // TODO(turbofan) This is a workaround for uninhabited types. If we
// replaced a value of uninhabited type with a constant, we would
// widen the type of the node. This could produce inconsistent
// types (which might confuse representation selection). We get
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 0fbc7d0bdd..907c7cc087 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -131,7 +131,7 @@ class VirtualObject : public Dependable {
CHECK(IsAligned(offset, kTaggedSize));
CHECK(!HasEscaped());
if (offset >= size()) {
- // TODO(tebbi): Reading out-of-bounds can only happen in unreachable
+ // TODO(turbofan): Reading out-of-bounds can only happen in unreachable
// code. In this case, we have to mark the object as escaping to avoid
// dead nodes in the graph. This is a workaround that should be removed
// once we can handle dead nodes everywhere.
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 5598a0fe59..b7c4588e36 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -11,16 +11,12 @@
#include "src/compiler/node.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Guard equality of these constants. Ideally they should be merged at
-// some point.
-STATIC_ASSERT(kFrameStateOuterStateInput ==
- FrameState::kFrameStateOuterStateInput);
-
size_t hash_value(OutputFrameStateCombine const& sc) {
return base::hash_value(sc.parameter_);
}
@@ -53,8 +49,8 @@ size_t hash_value(FrameStateInfo const& info) {
std::ostream& operator<<(std::ostream& os, FrameStateType type) {
switch (type) {
- case FrameStateType::kInterpretedFunction:
- os << "INTERPRETED_FRAME";
+ case FrameStateType::kUnoptimizedFunction:
+ os << "UNOPTIMIZED_FRAME";
break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
@@ -65,6 +61,9 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kBuiltinContinuation:
os << "BUILTIN_CONTINUATION_FRAME";
break;
+ case FrameStateType::kJSToWasmBuiltinContinuation:
+ os << "JS_TO_WASM_BUILTIN_CONTINUATION_FRAME";
+ break;
case FrameStateType::kJavaScriptBuiltinContinuation:
os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
break;
@@ -88,7 +87,7 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
namespace {
-// Lazy deopt points where the frame state is assocated with a call get an
+// Lazy deopt points where the frame state is associated with a call get an
// additional parameter for the return result from the call. The return result
// is added by the deoptimizer and not explicitly specified in the frame state.
// Lazy deopt points which can catch exceptions further get an additional
@@ -110,7 +109,8 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
JSGraph* jsgraph, FrameStateType frame_type, Builtins::Name name,
Node* closure, Node* context, Node** parameters, int parameter_count,
Node* outer_frame_state,
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) {
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(),
+ const wasm::FunctionSig* signature = nullptr) {
Graph* const graph = jsgraph->graph();
CommonOperatorBuilder* const common = jsgraph->common();
@@ -118,10 +118,13 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
common->StateValues(parameter_count, SparseInputMask::Dense());
Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
- BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
+ BytecodeOffset bailout_id = Builtins::GetContinuationBytecodeOffset(name);
const FrameStateFunctionInfo* state_info =
- common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
- shared);
+ signature ? common->CreateJSToWasmFrameStateFunctionInfo(
+ frame_type, parameter_count, 0, shared, signature)
+ : common->CreateFrameStateFunctionInfo(
+ frame_type, parameter_count, 0, shared);
+
const Operator* op = common->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
return FrameState(graph->NewNode(op, params_node, jsgraph->EmptyStateValues(),
@@ -134,7 +137,7 @@ FrameState CreateBuiltinContinuationFrameStateCommon(
FrameState CreateStubBuiltinContinuationFrameState(
JSGraph* jsgraph, Builtins::Name name, Node* context,
Node* const* parameters, int parameter_count, Node* outer_frame_state,
- ContinuationFrameStateMode mode) {
+ ContinuationFrameStateMode mode, const wasm::FunctionSig* signature) {
Callable callable = Builtins::CallableFor(jsgraph->isolate(), name);
CallInterfaceDescriptor descriptor = callable.descriptor();
@@ -163,10 +166,29 @@ FrameState CreateStubBuiltinContinuationFrameState(
actual_parameters.push_back(parameters[i]);
}
+ FrameStateType frame_state_type = FrameStateType::kBuiltinContinuation;
+ if (name == Builtins::kJSToWasmLazyDeoptContinuation) {
+ CHECK_NOT_NULL(signature);
+ frame_state_type = FrameStateType::kJSToWasmBuiltinContinuation;
+ }
return CreateBuiltinContinuationFrameStateCommon(
- jsgraph, FrameStateType::kBuiltinContinuation, name,
- jsgraph->UndefinedConstant(), context, actual_parameters.data(),
- static_cast<int>(actual_parameters.size()), outer_frame_state);
+ jsgraph, frame_state_type, name, jsgraph->UndefinedConstant(), context,
+ actual_parameters.data(), static_cast<int>(actual_parameters.size()),
+ outer_frame_state, Handle<SharedFunctionInfo>(), signature);
+}
+
+FrameState CreateJSWasmCallBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Node* context, Node* outer_frame_state,
+ const wasm::FunctionSig* signature) {
+ base::Optional<wasm::ValueKind> wasm_return_type =
+ wasm::WasmReturnTypeFromSignature(signature);
+ Node* node_return_type =
+ jsgraph->SmiConstant(wasm_return_type ? wasm_return_type.value() : -1);
+ Node* lazy_deopt_parameters[] = {node_return_type};
+ return CreateStubBuiltinContinuationFrameState(
+ jsgraph, Builtins::kJSToWasmLazyDeoptContinuation, context,
+ lazy_deopt_parameters, arraysize(lazy_deopt_parameters),
+ outer_frame_state, ContinuationFrameStateMode::LAZY, signature);
}
FrameState CreateJavaScriptBuiltinContinuationFrameState(
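CreateJSWasmCallBuiltinContinuationFrameState passes the wasm return kind to the lazy-deopt continuation as a single Smi parameter, with -1 standing for "no return value". A minimal illustration of what wasm::WasmReturnTypeFromSignature is expected to yield under the single-return restriction enforced in js-call-reducer.cc (a sketch, not the actual helper):

    base::Optional<wasm::ValueKind> ReturnKindOf(const wasm::FunctionSig* sig) {
      if (sig->return_count() == 0) return {};  // encoded as SmiConstant(-1)
      DCHECK_EQ(1u, sig->return_count());       // multi-return is never inlined
      return sig->GetReturn(0).kind();          // encoded as SmiConstant(kind)
    }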
@@ -185,6 +207,7 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
// to be the second value in the translation when creating stack crawls
// (e.g. Error.stack) of optimized JavaScript frames.
std::vector<Node*> actual_parameters;
+ actual_parameters.reserve(stack_parameter_count);
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(stack_parameters[i]);
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 1dc54c0fdb..32586264e7 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -62,10 +62,12 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kInterpretedFunction, // Represents an InterpretedFrame.
+ kUnoptimizedFunction, // Represents an UnoptimizedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
kConstructStub, // Represents a ConstructStubFrame.
kBuiltinContinuation, // Represents a continuation to a stub.
+ kJSToWasmBuiltinContinuation, // Represents a lazy deopt continuation for a
+ // JS to Wasm call.
  kJavaScriptBuiltinContinuation, // Represents a continuation to a JavaScript
// builtin.
kJavaScriptBuiltinContinuationWithCatch // Represents a continuation to a
@@ -89,7 +91,7 @@ class FrameStateFunctionInfo {
FrameStateType type() const { return type_; }
static bool IsJSFunctionType(FrameStateType type) {
- return type == FrameStateType::kInterpretedFunction ||
+ return type == FrameStateType::kUnoptimizedFunction ||
type == FrameStateType::kJavaScriptBuiltinContinuation ||
type == FrameStateType::kJavaScriptBuiltinContinuationWithCatch;
}
@@ -101,20 +103,37 @@ class FrameStateFunctionInfo {
Handle<SharedFunctionInfo> const shared_info_;
};
+class JSToWasmFrameStateFunctionInfo : public FrameStateFunctionInfo {
+ public:
+ JSToWasmFrameStateFunctionInfo(FrameStateType type, int parameter_count,
+ int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ const wasm::FunctionSig* signature)
+ : FrameStateFunctionInfo(type, parameter_count, local_count, shared_info),
+ signature_(signature) {
+ DCHECK_NOT_NULL(signature);
+ }
+
+ const wasm::FunctionSig* signature() const { return signature_; }
+
+ private:
+ const wasm::FunctionSig* const signature_;
+};
class FrameStateInfo final {
public:
- FrameStateInfo(BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ FrameStateInfo(BytecodeOffset bailout_id,
+ OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* info)
: bailout_id_(bailout_id),
frame_state_combine_(state_combine),
info_(info) {}
FrameStateType type() const {
- return info_ == nullptr ? FrameStateType::kInterpretedFunction
+ return info_ == nullptr ? FrameStateType::kUnoptimizedFunction
: info_->type();
}
- BailoutId bailout_id() const { return bailout_id_; }
+ BytecodeOffset bailout_id() const { return bailout_id_; }
OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
MaybeHandle<SharedFunctionInfo> shared_info() const {
return info_ == nullptr ? MaybeHandle<SharedFunctionInfo>()
@@ -129,7 +148,7 @@ class FrameStateInfo final {
const FrameStateFunctionInfo* function_info() const { return info_; }
private:
- BailoutId const bailout_id_;
+ BytecodeOffset const bailout_id_;
OutputFrameStateCombine const frame_state_combine_;
const FrameStateFunctionInfo* const info_;
};
@@ -141,20 +160,19 @@ size_t hash_value(FrameStateInfo const&);
std::ostream& operator<<(std::ostream&, FrameStateInfo const&);
-static constexpr int kFrameStateParametersInput = 0;
-static constexpr int kFrameStateLocalsInput = 1;
-static constexpr int kFrameStateStackInput = 2;
-static constexpr int kFrameStateContextInput = 3;
-static constexpr int kFrameStateFunctionInput = 4;
-static constexpr int kFrameStateOuterStateInput = 5;
-static constexpr int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
-
enum class ContinuationFrameStateMode { EAGER, LAZY, LAZY_WITH_CATCH };
+class FrameState;
+
FrameState CreateStubBuiltinContinuationFrameState(
JSGraph* graph, Builtins::Name name, Node* context, Node* const* parameters,
int parameter_count, Node* outer_frame_state,
- ContinuationFrameStateMode mode);
+ ContinuationFrameStateMode mode,
+ const wasm::FunctionSig* signature = nullptr);
+
+FrameState CreateJSWasmCallBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Node* context, Node* outer_frame_state,
+ const wasm::FunctionSig* signature);
FrameState CreateJavaScriptBuiltinContinuationFrameState(
JSGraph* graph, const SharedFunctionInfoRef& shared, Builtins::Name name,
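Code that meets a frame state of type kJSToWasmBuiltinContinuation can recover the wasm signature through the new subclass. A hedged sketch of such a consumer (the helper name is made up; the static_cast reflects the intended downcast):

    const wasm::FunctionSig* JSToWasmSignatureOf(const FrameStateInfo& info) {
      if (info.type() != FrameStateType::kJSToWasmBuiltinContinuation) {
        return nullptr;
      }
      return static_cast<const JSToWasmFrameStateFunctionInfo*>(
                 info.function_info())
          ->signature();
    }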
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 1b03a22968..7fc0c27b84 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -51,13 +51,13 @@ class CallDescriptor;
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
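With the corrected diagram, caller frame slots and parameters relate as slot(i) = -(i + 1): parameter 0 sits next to the return address at slot -1, and parameter n at slot -(n + 1). A tiny worked check (the helper is illustrative only):

    constexpr int SlotForParameter(int i) { return -(i + 1); }
    // For n == 2 (parameters 0..2):
    static_assert(SlotForParameter(0) == -1, "parameter 0 at slot -1");
    static_assert(SlotForParameter(2) == -3, "parameter n at slot -(n + 1)");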
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
index b3d7a5571a..465bdf133b 100644
--- a/deps/v8/src/compiler/functional-list.h
+++ b/deps/v8/src/compiler/functional-list.h
@@ -16,7 +16,7 @@ namespace compiler {
// results in an O(1) copy operation. It is the equivalent of functional lists
// in ML-like languages, with the only difference that it also caches the length
// of the list in each node.
-// TODO(tebbi): Use this implementation also for RedundancyElimination.
+// TODO(turbofan): Use this implementation also for RedundancyElimination.
template <class A>
class FunctionalList {
private:
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index a2de1b7c09..bb3bc34a58 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -34,6 +34,7 @@ class BasicBlock;
V(BitcastInt32ToFloat32) \
V(BitcastWord32ToWord64) \
V(BitcastInt64ToFloat64) \
+ V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
@@ -47,6 +48,7 @@ class BasicBlock;
V(Float64ExtractLowWord32) \
V(Float64SilenceNaN) \
V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
@@ -89,6 +91,9 @@ class BasicBlock;
V(Word64And) \
V(Word64Equal) \
V(Word64Or) \
+ V(Word64Sar) \
+ V(Word64SarShiftOutZeros) \
+ V(Word64Shl) \
V(Word64Shr) \
V(WordAnd) \
V(WordEqual) \
@@ -105,8 +110,12 @@ class BasicBlock;
V(Int32Mod) \
V(Int32MulWithOverflow) \
V(Int32SubWithOverflow) \
+ V(Int64Div) \
+ V(Int64Mod) \
V(Uint32Div) \
- V(Uint32Mod)
+ V(Uint32Mod) \
+ V(Uint64Div) \
+ V(Uint64Mod)
#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
V(AllocateInOldGenerationStub, Code) \
@@ -369,6 +378,22 @@ class V8_EXPORT_PRIVATE GraphAssembler {
BranchHint hint, Vars...);
// Control helpers.
+
+ // {GotoIf(c, l, h)} is equivalent to {BranchWithHint(c, l, templ, h);
+ // Bind(templ)}.
+ template <typename... Vars>
+ void GotoIf(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
+ BranchHint hint, Vars...);
+
+ // {GotoIfNot(c, l, h)} is equivalent to {BranchWithHint(c, templ, l, h);
+ // Bind(templ)}.
+ // The branch hint refers to the expected outcome of the provided condition,
+ // so {GotoIfNot(..., BranchHint::kTrue)} means "optimize for the case where
+ // the branch is *not* taken".
+ template <typename... Vars>
+ void GotoIfNot(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
+ BranchHint hint, Vars...);
+
// {GotoIf(c, l)} is equivalent to {Branch(c, l, templ);Bind(templ)}.
template <typename... Vars>
void GotoIf(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
@@ -747,9 +772,7 @@ void GraphAssembler::Goto(GraphAssemblerLabel<sizeof...(Vars)>* label,
template <typename... Vars>
void GraphAssembler::GotoIf(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* label,
- Vars... vars) {
- BranchHint hint =
- label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfTrue(), branch);
@@ -762,8 +785,7 @@ void GraphAssembler::GotoIf(Node* condition,
template <typename... Vars>
void GraphAssembler::GotoIfNot(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* label,
- Vars... vars) {
- BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfFalse(), branch);
@@ -773,6 +795,23 @@ void GraphAssembler::GotoIfNot(Node* condition,
control_ = AddNode(graph()->NewNode(common()->IfTrue(), branch));
}
+template <typename... Vars>
+void GraphAssembler::GotoIf(Node* condition,
+ GraphAssemblerLabel<sizeof...(Vars)>* label,
+ Vars... vars) {
+ BranchHint hint =
+ label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ return GotoIf(condition, label, hint, vars...);
+}
+
+template <typename... Vars>
+void GraphAssembler::GotoIfNot(Node* condition,
+ GraphAssemblerLabel<sizeof...(Vars)>* label,
+ Vars... vars) {
+ BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ return GotoIfNot(condition, label, hint, vars...);
+}
+
template <typename... Args>
TNode<Object> GraphAssembler::Call(const CallDescriptor* call_descriptor,
Node* first_arg, Args... args) {
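The new GotoIf/GotoIfNot overloads take the BranchHint explicitly instead of deriving it from whether the target label is deferred. A minimal usage sketch, assuming a GraphAssembler* gasm plus condition, fast_value, and slow_value nodes already in scope:

    auto fallback = gasm->MakeDeferredLabel();
    auto done = gasm->MakeLabel(MachineRepresentation::kTagged);
    // The condition is expected to hold, so the branch to `fallback` is
    // expected *not* to be taken (see the comment on GotoIfNot above).
    gasm->GotoIfNot(condition, &fallback, BranchHint::kTrue);
    gasm->Goto(&done, fast_value);
    gasm->Bind(&fallback);
    gasm->Goto(&done, slow_value);
    gasm->Bind(&done);
    Node* result = done.PhiAt(0);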
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 155d6fa8ef..998f37eea8 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -10,6 +10,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
@@ -28,8 +29,19 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
+Reduction Reducer::Reduce(Node* node,
+ ObserveNodeManager* observe_node_manager) {
+ Reduction reduction = Reduce(node);
+ if (V8_UNLIKELY(observe_node_manager && reduction.Changed())) {
+ observe_node_manager->OnNodeChanged(reducer_name(), node,
+ reduction.replacement());
+ }
+ return reduction;
+}
+
GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- JSHeapBroker* broker, Node* dead)
+ JSHeapBroker* broker, Node* dead,
+ ObserveNodeManager* observe_node_manager)
: graph_(graph),
dead_(dead),
state_(graph, 4),
@@ -37,7 +49,8 @@ GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
revisit_(zone),
stack_(zone),
tick_counter_(tick_counter),
- broker_(broker) {
+ broker_(broker),
+ observe_node_manager_(observe_node_manager) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -89,7 +102,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
for (auto i = reducers_.begin(); i != reducers_.end();) {
if (i != skip) {
tick_counter_->TickAndMaybeEnterSafepoint();
- Reduction reduction = (*i)->Reduce(node);
+ Reduction reduction = (*i)->Reduce(node, observe_node_manager_);
if (!reduction.Changed()) {
// No change from this reducer.
} else if (reduction.replacement() == node) {
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 171033fe53..6a6eab5ebb 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -20,6 +20,7 @@ namespace compiler {
class Graph;
class JSHeapBroker;
class Node;
+class ObserveNodeManager;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
@@ -58,7 +59,7 @@ class V8_EXPORT_PRIVATE Reducer {
virtual const char* reducer_name() const = 0;
// Try to reduce a node if possible.
- virtual Reduction Reduce(Node* node) = 0;
+ Reduction Reduce(Node* node, ObserveNodeManager* observe_node_manager);
// Invoked by the {GraphReducer} when all nodes are done. Can be used to
// do additional reductions at the end, which in turn can cause a new round
@@ -69,6 +70,9 @@ class V8_EXPORT_PRIVATE Reducer {
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
static Reduction Changed(Node* node) { return Reduction(node); }
+
+ private:
+ virtual Reduction Reduce(Node* node) = 0;
};
@@ -136,7 +140,8 @@ class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- JSHeapBroker* broker, Node* dead = nullptr);
+ JSHeapBroker* broker, Node* dead = nullptr,
+ ObserveNodeManager* observe_node_manager = nullptr);
~GraphReducer() override;
GraphReducer(const GraphReducer&) = delete;
@@ -193,6 +198,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneStack<NodeState> stack_;
TickCounter* const tick_counter_;
JSHeapBroker* const broker_;
+ ObserveNodeManager* const observe_node_manager_;
};
} // namespace compiler
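Since Reduce(Node*) is now a private virtual, subclasses keep overriding it unchanged, while every external call goes through the non-virtual Reduce(Node*, ObserveNodeManager*), which reports changed nodes to the observer. Sketch of a reducer under the new split (MyReducer is illustrative):

    class MyReducer final : public Reducer {
     public:
      const char* reducer_name() const override { return "MyReducer"; }

     private:
      // Still the only method a concrete reducer implements; node observation
      // happens in the public Reducer::Reduce wrapper defined in graph-reducer.cc.
      Reduction Reduce(Node* node) override {
        // ... per-opcode logic ...
        return NoChange();
      }
    };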
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index c6f58152bf..1208d0f4f6 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -230,6 +230,8 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
}
std::replace(filename.begin(), filename.begin() + filename.length(), ' ',
'_');
+ std::replace(filename.begin(), filename.begin() + filename.length(), ':',
+ '-');
EmbeddedVector<char, 256> base_dir;
if (optional_base_dir != nullptr) {
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index e5bfa3e34e..e41bb6d748 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -27,10 +27,16 @@ class InternalizedString;
class JSBoundFunction;
class JSDataView;
class JSGlobalProxy;
-class JSRegExp;
class JSTypedArray;
class NativeContext;
class ScriptContextTable;
+template <typename>
+class Signature;
+
+namespace wasm {
+class ValueType;
+struct WasmModule;
+} // namespace wasm
namespace compiler {
@@ -38,6 +44,10 @@ namespace compiler {
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+inline bool IsAnyStore(AccessMode mode) {
+ return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
+}
+
enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
enum class OddballType : uint8_t {
@@ -63,19 +73,44 @@ enum class OddballType : uint8_t {
V(ScopeInfo) \
/* Subtypes of String */ \
V(InternalizedString) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
/* Subtypes of Name */ \
- V(String) \
V(Symbol) \
/* Subtypes of HeapObject */ \
V(AccessorInfo) \
V(ArrayBoilerplateDescription) \
V(CallHandlerInfo) \
V(Cell) \
- V(Name) \
+ V(Code) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(RegExpBoilerplateDescription) \
+ V(SharedFunctionInfo) \
V(TemplateObjectDescription)
// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
+// Classes in this list behave like serialized classes, but they allow lazy
+// serialization from background threads where this is safe (e.g. for objects
+// that are immutable and fully initialized once visible). Pass
+// ObjectRef::BackgroundSerialization::kAllowed to the ObjectRef constructor
+// for objects where serialization from the background thread is safe.
+#define HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of HeapObject */ \
+ V(BigInt) \
+ V(HeapNumber) \
+ V(Map)
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
+// Types in this list can be serialized on demand from the background thread.
+#define HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of HeapObject */ \
+ V(PropertyCell)
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
#define HEAP_BROKER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of JSObject */ \
V(JSArray) \
@@ -84,7 +119,6 @@ enum class OddballType : uint8_t {
V(JSFunction) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
- V(JSRegExp) \
V(JSTypedArray) \
/* Subtypes of Context */ \
V(NativeContext) \
@@ -92,25 +126,19 @@ enum class OddballType : uint8_t {
V(Context) \
V(ScriptContextTable) \
/* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
V(FixedArray) \
V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(String) \
/* Subtypes of JSReceiver */ \
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
- V(BigInt) \
- V(Code) \
V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
V(FixedArrayBase) \
V(FunctionTemplateInfo) \
- V(HeapNumber) \
V(JSReceiver) \
- V(Map) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
+ V(Name) \
V(SourceTextModule) \
/* Subtypes of Object */ \
V(HeapObject)
@@ -123,18 +151,26 @@ class PerIsolateCompilerCache;
class PropertyAccessInfo;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
class V8_EXPORT_PRIVATE ObjectRef {
public:
+ enum class BackgroundSerialization {
+ kDisallowed,
+ kAllowed,
+ };
+
ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+ BackgroundSerialization background_serialization =
+ BackgroundSerialization::kDisallowed,
bool check_type = true);
ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
: data_(data), broker_(broker) {
CHECK_NOT_NULL(data_);
}
-
Handle<Object> object() const;
bool equals(const ObjectRef& other) const;
@@ -145,11 +181,15 @@ class V8_EXPORT_PRIVATE ObjectRef {
#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
@@ -159,12 +199,6 @@ class V8_EXPORT_PRIVATE ObjectRef {
bool BooleanValue() const;
Maybe<double> OddballToNumber() const;
- // Return the element at key {index} if {index} is known to be an own data
- // property of the object that is non-writable and non-configurable.
- base::Optional<ObjectRef> GetOwnConstantElement(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
-
Isolate* isolate() const;
struct Hash {
@@ -240,8 +274,10 @@ class HeapObjectType {
// the outermost Ref class in the inheritance chain only.
#define DEFINE_REF_CONSTRUCTOR(name, base) \
name##Ref(JSHeapBroker* broker, Handle<Object> object, \
+ BackgroundSerialization background_serialization = \
+ BackgroundSerialization::kDisallowed, \
bool check_type = true) \
- : base(broker, object, false) { \
+ : base(broker, object, background_serialization, false) { \
if (check_type) { \
CHECK(Is##name()); \
} \
@@ -271,9 +307,16 @@ class PropertyCellRef : public HeapObjectRef {
Handle<PropertyCell> object() const;
- PropertyDetails property_details() const;
+ // Can be called from a background thread.
+ V8_WARN_UNUSED_RESULT bool Serialize() const;
+ void SerializeAsProtector() const {
+ bool serialized = Serialize();
+ // A protector always holds a Smi value and its cell type never changes, so
+ // Serialize can't fail.
+ CHECK(serialized);
+ }
- void Serialize();
+ PropertyDetails property_details() const;
ObjectRef value() const;
};
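Because Serialize() can now fail when invoked from a background thread, call sites are expected to check its result before reading property_details() or value(); protectors use the checked SerializeAsProtector() wrapper instead. A hedged call-site sketch, assuming a broker, a Handle<PropertyCell> cell, and a surrounding reducer:

    PropertyCellRef cell_ref(broker, cell);
    if (!cell_ref.Serialize()) {
      // The cell could not be read consistently from the background thread;
      // give up on this optimization.
      return NoChange();
    }
    PropertyDetails details = cell_ref.property_details();
    ObjectRef cell_value = cell_ref.value();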
@@ -290,17 +333,21 @@ class JSObjectRef : public JSReceiverRef {
Handle<JSObject> object() const;
- uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- double RawFastDoublePropertyAt(FieldIndex index) const;
ObjectRef RawFastPropertyAt(FieldIndex index) const;
+ // Return the element at key {index} if {index} is known to be an own data
+ // property of the object that is non-writable and non-configurable.
+ base::Optional<ObjectRef> GetOwnConstantElement(
+ uint32_t index, SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
// Return the value of the property identified by the field {index}
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDataProperty(
Representation field_representation, FieldIndex index,
SerializationPolicy policy =
SerializationPolicy::kAssumeSerialized) const;
- FixedArrayBaseRef elements() const;
+ base::Optional<FixedArrayBaseRef> elements() const;
void SerializeElements();
void EnsureElementsTenured();
ElementsKind GetElementsKind() const;
@@ -342,7 +389,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
bool has_feedback_vector() const;
bool has_initial_map() const;
bool has_prototype() const;
- bool HasAttachedOptimizedCode() const;
bool PrototypeRequiresRuntimeLookup() const;
void Serialize();
@@ -354,25 +400,32 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
+
+ void SerializeCodeAndFeedback();
+ bool serialized_code_and_feedback() const;
+
+ // The following are available only after calling SerializeCodeAndFeedback().
+ // TODO(mvstanton): Once we allow inlining of functions we didn't see
+ // during serialization, we do need to ensure that any feedback vector
+ // we read here has been fully initialized (ie, store-ordered into the
+ // cell).
FeedbackVectorRef feedback_vector() const;
FeedbackCellRef raw_feedback_cell() const;
CodeRef code() const;
- int InitialMapInstanceSizeWithMinSlack() const;
};
-class JSRegExpRef : public JSObjectRef {
+class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
public:
- DEFINE_REF_CONSTRUCTOR(JSRegExp, JSObjectRef)
+ DEFINE_REF_CONSTRUCTOR(RegExpBoilerplateDescription, HeapObjectRef)
- Handle<JSRegExp> object() const;
+ Handle<RegExpBoilerplateDescription> object() const;
- ObjectRef raw_properties_or_hash() const;
- ObjectRef data() const;
- ObjectRef source() const;
- ObjectRef flags() const;
- ObjectRef last_index() const;
+ void Serialize();
- void SerializeAsRegExpBoilerplate();
+ FixedArrayRef data() const;
+ StringRef source() const;
+ int flags() const;
};
class HeapNumberRef : public HeapObjectRef {
@@ -425,42 +478,48 @@ class ContextRef : public HeapObjectRef {
V(JSGlobalObject, global_object) \
V(JSGlobalProxy, global_proxy_object) \
V(JSObject, promise_prototype) \
- V(Map, block_context_map) \
V(Map, bound_function_with_constructor_map) \
V(Map, bound_function_without_constructor_map) \
- V(Map, catch_context_map) \
- V(Map, eval_context_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, function_context_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
V(Map, js_array_holey_double_elements_map) \
V(Map, js_array_holey_elements_map) \
V(Map, js_array_holey_smi_elements_map) \
V(Map, js_array_packed_double_elements_map) \
V(Map, js_array_packed_elements_map) \
V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(Map, with_context_map) \
V(ScriptContextTable, script_context_table)
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, regexp_exec_function)
+
+#define BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, block_context_map) \
+ V(Map, catch_context_map) \
+ V(Map, eval_context_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, function_context_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(Map, with_context_map)
+
// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
// happened when Turbofan is invoked via --always-opt.
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
- V(Map, async_function_object_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(JSFunction, regexp_exec_function) \
- V(Map, set_key_value_iterator_map) \
+#define BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(Map, set_key_value_iterator_map) \
V(Map, set_value_iterator_map)
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(V)
class NativeContextRef : public ContextRef {
public:
@@ -469,6 +528,7 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
void Serialize();
+ void SerializeOnBackground();
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
@@ -501,6 +561,11 @@ class DescriptorArrayRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(DescriptorArray, HeapObjectRef)
Handle<DescriptorArray> object() const;
+
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
+ NameRef GetPropertyKey(InternalIndex descriptor_index) const;
+ base::Optional<ObjectRef> GetStrongValue(
+ InternalIndex descriptor_index) const;
};
class FeedbackCellRef : public HeapObjectRef {
@@ -509,7 +574,12 @@ class FeedbackCellRef : public HeapObjectRef {
Handle<FeedbackCell> object() const;
base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- HeapObjectRef value() const;
+
+ // TODO(mvstanton): Once we allow inlining of functions we didn't see
+ // during serialization, we do need to ensure that any feedback vector
+ // we read here has been fully initialized (ie, store-ordered into the
+ // cell).
+ base::Optional<FeedbackVectorRef> value() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -642,10 +712,11 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
ObjectRef GetFieldType(InternalIndex descriptor_index) const;
- bool IsUnboxedDoubleField(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
+ DescriptorArrayRef instance_descriptors() const;
+
void SerializeRootMap();
base::Optional<MapRef> FindRootMap() const;
@@ -734,26 +805,19 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
Handle<BytecodeArray> object() const;
+ // NOTE: Concurrent reads of the actual bytecodes as well as the constant pool
+ // (both immutable) do not go through BytecodeArrayRef but are performed
+ // directly through the handle by BytecodeArrayAccessor.
+
int register_count() const;
int parameter_count() const;
interpreter::Register incoming_new_target_or_generator_register() const;
- // Bytecode access methods.
- uint8_t get(int index) const;
- Address GetFirstBytecodeAddress() const;
-
Handle<ByteArray> SourcePositionTable() const;
- // Constant pool access.
- Handle<Object> GetConstantAtIndex(int index) const;
- bool IsConstantAtIndexSmi(int index) const;
- Smi GetConstantAtIndexAsSmi(int index) const;
-
// Exception handler table.
Address handler_table_address() const;
int handler_table_size() const;
-
- void SerializeForCompilation();
};
class JSArrayRef : public JSObjectRef {
@@ -762,13 +826,24 @@ class JSArrayRef : public JSObjectRef {
Handle<JSArray> object() const;
- ObjectRef length() const;
+ // The `length` property of boilerplate JSArray objects. Boilerplates are
+ // immutable after initialization. Must not be used for non-boilerplate
+ // JSArrays.
+ ObjectRef GetBoilerplateLength() const;
// Return the element at key {index} if the array has a copy-on-write elements
// storage and {index} is known to be an own data property.
+ // Note the value returned by this function is only valid if we ensure at
+ // runtime that the backing store has not changed.
base::Optional<ObjectRef> GetOwnCowElement(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ FixedArrayBaseRef elements_ref, uint32_t index,
+ SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
+ // The `JSArray::length` property; not safe to use in general, but can be
+ // used in some special cases that guarantee a valid `length` value despite
+ // concurrent reads.
+ ObjectRef length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -786,20 +861,22 @@ class ScopeInfoRef : public HeapObjectRef {
void SerializeScopeInfoChain();
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript)
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ V(const wasm::WasmModule*, wasm_module) \
+ V(const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -833,6 +910,10 @@ class StringRef : public NameRef {
Handle<String> object() const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(
+ uint32_t index, SerializationPolicy policy =
+ SerializationPolicy::kAssumeSerialized) const;
+
base::Optional<int> length() const;
base::Optional<uint16_t> GetFirstChar();
base::Optional<double> ToNumber();
@@ -859,6 +940,7 @@ class JSTypedArrayRef : public JSObjectRef {
void Serialize();
bool serialized() const;
+ bool ShouldHaveBeenSerialized() const;
HeapObjectRef buffer() const;
};
@@ -922,7 +1004,7 @@ class CodeRef : public HeapObjectRef {
Handle<Code> object() const;
- unsigned inlined_bytecode_size() const;
+ unsigned GetInlinedBytecodeSize() const;
};
class InternalizedStringRef : public StringRef {
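Refs whose classes sit in the possibly-background-serialized list can now be created off the main thread by passing the extra constructor argument; everything else keeps the kDisallowed default. A short call-site sketch (broker and map handle assumed to be in scope):

    // Map is in HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST, so
    // constructing its ref from the background thread is permitted:
    MapRef background_ref(broker, map,
                          ObjectRef::BackgroundSerialization::kAllowed);
    // Existing main-thread call sites are unaffected by the new parameter:
    MapRef main_thread_ref(broker, map);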
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 9684086a5d..251ce6ee5a 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -2049,13 +2049,11 @@ struct PromiseCtorFrameStateParams {
// Remnant of old-style JSCallReducer code. Could be ported to graph assembler,
// but probably not worth the effort.
-FrameState CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count, BailoutId bailout_id,
- FrameStateType frame_state_type,
- const SharedFunctionInfoRef& shared,
- Node* context,
- CommonOperatorBuilder* common,
- Graph* graph) {
+FrameState CreateArtificialFrameState(
+ Node* node, Node* outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ const SharedFunctionInfoRef& shared, Node* context,
+ CommonOperatorBuilder* common, Graph* graph) {
const FrameStateFunctionInfo* state_info =
common->CreateFrameStateFunctionInfo(
frame_state_type, parameter_count + 1, 0, shared.object());
@@ -2089,7 +2087,7 @@ FrameState PromiseConstructorFrameState(
DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
- BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
params.shared, params.context, common, graph);
}
@@ -3244,7 +3242,8 @@ class IteratingArrayBuiltinHelper {
}
// TODO(jgruber): May only be needed for holey elements kinds.
- if (!dependencies->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies->DependOnNoElementsProtector()) return;
+
has_stability_dependency_ = inference_.RelyOnMapsPreferStability(
dependencies, jsgraph, &effect_, control_, p.feedback());
@@ -3430,9 +3429,90 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
return ReplaceWithSubgraph(&a, subgraph);
}
+namespace {
+
+bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
+ DCHECK(FLAG_turbo_inline_js_wasm_calls);
+ if (wasm_signature->return_count() > 1) {
+ return false;
+ }
+
+ for (auto type : wasm_signature->all()) {
+#if defined(V8_TARGET_ARCH_32_BIT)
+ if (type == wasm::kWasmI64) return false;
+#endif
+ if (type != wasm::kWasmI32 && type != wasm::kWasmI64 &&
+ type != wasm::kWasmF32 && type != wasm::kWasmF64) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace
+
+Reduction JSCallReducer::ReduceCallWasmFunction(
+ Node* node, const SharedFunctionInfoRef& shared) {
+ JSCallNode n(node);
+ const CallParameters& p = n.Parameters();
+
+ // Avoid deoptimization loops
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // TODO(paolosev@microsoft.com): Enable inlining for calls in try/catch.
+ if (NodeProperties::IsExceptionalCall(node)) {
+ return NoChange();
+ }
+
+ const wasm::FunctionSig* wasm_signature = shared.wasm_function_signature();
+ if (!CanInlineJSToWasmCall(wasm_signature)) {
+ return NoChange();
+ }
+
+ // Signal TurboFan that it should run the 'wasm-inlining' phase.
+ has_wasm_calls_ = true;
+
+ const wasm::WasmModule* wasm_module = shared.wasm_module();
+ const Operator* op =
+ javascript()->CallWasm(wasm_module, wasm_signature, p.feedback());
+
+ // Remove additional inputs
+ size_t actual_arity = n.ArgumentCount();
+ DCHECK(JSCallNode::kFeedbackVectorIsLastInput);
+ DCHECK_EQ(actual_arity + JSWasmCallNode::kExtraInputCount - 1,
+ n.FeedbackVectorIndex());
+ size_t expected_arity = wasm_signature->parameter_count();
+
+ while (actual_arity > expected_arity) {
+ int removal_index =
+ static_cast<int>(n.FirstArgumentIndex() + expected_arity);
+ DCHECK_LT(removal_index, static_cast<int>(node->InputCount()));
+ node->RemoveInput(removal_index);
+ actual_arity--;
+ }
+
+ // Add missing inputs
+ while (actual_arity < expected_arity) {
+ int insertion_index = n.ArgumentIndex(n.ArgumentCount());
+ node->InsertInput(graph()->zone(), insertion_index,
+ jsgraph()->UndefinedConstant());
+ actual_arity++;
+ }
+
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
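For reference, how CanInlineJSToWasmCall above classifies a few signatures (illustrative shorthand, not exhaustive):

    //   (i32, f64) -> i32         inlinable on every architecture
    //   (i64)      -> i64         inlinable only on 64-bit targets;
    //                             V8_TARGET_ARCH_32_BIT rejects kWasmI64
    //   (i32)      -> (i32, i32)  not inlinable: more than one return value
    //   (externref) -> void       not inlinable: only i32/i64/f32/f64 are allowed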
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
namespace {
bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kFloat64) {
+ return true;
+ }
for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
@@ -3447,6 +3527,10 @@ bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
#ifndef V8_TARGET_ARCH_64_BIT
namespace {
bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
+ if (c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ReturnInfo().GetType() == CTypeInfo::Type::kUint64) {
+ return true;
+ }
for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
@@ -3804,13 +3888,13 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arguments_list}).
CreateArgumentsType const type = CreateArgumentsTypeOf(arguments_list->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
- int start_index = 0;
+ FrameState frame_state =
+ FrameState{NodeProperties::GetFrameStateInput(arguments_list)};
int formal_parameter_count;
{
Handle<SharedFunctionInfo> shared;
- if (!FrameStateInfoOf(frame_state->op()).shared_info().ToHandle(&shared)) {
+ if (!frame_state.frame_state_info().shared_info().ToHandle(&shared)) {
return NoChange();
}
formal_parameter_count = SharedFunctionInfoRef(broker(), shared)
@@ -3828,8 +3912,6 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
return NoChange();
}
}
- } else if (type == CreateArgumentsType::kRestParameter) {
- start_index = formal_parameter_count;
}
// TODO(jgruber,v8:8888): Attempt to remove this restriction. The reason it
@@ -3846,13 +3928,19 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// Remove the {arguments_list} input from the {node}.
node->RemoveInput(arraylike_or_spread_index);
+ // The index of the first relevant parameter. Only non-zero when looking at
+ // rest parameters, in which case it is set to the index of the first rest
+ // parameter.
+ const int start_index = (type == CreateArgumentsType::kRestParameter)
+ ? formal_parameter_count
+ : 0;
+
// After removing the arraylike or spread object, the argument count is:
int argc =
arraylike_or_spread_index - JSCallOrConstructNode::FirstArgumentIndex();
// Check if are spreading to inlined arguments or to the arguments of
// the outermost function.
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) {
+ if (!frame_state.has_outer_frame_state()) {
Operator const* op;
if (IsCallWithArrayLikeOrSpread(node)) {
static constexpr int kTargetAndReceiver = 2;
@@ -3867,40 +3955,22 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
NodeProperties::ChangeOp(node, op);
return Changed(node);
}
+ FrameState outer_state = frame_state.outer_frame_state();
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
- FrameStateInfo outer_info = FrameStateInfoOf(outer_state->op());
+ FrameStateInfo outer_info = outer_state.frame_state_info();
if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
// Need to take the parameters from the arguments adaptor.
frame_state = outer_state;
}
// Add the actual parameters to the {node}, skipping the receiver.
- const int argument_count =
- FrameStateInfoOf(frame_state->op()).parameter_count() -
- 1; // Minus receiver.
- if (start_index < argument_count) {
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin(); // Skip the receiver.
- for (int i = 0; i < start_index; i++) {
- // A non-zero start_index implies that there are rest arguments. Skip
- // them.
- ++parameters_it;
- }
- for (int i = start_index; i < argument_count; ++i, ++parameters_it) {
- Node* parameter_node = parameters_it.node();
- DCHECK_NOT_NULL(parameter_node);
- node->InsertInput(graph()->zone(),
- JSCallOrConstructNode::ArgumentIndex(argc++),
- parameter_node);
- }
- // TODO(jgruber): Currently, each use-site does the awkward dance above,
- // iterating based on the FrameStateInfo's parameter count minus one, and
- // manually advancing the iterator past the receiver. Consider wrapping all
- // this in an understandable iterator s.t. one only needs to iterate from
- // the beginning until done().
- DCHECK(parameters_it.done());
+ StateValuesAccess parameters_access(frame_state.parameters());
+ for (auto it = parameters_access.begin_without_receiver_and_skip(start_index);
+ !it.done(); ++it) {
+ DCHECK_NOT_NULL(it.node());
+ node->InsertInput(graph()->zone(),
+ JSCallOrConstructNode::ArgumentIndex(argc++), it.node());
}
if (IsCallWithArrayLikeOrSpread(node)) {
@@ -4089,8 +4159,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, SharedFunctionInfoRef(broker(), p.shared_info()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
- return ReduceJSCall(node,
- cell.value().AsFeedbackVector().shared_function_info());
+ if (cell.shared_function_info().has_value()) {
+ return ReduceJSCall(node, *cell.shared_function_info());
+ } else {
+ TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
+ << cell << " has no FeedbackVector");
+ return NoChange();
+ }
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4169,11 +4244,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell(
broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().IsFeedbackVector()) {
+ if (feedback_cell.value().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
- FeedbackVectorRef feedback_vector =
- feedback_cell.value().AsFeedbackVector();
+ FeedbackVectorRef feedback_vector = *feedback_cell.value();
if (!feedback_vector.serialized()) {
TRACE_BROKER_MISSING(
broker(), "feedback vector, not serialized: " << feedback_vector);
@@ -4555,6 +4629,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
if (shared.function_template_info().has_value()) {
return ReduceCallApiFunction(node, shared);
}
+
+ if ((flags() & kInlineJSToWasmCalls) && shared.wasm_function_signature()) {
+ return ReduceCallWasmFunction(node, shared);
+ }
+
return NoChange();
}
@@ -5094,7 +5173,9 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds, true)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5229,7 +5310,9 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5273,7 +5356,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
Node* efalse = effect;
Node* vfalse;
{
- // TODO(tebbi): We should trim the backing store if the capacity is too
+ // TODO(turbofan): We should trim the backing store if the capacity is too
// big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
// Load the elements backing store from the {receiver}.
@@ -5367,7 +5450,9 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kinds)) {
return inference.NoChange();
}
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (!dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
+ }
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5605,8 +5690,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
if (!dependencies()->DependOnArraySpeciesProtector())
return inference.NoChange();
- if (can_be_holey) {
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (can_be_holey && !dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
}
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -5765,9 +5850,11 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
}
}
- if (IsHoleyElementsKind(elements_kind)) {
- if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
+ if (IsHoleyElementsKind(elements_kind) &&
+ !dependencies()->DependOnNoElementsProtector()) {
+ return inference.NoChange();
}
+
// Since the map inference was done relative to {iterator_effect} rather than
// {effect}, we need to guard the use of the map(s) even when the inference
// was reliable.
@@ -6663,7 +6750,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
- node, frame_state, arity, BailoutId::ConstructStubInvoke(),
+ node, frame_state, arity, BytecodeOffset::ConstructStubInvoke(),
FrameStateType::kConstructStub, shared, context, common(), graph());
// This continuation just returns the newly created JSTypedArray. We
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 0a2050b200..8d24175d4b 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
enum Flag {
kNoFlags = 0u,
kBailoutOnUninitialized = 1u << 0,
+ kInlineJSToWasmCalls = 1u << 1,
};
using Flags = base::Flags<Flag>;
@@ -69,10 +70,14 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Zone* ZoneForGraphAssembler() const { return temp_zone(); }
JSGraph* JSGraphForGraphAssembler() const { return jsgraph(); }
+ bool has_wasm_calls() const { return has_wasm_calls_; }
+
private:
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node,
const SharedFunctionInfoRef& shared);
+ Reduction ReduceCallWasmFunction(Node* node,
+ const SharedFunctionInfoRef& shared);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -245,6 +250,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Flags const flags_;
CompilationDependencies* const dependencies_;
std::set<Node*> waitlist_;
+
+ bool has_wasm_calls_ = false;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 21f6c887c0..448652ad8d 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -89,13 +89,9 @@ namespace {
bool IsContextParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- return index == start->op()->ValueOutputCount() - 2;
+ return ParameterIndexOf(node->op()) ==
+ StartNode{NodeProperties::GetValueInput(node, 0)}
+ .ContextParameterIndex_MaybeNonStandardLayout();
}
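
The removed comment's layout is still what the new StartNode helper encodes: {Parameter} indices start at -1, and the context is the last value output of {Start}. Below is a standalone sketch of that index arithmetic, assuming the standard layout only (the helper's "_MaybeNonStandardLayout" suffix hints at cases this toy ignores); the types and names here are illustrative, not V8's.

#include <cassert>

// Value outputs of {Start} for a JS function with N declared parameters:
//   closure (-1), receiver (0), param0 (1), ..., paramN-1 (N), context (N+1).
// So value_output_count == N + 3 and the context parameter index is
// value_output_count - 2.
int ContextParameterIndex(int declared_parameter_count) {
  int value_output_count = declared_parameter_count + 3;
  return value_output_count - 2;  // == declared_parameter_count + 1
}

int main() {
  // Two declared parameters: closure(-1), receiver(0), p0(1), p1(2), context(3).
  assert(ContextParameterIndex(2) == 3);
  return 0;
}
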
// Given a context {node} and the {distance} from that context to the target
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 74cb7937fa..899922a27f 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -36,10 +36,10 @@ namespace compiler {
namespace {
// Retrieves the frame state holding actual argument values.
-Node* GetArgumentsFrameState(Node* frame_state) {
- Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
- FrameStateInfo outer_state_info = FrameStateInfoOf(outer_state->op());
- return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+FrameState GetArgumentsFrameState(FrameState frame_state) {
+ FrameState outer_state{NodeProperties::GetFrameStateInput(frame_state)};
+ return outer_state.frame_state_info().type() ==
+ FrameStateType::kArgumentsAdaptor
? outer_state
: frame_state;
}
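
GetArgumentsFrameState now returns a typed FrameState, but the selection logic is unchanged: if the immediately outer frame state belongs to an arguments adaptor frame, the caller-supplied argument values live there, so that state is chosen instead of the given one. A minimal sketch over toy types (the enum values and the nullable outer pointer are stand-ins, not V8's classes):

#include <cassert>

enum class ToyFrameStateType { kOther, kArgumentsAdaptor };

struct ToyFrameState {
  ToyFrameStateType type;
  const ToyFrameState* outer;  // outer frame state, or nullptr if outermost
};

// Mirrors GetArgumentsFrameState: prefer the outer state when it is an
// arguments-adaptor frame, otherwise keep the given state.
const ToyFrameState* GetArgumentsFrameState(const ToyFrameState* frame_state) {
  const ToyFrameState* outer = frame_state->outer;
  return (outer != nullptr && outer->type == ToyFrameStateType::kArgumentsAdaptor)
             ? outer
             : frame_state;
}

int main() {
  ToyFrameState adaptor{ToyFrameStateType::kArgumentsAdaptor, nullptr};
  ToyFrameState inner{ToyFrameStateType::kOther, &adaptor};
  assert(GetArgumentsFrameState(&inner) == &adaptor);
  ToyFrameState plain{ToyFrameStateType::kOther, nullptr};
  assert(GetArgumentsFrameState(&plain) == &plain);
  return 0;
}
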
@@ -148,16 +148,15 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
- Node* const frame_state = NodeProperties::GetFrameStateInput(node);
- Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
Node* const control = graph()->start();
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ FrameStateInfo state_info = frame_state.frame_state_info();
SharedFunctionInfoRef shared(broker(),
state_info.shared_info().ToHandleChecked());
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
- if (outer_state->opcode() != IrOpcode::kFrameState) {
+ if (!frame_state.has_outer_frame_state()) {
switch (type) {
case CreateArgumentsType::kMappedArguments: {
// TODO(turbofan): Duplicate parameters are not handled yet.
@@ -165,17 +164,15 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const callee = NodeProperties::GetValueInput(node, 0);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
bool has_aliased_arguments = false;
- Node* const elements = effect = AllocateAliasedArguments(
- effect, control, context, arguments_frame, arguments_length, shared,
+ Node* const elements = effect = TryAllocateAliasedArguments(
+ effect, control, context, arguments_length, shared,
&has_aliased_arguments);
+ if (elements == nullptr) return NoChange();
+
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
has_aliased_arguments
@@ -197,18 +194,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
case CreateArgumentsType::kUnmappedArguments: {
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
Node* const elements = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kUnmappedArguments,
shared.internal_formal_parameter_count()),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -227,21 +220,16 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
case CreateArgumentsType::kRestParameter: {
Node* effect = NodeProperties::GetEffectInput(node);
- Node* const arguments_frame =
- graph()->NewNode(simplified()->ArgumentsFrame());
Node* const arguments_length =
- graph()->NewNode(simplified()->ArgumentsLength(
- shared.internal_formal_parameter_count()),
- arguments_frame);
+ graph()->NewNode(simplified()->ArgumentsLength());
Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()),
- arguments_frame);
+ simplified()->RestLength(shared.internal_formal_parameter_count()));
// Allocate the elements backing store.
Node* const elements = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kRestParameter,
shared.internal_formal_parameter_count()),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -263,7 +251,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
// Use inline allocation for all mapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- DCHECK_EQ(outer_state->opcode(), IrOpcode::kFrameState);
+ DCHECK_EQ(frame_state.outer_frame_state()->opcode(), IrOpcode::kFrameState);
switch (type) {
case CreateArgumentsType::kMappedArguments: {
Node* const callee = NodeProperties::GetValueInput(node, 0);
@@ -274,25 +262,20 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- // Check that the array allocated for arguments is not "large".
- {
- const int alloc_size = FixedArray::SizeFor(length);
- if (alloc_size > kMaxRegularHeapObjectSize) return NoChange();
- }
// Prepare element backing store to be used by arguments object.
bool has_aliased_arguments = false;
- Node* const elements = AllocateAliasedArguments(
+ Node* const elements = TryAllocateAliasedArguments(
effect, control, args_state, context, shared, &has_aliased_arguments);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map = jsgraph()->Constant(
@@ -319,23 +302,18 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
- // Check that the array allocated for arguments is not "large".
- {
- const int alloc_size = FixedArray::SizeFor(length);
- if (alloc_size > kMaxRegularHeapObjectSize) return NoChange();
- }
// Prepare element backing store to be used by arguments object.
- Node* const elements = AllocateArguments(effect, control, args_state);
+ Node* const elements = TryAllocateArguments(effect, control, args_state);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the arguments object map.
Node* const arguments_map =
@@ -361,18 +339,18 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
// chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
- IrOpcode::kDeadValue) {
+ FrameState args_state = GetArgumentsFrameState(frame_state);
+ if (args_state.parameters()->opcode() == IrOpcode::kDeadValue) {
// This protects against an incompletely propagated DeadValue node.
// If the FrameState has a DeadValue input, then this node will be
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
+ FrameStateInfo args_state_info = args_state.frame_state_info();
// Prepare element backing store to be used by the rest array.
Node* const elements =
- AllocateRestArguments(effect, control, args_state, start_index);
+ TryAllocateRestArguments(effect, control, args_state, start_index);
+ if (elements == nullptr) return NoChange();
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
Node* const jsarray_map =
@@ -424,11 +402,15 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
int parameter_count_no_receiver = shared.internal_formal_parameter_count();
- int size = parameter_count_no_receiver +
- shared.GetBytecodeArray().register_count();
+ int length = parameter_count_no_receiver +
+ shared.GetBytecodeArray().register_count();
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- ab.AllocateArray(size, MapRef(broker(), factory()->fixed_array_map()));
- for (int i = 0; i < size; ++i) {
+ if (!ab.CanAllocateArray(length, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(length, fixed_array_map);
+ for (int i = 0; i < length; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
jsgraph()->UndefinedConstant());
}
@@ -670,6 +652,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else {
PropertyCellRef array_constructor_protector(
broker(), factory()->array_constructor_protector());
+ array_constructor_protector.SerializeAsProtector();
can_inline_call = array_constructor_protector.value().AsSmi() ==
Protectors::kProtectorValid;
}
@@ -792,9 +775,12 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Create the register file.
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- ab.AllocateArray(register_count,
- MapRef(broker(), factory()->fixed_array_map()));
+ if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(register_count, fixed_array_map);
for (int i = 0; i < register_count; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
jsgraph()->UndefinedConstant());
@@ -904,13 +890,17 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
// Create the [[BoundArguments]] for the result.
Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
if (arity > 0) {
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(arity, MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(arity, fixed_array_map)) {
+ return NoChange();
+ }
+ ab.AllocateArray(arity, fixed_array_map);
for (int i = 0; i < arity; ++i) {
- a.Store(AccessBuilder::ForFixedArraySlot(i),
- NodeProperties::GetValueInput(node, 2 + i));
+ ab.Store(AccessBuilder::ForFixedArraySlot(i),
+ NodeProperties::GetValueInput(node, 2 + i));
}
- bound_arguments = effect = a.Finish();
+ bound_arguments = effect = ab.Finish();
}
// Create the JSBoundFunction result.
@@ -1189,7 +1179,8 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
ProcessedFeedback const& feedback =
broker()->GetFeedbackForRegExpLiteral(p.feedback());
if (!feedback.IsInsufficient()) {
- JSRegExpRef literal = feedback.AsRegExpLiteral().value();
+ RegExpBoilerplateDescriptionRef literal =
+ feedback.AsRegExpLiteral().value();
Node* value = effect = AllocateLiteralRegExp(effect, control, literal);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1427,67 +1418,70 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
// Helper that allocates a FixedArray holding argument values recorded in the
// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
- Node* frame_state) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+Node* JSCreateLowering::TryAllocateArguments(Node* effect, Node* control,
+ FrameState frame_state) {
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
+ auto parameters_it = parameters_access.begin_without_receiver();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(argument_count,
- MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(argument_count, fixed_array_map)) {
+ return nullptr;
+ }
+ ab.AllocateArray(argument_count, fixed_array_map);
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- parameters_it.node());
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ parameters_it.node());
}
- return a.Finish();
+ return ab.Finish();
}
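
The Allocate* helpers become Try* variants: instead of the removed ad-hoc check of FixedArray::SizeFor(length) against kMaxRegularHeapObjectSize at each call site, AllocationBuilder::CanAllocateArray is consulted and the helper returns nullptr when the backing store would be too large for inline allocation, letting callers bail out with NoChange(). A standalone sketch of that guard shape, with made-up size constants rather than V8's real ones:

#include <cstdio>
#include <memory>
#include <vector>

// Stand-ins for FixedArray::SizeFor and kMaxRegularHeapObjectSize.
constexpr int kTaggedSize = 8;
constexpr int kHeaderSize = 2 * kTaggedSize;
constexpr int kMaxRegularObjectSize = 1 << 17;

constexpr int SizeForArray(int length) {
  return kHeaderSize + length * kTaggedSize;
}

// Mirrors the Try* contract: return nullptr iff the requested backing store is
// too large to allocate inline; the caller then falls back (NoChange()).
std::unique_ptr<std::vector<int>> TryAllocateBackingStore(int length) {
  if (SizeForArray(length) > kMaxRegularObjectSize) return nullptr;
  return std::make_unique<std::vector<int>>(length);
}

int main() {
  if (auto store = TryAllocateBackingStore(16)) {
    std::printf("inline allocation ok, %zu slots\n", store->size());
  }
  if (!TryAllocateBackingStore(1 << 20)) {
    std::printf("too large: caller bails out with NoChange()\n");
  }
  return 0;
}
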
// Helper that allocates a FixedArray holding argument values recorded in the
// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
- Node* frame_state,
- int start_index) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+Node* JSCreateLowering::TryAllocateRestArguments(Node* effect, Node* control,
+ FrameState frame_state,
+ int start_index) {
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
int num_elements = std::max(0, argument_count - start_index);
if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
-
- // Skip unused arguments.
- for (int i = 0; i < start_index; i++) {
- ++parameters_it;
- }
+ auto parameters_it =
+ parameters_access.begin_without_receiver_and_skip(start_index);
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(num_elements, MapRef(broker(), factory()->fixed_array_map()));
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(num_elements, fixed_array_map)) {
+ return nullptr;
+ }
+ ab.AllocateArray(num_elements, fixed_array_map);
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
- parameters_it.node());
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ parameters_it.node());
}
- return a.Finish();
+ return ab.Finish();
}
// Helper that allocates a FixedArray serving as a parameter map for values
// recorded in the given {frame_state}. Some elements map to slots within the
// given {context}. Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateAliasedArguments(
- Node* effect, Node* control, Node* frame_state, Node* context,
+Node* JSCreateLowering::TryAllocateAliasedArguments(
+ Node* effect, Node* control, FrameState frame_state, Node* context,
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
- FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
+ FrameStateInfo state_info = frame_state.frame_state_info();
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
@@ -1495,40 +1489,50 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// any way, we can just return an unmapped backing store instead.
int parameter_count = shared.internal_formal_parameter_count();
if (parameter_count == 0) {
- return AllocateArguments(effect, control, frame_state);
+ return TryAllocateArguments(effect, control, frame_state);
}
// Calculate number of argument values being aliased/mapped.
int mapped_count = std::min(argument_count, parameter_count);
*has_aliased_arguments = true;
+ MapRef sloppy_arguments_elements_map(
+ broker(), factory()->sloppy_arguments_elements_map());
+ if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
+ mapped_count, sloppy_arguments_elements_map)) {
+ return nullptr;
+ }
+
+ MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ if (!AllocationBuilder::CanAllocateArray(argument_count, fixed_array_map)) {
+ return nullptr;
+ }
+
// Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ Node* const parameters = frame_state.parameters();
StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
+ auto parameters_it =
+ parameters_access.begin_without_receiver_and_skip(mapped_count);
// The unmapped argument values recorded in the frame state are stored yet
// another indirection away and then linked into the parameter map below,
// whereas mapped argument values are replaced with a hole instead.
- AllocationBuilder aa(jsgraph(), effect, control);
- aa.AllocateArray(argument_count,
- MapRef(broker(), factory()->fixed_array_map()));
- for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
- aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ AllocationBuilder ab(jsgraph(), effect, control);
+ ab.AllocateArray(argument_count, fixed_array_map);
+ for (int i = 0; i < mapped_count; ++i) {
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
jsgraph()->TheHoleConstant());
}
for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
DCHECK_NOT_NULL(parameters_it.node());
- aa.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
+ ab.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i),
parameters_it.node());
}
- Node* arguments = aa.Finish();
+ Node* arguments = ab.Finish();
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
- a.AllocateSloppyArgumentElements(
- mapped_count,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
@@ -1543,10 +1547,9 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// unknown at compile-time, the true {arguments_length} and {arguments_frame}
// values can only be determined dynamically at run-time and are provided.
// Serves as backing store for JSCreateArguments nodes.
-Node* JSCreateLowering::AllocateAliasedArguments(
- Node* effect, Node* control, Node* context, Node* arguments_frame,
- Node* arguments_length, const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments) {
+Node* JSCreateLowering::TryAllocateAliasedArguments(
+ Node* effect, Node* control, Node* context, Node* arguments_length,
+ const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
int parameter_count = shared.internal_formal_parameter_count();
@@ -1554,14 +1557,21 @@ Node* JSCreateLowering::AllocateAliasedArguments(
return graph()->NewNode(
simplified()->NewArgumentsElements(
CreateArgumentsType::kUnmappedArguments, parameter_count),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
+ }
+
+ int mapped_count = parameter_count;
+ MapRef sloppy_arguments_elements_map(
+ broker(), factory()->sloppy_arguments_elements_map());
+ if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
+ mapped_count, sloppy_arguments_elements_map)) {
+ return nullptr;
}
// From here on we are going to allocate a mapped (aka. aliased) elements
// backing store. We do not statically know how many arguments exist, but
// dynamically selecting the hole for some of the "mapped" elements allows
// using a static shape for the parameter map.
- int mapped_count = parameter_count;
*has_aliased_arguments = true;
// The unmapped argument values are stored yet another indirection away and
@@ -1570,13 +1580,11 @@ Node* JSCreateLowering::AllocateAliasedArguments(
Node* arguments = effect =
graph()->NewNode(simplified()->NewArgumentsElements(
CreateArgumentsType::kMappedArguments, mapped_count),
- arguments_frame, arguments_length, effect);
+ arguments_length, effect);
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateSloppyArgumentElements(
- mapped_count,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.AllocateSloppyArgumentElements(mapped_count, sloppy_arguments_elements_map);
a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
@@ -1667,52 +1675,35 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
kFullWriteBarrier,
LoadSensitivity::kUnsafe,
const_field_info};
+ ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
+ bool is_uninitialized =
+ boilerplate_value.IsHeapObject() &&
+ boilerplate_value.AsHeapObject().map().oddball_type() ==
+ OddballType::kUninitialized;
+ if (is_uninitialized) {
+ access.const_field_info = ConstFieldInfo::None();
+ }
Node* value;
- if (boilerplate_map.IsUnboxedDoubleField(i)) {
- access.machine_type = MachineType::Float64();
- access.type = Type::Number();
- uint64_t value_bits = boilerplate.RawFastDoublePropertyAsBitsAt(index);
- if (value_bits == kHoleNanInt64) {
- // This special case is analogous to is_uninitialized being true in the
- // non-unboxed-double case below. The store of the hole NaN value here
- // will always be followed by another store that actually initializes
- // the field. The hole NaN should therefore be unobservable.
- // Load elimination expects there to be at most one const store to any
- // given field, so we always mark the unobservable ones as mutable.
- access.const_field_info = ConstFieldInfo::None();
- }
- value = jsgraph()->Constant(bit_cast<double>(value_bits));
+ if (boilerplate_value.IsJSObject()) {
+ JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
+ value = effect =
+ AllocateFastLiteral(effect, control, boilerplate_object, allocation);
+ } else if (property_details.representation().IsDouble()) {
+ double number = boilerplate_value.AsHeapNumber().value();
+ // Allocate a mutable HeapNumber box and store the value into it.
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.Allocate(HeapNumber::kSize, allocation);
+ builder.Store(AccessBuilder::ForMap(),
+ MapRef(broker(), factory()->heap_number_map()));
+ builder.Store(AccessBuilder::ForHeapNumberValue(),
+ jsgraph()->Constant(number));
+ value = effect = builder.Finish();
+ } else if (property_details.representation().IsSmi()) {
+ // Ensure that value is stored as smi.
+ value = is_uninitialized ? jsgraph()->ZeroConstant()
+ : jsgraph()->Constant(boilerplate_value.AsSmi());
} else {
- ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
- bool is_uninitialized =
- boilerplate_value.IsHeapObject() &&
- boilerplate_value.AsHeapObject().map().oddball_type() ==
- OddballType::kUninitialized;
- if (is_uninitialized) {
- access.const_field_info = ConstFieldInfo::None();
- }
- if (boilerplate_value.IsJSObject()) {
- JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
- value = effect = AllocateFastLiteral(effect, control,
- boilerplate_object, allocation);
- } else if (property_details.representation().IsDouble()) {
- double number = boilerplate_value.AsHeapNumber().value();
- // Allocate a mutable HeapNumber box and store the value into it.
- AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(HeapNumber::kSize, allocation);
- builder.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
- builder.Store(AccessBuilder::ForHeapNumberValue(),
- jsgraph()->Constant(number));
- value = effect = builder.Finish();
- } else if (property_details.representation().IsSmi()) {
- // Ensure that value is stored as smi.
- value = is_uninitialized
- ? jsgraph()->ZeroConstant()
- : jsgraph()->Constant(boilerplate_value.AsSmi());
- } else {
- value = jsgraph()->Constant(boilerplate_value);
- }
+ value = jsgraph()->Constant(boilerplate_value);
}
inobject_fields.push_back(std::make_pair(access, value));
}
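
With unboxed-double fields gone, the per-property lowering is a single flat dispatch: recurse into nested boilerplate JSObjects, box double-representation values into a freshly allocated HeapNumber, store Smi-representation values (zero when the field is uninitialized), and otherwise emit the boilerplate value as a constant. A toy sketch of that dispatch over a variant rather than V8's ObjectRef/PropertyDetails API:

#include <cassert>
#include <string>
#include <variant>

// Toy stand-in for the boilerplate property value seen by AllocateFastLiteral.
struct NestedObject {};  // would trigger recursive literal allocation
using BoilerplateValue = std::variant<NestedObject, double, int, std::string>;

std::string LowerFastLiteralField(const BoilerplateValue& value,
                                  bool is_uninitialized) {
  if (std::holds_alternative<NestedObject>(value)) {
    return "recurse: AllocateFastLiteral for nested object";
  }
  if (std::holds_alternative<double>(value)) {
    return "allocate HeapNumber box and store the double";
  }
  if (std::holds_alternative<int>(value)) {
    // Uninitialized Smi fields are stored as zero, mirroring the reducer.
    return is_uninitialized ? "store Smi 0" : "store Smi constant";
  }
  return "store boilerplate value as constant";
}

int main() {
  assert(LowerFastLiteralField(3.14, false) ==
         "allocate HeapNumber box and store the double");
  assert(LowerFastLiteralField(42, true) == "store Smi 0");
  return 0;
}
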
@@ -1744,7 +1735,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
JSArrayRef boilerplate_array = boilerplate.AsJSArray();
builder.Store(
AccessBuilder::ForJSArrayLength(boilerplate_array.GetElementsKind()),
- boilerplate_array.length());
+ boilerplate_array.GetBoilerplateLength());
}
for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
@@ -1755,7 +1746,7 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
JSObjectRef boilerplate,
AllocationType allocation) {
- FixedArrayBaseRef boilerplate_elements = boilerplate.elements();
+ FixedArrayBaseRef boilerplate_elements = boilerplate.elements().value();
// Empty or copy-on-write elements just store a constant.
int const elements_length = boilerplate_elements.length();
@@ -1763,7 +1754,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
if (boilerplate_elements.length() == 0 || elements_map.IsFixedCowArrayMap()) {
if (allocation == AllocationType::kOld) {
boilerplate.EnsureElementsTenured();
- boilerplate_elements = boilerplate.elements();
+ boilerplate_elements = boilerplate.elements().value();
}
return jsgraph()->HeapConstant(boilerplate_elements.object());
}
@@ -1794,48 +1785,47 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
}
// Allocate the backing store array and store the elements.
- AllocationBuilder builder(jsgraph(), effect, control);
- builder.AllocateArray(elements_length, elements_map, allocation);
+ AllocationBuilder ab(jsgraph(), effect, control);
+ CHECK(ab.CanAllocateArray(elements_length, elements_map, allocation));
+ ab.AllocateArray(elements_length, elements_map, allocation);
ElementAccess const access =
(elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
for (int i = 0; i < elements_length; ++i) {
- builder.Store(access, jsgraph()->Constant(i), elements_values[i]);
+ ab.Store(access, jsgraph()->Constant(i), elements_values[i]);
}
- return builder.Finish();
+ return ab.Finish();
}
-Node* JSCreateLowering::AllocateLiteralRegExp(Node* effect, Node* control,
- JSRegExpRef boilerplate) {
- MapRef boilerplate_map = boilerplate.map();
+Node* JSCreateLowering::AllocateLiteralRegExp(
+ Node* effect, Node* control, RegExpBoilerplateDescriptionRef boilerplate) {
+ MapRef initial_map = native_context().regexp_function().initial_map();
// Sanity check that JSRegExp object layout hasn't changed.
- STATIC_ASSERT(static_cast<int>(JSRegExp::kDataOffset) ==
- static_cast<int>(JSObject::kHeaderSize));
+ STATIC_ASSERT(JSRegExp::kDataOffset == JSObject::kHeaderSize);
STATIC_ASSERT(JSRegExp::kSourceOffset == JSRegExp::kDataOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kFlagsOffset ==
JSRegExp::kSourceOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kHeaderSize == JSRegExp::kFlagsOffset + kTaggedSize);
STATIC_ASSERT(JSRegExp::kLastIndexOffset == JSRegExp::kHeaderSize);
- STATIC_ASSERT(JSRegExp::kInObjectFieldCount == 1); // LastIndex.
-
- const AllocationType allocation = AllocationType::kYoung;
- const int size =
- JSRegExp::kHeaderSize + JSRegExp::kInObjectFieldCount * kTaggedSize;
+ DCHECK_EQ(JSRegExp::Size(), JSRegExp::kLastIndexOffset + kTaggedSize);
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(size, allocation, Type::For(boilerplate_map));
- builder.Store(AccessBuilder::ForMap(), boilerplate_map);
+ builder.Allocate(JSRegExp::Size(), AllocationType::kYoung,
+ Type::For(initial_map));
+ builder.Store(AccessBuilder::ForMap(), initial_map);
builder.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- boilerplate.raw_properties_or_hash());
- builder.Store(AccessBuilder::ForJSObjectElements(), boilerplate.elements());
+ jsgraph()->EmptyFixedArrayConstant());
+ builder.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
builder.Store(AccessBuilder::ForJSRegExpData(), boilerplate.data());
builder.Store(AccessBuilder::ForJSRegExpSource(), boilerplate.source());
- builder.Store(AccessBuilder::ForJSRegExpFlags(), boilerplate.flags());
+ builder.Store(AccessBuilder::ForJSRegExpFlags(),
+ jsgraph()->SmiConstant(boilerplate.flags()));
builder.Store(AccessBuilder::ForJSRegExpLastIndex(),
- boilerplate.last_index());
+ jsgraph()->SmiConstant(JSRegExp::kInitialLastIndexValue));
return builder.Finish();
}
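
The STATIC_ASSERT/DCHECK chain pins down the in-object layout that AllocateLiteralRegExp fills slot by slot: data, source, flags, and lastIndex are consecutive tagged fields after the JSObject header, with lastIndex the only in-object field beyond the JSRegExp header. A sketch of that offset arithmetic with stand-in sizes (the real constants come from V8's layout headers, not these values):

#include <cassert>

constexpr int kTaggedSize = 8;
constexpr int kJSObjectHeaderSize = 3 * kTaggedSize;  // map, properties, elements

// Consecutive tagged in-object fields, as asserted in AllocateLiteralRegExp.
constexpr int kDataOffset = kJSObjectHeaderSize;
constexpr int kSourceOffset = kDataOffset + kTaggedSize;
constexpr int kFlagsOffset = kSourceOffset + kTaggedSize;
constexpr int kRegExpHeaderSize = kFlagsOffset + kTaggedSize;
constexpr int kLastIndexOffset = kRegExpHeaderSize;
constexpr int kRegExpSize = kLastIndexOffset + kTaggedSize;

int main() {
  // Mirrors the assertion chain: every field is one tagged slot past the
  // previous one, and the whole object is four tagged fields past the header.
  static_assert(kSourceOffset == kDataOffset + kTaggedSize, "layout");
  static_assert(kRegExpSize == kLastIndexOffset + kTaggedSize, "layout");
  assert(kRegExpSize == kJSObjectHeaderSize + 4 * kTaggedSize);
  return 0;
}
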
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 2fb28ebfd4..0edbda79a0 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -22,6 +22,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class CompilationDependencies;
+class FrameState;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
@@ -82,17 +83,21 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceJSCreateObject(Node* node);
- Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
- Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
- int start_index);
- Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
- Node* context,
- const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments);
- Node* AllocateAliasedArguments(Node* effect, Node* control, Node* context,
- Node* arguments_frame, Node* arguments_length,
- const SharedFunctionInfoRef& shared,
- bool* has_aliased_arguments);
+ // The following functions all return nullptr iff there are too many arguments
+ // for inline allocation.
+ Node* TryAllocateArguments(Node* effect, Node* control,
+ FrameState frame_state);
+ Node* TryAllocateRestArguments(Node* effect, Node* control,
+ FrameState frame_state, int start_index);
+ Node* TryAllocateAliasedArguments(Node* effect, Node* control,
+ FrameState frame_state, Node* context,
+ const SharedFunctionInfoRef& shared,
+ bool* has_aliased_arguments);
+ Node* TryAllocateAliasedArguments(Node* effect, Node* control, Node* context,
+ Node* arguments_length,
+ const SharedFunctionInfoRef& shared,
+ bool* has_aliased_arguments);
+
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
AllocationType allocation);
@@ -108,7 +113,7 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
JSObjectRef boilerplate,
AllocationType allocation);
Node* AllocateLiteralRegExp(Node* effect, Node* control,
- JSRegExpRef boilerplate);
+ RegExpBoilerplateDescriptionRef boilerplate);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index c8cce37ad9..33f2f742b0 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -873,7 +873,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
Node* receiver = jsgraph()->UndefinedConstant();
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
// Register argument inputs are followed by stack argument inputs (such as
@@ -935,7 +935,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
@@ -997,7 +997,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// The single available register is needed for `slot`, thus `spread` remains
// on the stack here.
@@ -1088,7 +1088,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, slot);
@@ -1128,7 +1128,7 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
Node* receiver = n.receiver();
Node* arguments_list = n.Argument(0);
Node* feedback_vector = n.feedback_vector();
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// Shuffling inputs.
// Before: {target, receiver, arguments_list, vector}.
@@ -1193,7 +1193,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().index());
// We pass the spread in a register, not on the stack.
Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
@@ -1251,6 +1251,9 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
+// Will be lowered in SimplifiedLowering.
+void JSGenericLowering::LowerJSWasmCall(Node* node) {}
+
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
JSForInPrepareNode n(node);
Effect effect(node); // {node} is kept in the effect chain.
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 120f8ee21d..aca12b4cb5 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -48,11 +48,12 @@ Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
Node* JSGraph::Constant(const ObjectRef& ref) {
if (ref.IsSmi()) return Constant(ref.AsSmi());
- OddballType oddball_type =
- ref.AsHeapObject().GetHeapObjectType().oddball_type();
if (ref.IsHeapNumber()) {
return Constant(ref.AsHeapNumber().value());
- } else if (oddball_type == OddballType::kUndefined) {
+ }
+ OddballType oddball_type =
+ ref.AsHeapObject().GetHeapObjectType().oddball_type();
+ if (oddball_type == OddballType::kUndefined) {
DCHECK(ref.object().equals(isolate()->factory()->undefined_value()));
return UndefinedConstant();
} else if (oddball_type == OddballType::kNull) {
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index cd7e3df3e0..0d428995a1 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -32,6 +32,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/utils.h"
@@ -45,12 +46,12 @@ namespace compiler {
#define FORWARD_DECL(Name) class Name##Data;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
-// removed.
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
-// There are five kinds of ObjectData values.
+// There are several kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
// base class (ObjectData), i.e. it's basically just the handle. Because the
@@ -61,6 +62,9 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// data is an instance of the corresponding (most-specific) subclass, e.g.
// JSFunctionData, which provides serialized information about the object.
//
+// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but
+// allows serialization from the background thread.
+//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
// data is an instance of the base class (ObjectData), i.e. it basically
// carries no information other than the handle.
@@ -78,6 +82,7 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
enum ObjectDataKind {
kSmi,
kSerializedHeapObject,
+ kBackgroundSerializedHeapObject,
kUnserializedHeapObject,
kNeverSerializedHeapObject,
kUnserializedReadOnlyHeapObject
@@ -90,6 +95,20 @@ bool IsReadOnlyHeapObject(Object object) {
(object.IsHeapObject() &&
ReadOnlyHeap::Contains(HeapObject::cast(object)));
}
+
+template <class T>
+constexpr bool IsSerializedHeapObject() {
+ return false;
+}
+
+#define DEFINE_MARKER(Name) \
+ template <> \
+ constexpr bool IsSerializedHeapObject<Name>() { \
+ return true; \
+ } \
+ STATIC_ASSERT(IsSerializedHeapObject<Name>());
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_MARKER)
+#undef DEFINE_MARKER
} // namespace
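
The IsSerializedHeapObject marker gives a constexpr, per-type answer to "is this class in the eagerly-serialized list", produced by expanding the list macro into explicit specializations; the JSTypedArrayData STATIC_ASSERT further below uses it to document a remaining dependency. A self-contained sketch of the same pattern with placeholder class and macro names (only the shape is taken from the patch):

#include <type_traits>

// Placeholder classes standing in for the broker's ref types.
class AlwaysSerialized {};
class NeverSerialized {};

// Primary template: by default a type is not in the serialized list.
template <class T>
constexpr bool IsSerializedHeapObject() {
  return false;
}

// The list macro expands to one explicit specialization per serialized type.
#define SERIALIZED_LIST(V) V(AlwaysSerialized)

#define DEFINE_MARKER(Name)                        \
  template <>                                      \
  constexpr bool IsSerializedHeapObject<Name>() {  \
    return true;                                   \
  }
SERIALIZED_LIST(DEFINE_MARKER)
#undef DEFINE_MARKER

// Usable in static assertions to document that some data class still depends
// on another type being eagerly serialized.
static_assert(IsSerializedHeapObject<AlwaysSerialized>(), "marker set");
static_assert(!IsSerializedHeapObject<NeverSerialized>(), "default is false");

int main() { return 0; }
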
class ObjectData : public ZoneObject {
@@ -116,20 +135,24 @@ class ObjectData : public ZoneObject {
broker->mode() == JSHeapBroker::kSerializing,
broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
- (kind == kUnserializedReadOnlyHeapObject &&
- IsReadOnlyHeapObject(*object)) ||
- kind == kNeverSerializedHeapObject);
+ kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
+ kind == kNeverSerializedHeapObject ||
+ kind == kBackgroundSerializedHeapObject);
+ CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
+ IsReadOnlyHeapObject(*object));
}
#define DECLARE_IS(Name) bool Is##Name() const;
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
#undef DECLARE_IS
#define DECLARE_AS(Name) Name##Data* As##Name();
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
- // TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
- // removed.
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
#undef DECLARE_AS
@@ -155,7 +178,8 @@ class ObjectData : public ZoneObject {
class HeapObjectData : public ObjectData {
public:
HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object);
+ Handle<HeapObject> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
bool boolean_value() const { return boolean_value_; }
ObjectData* map() const { return map_; }
@@ -172,17 +196,26 @@ class HeapObjectData : public ObjectData {
class PropertyCellData : public HeapObjectData {
public:
PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object);
+ Handle<PropertyCell> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
- PropertyDetails property_details() const { return property_details_; }
+ bool Serialize(JSHeapBroker* broker);
- void Serialize(JSHeapBroker* broker);
- ObjectData* value() const { return value_; }
+ PropertyDetails property_details() const {
+ CHECK(serialized());
+ return property_details_;
+ }
- private:
- PropertyDetails const property_details_;
+ ObjectData* value() const {
+ DCHECK(serialized());
+ return value_;
+ }
+ private:
+ PropertyDetails property_details_ = PropertyDetails::Empty();
ObjectData* value_ = nullptr;
+
+ bool serialized() const { return value_ != nullptr; }
};
// TODO(mslekova): Once we have real-world usage data, we might want to
@@ -265,16 +298,65 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object)
- : HeapObjectData(broker, storage, object),
- property_details_(object->property_details()) {}
+ Handle<PropertyCell> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind) {}
-void PropertyCellData::Serialize(JSHeapBroker* broker) {
- if (value_ != nullptr) return;
+bool PropertyCellData::Serialize(JSHeapBroker* broker) {
+ if (serialized()) return true;
TraceScope tracer(broker, this, "PropertyCellData::Serialize");
auto cell = Handle<PropertyCell>::cast(object());
- value_ = broker->GetOrCreateData(cell->value());
+
+ // While this code runs on a background thread, the property cell might
+ // undergo state transitions via calls to PropertyCell::Transition. These
+ // transitions follow a certain protocol on which we rely here to ensure that
+ // we only report success when we can guarantee consistent data. A key
+ // property is that after transitioning from cell type A to B (A != B), there
+ // will never be a transition back to A, unless A is kConstant and the new
+ // value is the hole (i.e. the property cell was invalidated, which is a final
+ // state).
+
+ PropertyDetails property_details = cell->property_details(kAcquireLoad);
+
+ Handle<Object> value =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (broker->ObjectMayBeUninitialized(value)) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ {
+ PropertyDetails property_details_again =
+ cell->property_details(kAcquireLoad);
+ if (property_details != property_details_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ if (property_details.cell_type() == PropertyCellType::kConstant) {
+ Handle<Object> value_again =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (*value != *value_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ ObjectData* value_data = broker->TryGetOrCreateData(value, false);
+ if (value_data == nullptr) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ PropertyCell::CheckDataIsCompatible(property_details, *value);
+
+ DCHECK(!serialized());
+ property_details_ = property_details;
+ value_ = value_data;
+ DCHECK(serialized());
+ return true;
}
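
The background path of PropertyCellData::Serialize relies on re-reading for validation: acquire-load the details, acquire-load the value, then load the details again (and, for kConstant cells, the value again) and report failure unless both reads agree; this is only sound because the cell's transition protocol never returns to an earlier state. A standalone sketch of that validate-by-re-reading shape using std::atomic stand-ins (a real PropertyCell is not literally a pair of atomics like this):

#include <atomic>
#include <cstdio>
#include <optional>

// Toy cell: two fields published by another thread; a state transition may
// update both, but the protocol guarantees it never returns to a prior state.
struct ToyCell {
  std::atomic<int> details{0};
  std::atomic<int> value{0};
};

struct Snapshot {
  int details;
  int value;
};

// Mirrors PropertyCellData::Serialize: take acquire-loads, then re-read and
// report failure if anything moved underneath us; the caller simply retries
// or gives up, as the reducer does by returning false.
std::optional<Snapshot> TrySnapshot(const ToyCell& cell) {
  int details = cell.details.load(std::memory_order_acquire);
  int value = cell.value.load(std::memory_order_acquire);
  if (cell.details.load(std::memory_order_acquire) != details) {
    return std::nullopt;  // concurrent transition observed
  }
  if (cell.value.load(std::memory_order_acquire) != value) {
    return std::nullopt;
  }
  return Snapshot{details, value};
}

int main() {
  ToyCell cell;
  cell.details.store(1, std::memory_order_release);
  cell.value.store(42, std::memory_order_release);
  if (auto snap = TrySnapshot(cell)) {
    std::printf("consistent snapshot: details=%d value=%d\n", snap->details,
                snap->value);
  }
  return 0;
}
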
void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
@@ -303,32 +385,6 @@ void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
data_ = broker->GetOrCreateData(call_handler_info->data());
}
-class JSObjectField {
- public:
- bool IsDouble() const { return object_ == nullptr; }
- uint64_t AsBitsOfDouble() const {
- CHECK(IsDouble());
- return number_bits_;
- }
- double AsDouble() const {
- CHECK(IsDouble());
- return bit_cast<double>(number_bits_);
- }
-
- bool IsObject() const { return object_ != nullptr; }
- ObjectData* AsObject() const {
- CHECK(IsObject());
- return object_;
- }
-
- explicit JSObjectField(uint64_t value_bits) : number_bits_(value_bits) {}
- explicit JSObjectField(ObjectData* value) : object_(value) {}
-
- private:
- ObjectData* object_ = nullptr;
- uint64_t number_bits_ = 0;
-};
-
class JSReceiverData : public HeapObjectData {
public:
JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
@@ -343,7 +399,7 @@ class JSObjectData : public JSReceiverData {
// Recursive serialization of all reachable JSObjects.
void SerializeAsBoilerplate(JSHeapBroker* broker);
- const JSObjectField& GetInobjectField(int property_index) const;
+ ObjectData* GetInobjectField(int property_index) const;
// Shallow serialization of {elements}.
void SerializeElements(JSHeapBroker* broker);
@@ -382,7 +438,7 @@ class JSObjectData : public JSReceiverData {
bool serialized_as_boilerplate_ = false;
bool serialized_elements_ = false;
- ZoneVector<JSObjectField> inobject_fields_;
+ ZoneVector<ObjectData*> inobject_fields_;
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
@@ -424,6 +480,7 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
}
namespace {
+
base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
Handle<Object> receiver,
uint32_t index,
@@ -491,33 +548,30 @@ ObjectData* JSObjectData::GetOwnDataProperty(JSHeapBroker* broker,
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSTypedArray> object);
+ Handle<JSTypedArray> object)
+ : JSObjectData(broker, storage, object) {}
- bool is_on_heap() const { return is_on_heap_; }
- size_t length() const { return length_; }
- void* data_ptr() const { return data_ptr_; }
+ // TODO(v8:7790): Once JSObject is no longer serialized, also make
+ // JSTypedArrayRef never-serialized.
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
+ bool is_on_heap() const { return is_on_heap_; }
+ size_t length() const { return length_; }
+ void* data_ptr() const { return data_ptr_; }
+
ObjectData* buffer() const { return buffer_; }
private:
- bool const is_on_heap_;
- size_t const length_;
- void* const data_ptr_;
-
bool serialized_ = false;
+ bool is_on_heap_ = false;
+ size_t length_ = 0;
+ void* data_ptr_ = nullptr;
ObjectData* buffer_ = nullptr;
};
-JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSTypedArray> object)
- : JSObjectData(broker, storage, object),
- is_on_heap_(object->is_on_heap()),
- length_(object->length()),
- data_ptr_(object->DataPtr()) {}
-
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
serialized_ = true;
@@ -525,6 +579,10 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
+ is_on_heap_ = typed_array->is_on_heap();
+ length_ = typed_array->length();
+ data_ptr_ = typed_array->DataPtr();
+
if (!is_on_heap()) {
DCHECK_NULL(buffer_);
buffer_ = broker->GetOrCreateData(typed_array->buffer());
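
JSTypedArrayData no longer snapshots is_on_heap/length/data_ptr in its constructor; they are captured only when Serialize() runs, behind a one-shot serialized_ flag. A minimal sketch of that lazy capture with a toy source object (field and type names are stand-ins):

#include <cassert>
#include <cstddef>

struct ToyTypedArray {
  bool on_heap;
  std::size_t length;
  void* data;
};

class ToyTypedArrayData {
 public:
  // Mirrors the new shape: nothing is read at construction time.
  explicit ToyTypedArrayData(const ToyTypedArray* object) : object_(object) {}

  void Serialize() {
    if (serialized_) return;  // one-shot, idempotent
    serialized_ = true;
    is_on_heap_ = object_->on_heap;
    length_ = object_->length;
    data_ptr_ = object_->data;
  }

  bool serialized() const { return serialized_; }
  bool is_on_heap() const { return is_on_heap_; }
  std::size_t length() const { return length_; }
  void* data_ptr() const { return data_ptr_; }

 private:
  const ToyTypedArray* object_;
  bool serialized_ = false;
  bool is_on_heap_ = false;
  std::size_t length_ = 0;
  void* data_ptr_ = nullptr;
};

int main() {
  ToyTypedArray arr{true, 8, nullptr};
  ToyTypedArrayData data(&arr);
  assert(!data.serialized());
  data.Serialize();
  assert(data.serialized() && data.length() == 8 && data.is_on_heap());
  return 0;
}
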
@@ -599,7 +657,6 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; }
- bool HasAttachedOptimizedCode() const { return has_attached_optimized_code_; }
bool PrototypeRequiresRuntimeLookup() const {
return PrototypeRequiresRuntimeLookup_;
}
@@ -607,14 +664,29 @@ class JSFunctionData : public JSObjectData {
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
+ void SerializeCodeAndFeedback(JSHeapBroker* broker);
+ bool serialized_code_and_feedback() const {
+ return serialized_code_and_feedback_;
+ }
+
ObjectData* context() const { return context_; }
ObjectData* native_context() const { return native_context_; }
ObjectData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
ObjectData* shared() const { return shared_; }
- ObjectData* raw_feedback_cell() const { return feedback_cell_; }
- ObjectData* feedback_vector() const { return feedback_vector_; }
- ObjectData* code() const { return code_; }
+ ObjectData* raw_feedback_cell() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_cell_;
+ }
+ ObjectData* feedback_vector() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_vector_;
+ }
+ ObjectData* code() const {
+ DCHECK(serialized_code_and_feedback());
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ return code_;
+ }
int initial_map_instance_size_with_min_slack() const {
CHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -624,10 +696,10 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector_;
bool has_initial_map_;
bool has_prototype_;
- bool has_attached_optimized_code_;
bool PrototypeRequiresRuntimeLookup_;
bool serialized_ = false;
+ bool serialized_code_and_feedback_ = false;
ObjectData* context_ = nullptr;
ObjectData* native_context_ = nullptr;
@@ -640,35 +712,40 @@ class JSFunctionData : public JSObjectData {
int initial_map_instance_size_with_min_slack_;
};
-class JSRegExpData : public JSObjectData {
+class RegExpBoilerplateDescriptionData : public HeapObjectData {
public:
- JSRegExpData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSRegExp> object)
- : JSObjectData(broker, storage, object) {}
-
- void SerializeAsRegExpBoilerplate(JSHeapBroker* broker);
+ RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<RegExpBoilerplateDescription> object)
+ : HeapObjectData(broker, storage, object) {}
- ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
- ObjectData* data() const { return data_; }
- ObjectData* source() const { return source_; }
- ObjectData* flags() const { return flags_; }
- ObjectData* last_index() const { return last_index_; }
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* data() const {
+ CHECK(serialized_);
+ return data_;
+ }
+ ObjectData* source() const {
+ CHECK(serialized_);
+ return source_;
+ }
+ int flags() const {
+ CHECK(serialized_);
+ return flags_;
+ }
private:
- bool serialized_as_reg_exp_boilerplate_ = false;
-
- ObjectData* raw_properties_or_hash_ = nullptr;
+ bool serialized_ = false;
ObjectData* data_ = nullptr;
ObjectData* source_ = nullptr;
- ObjectData* flags_ = nullptr;
- ObjectData* last_index_ = nullptr;
+ int flags_;
};
class HeapNumberData : public HeapObjectData {
public:
HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapNumber> object)
- : HeapObjectData(broker, storage, object), value_(object->value()) {}
+ Handle<HeapNumber> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : HeapObjectData(broker, storage, object, kind),
+ value_(object->value()) {}
double value() const { return value_; }
@@ -741,21 +818,27 @@ class NativeContextData : public ContextData {
#undef DECL_ACCESSOR
const ZoneVector<ObjectData*>& function_maps() const {
- CHECK(serialized_);
+ CHECK_NE(state_, State::kUnserialized);
return function_maps_;
}
ObjectData* scope_info() const {
- CHECK(serialized_);
+ CHECK_NE(state_, State::kUnserialized);
return scope_info_;
}
NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object);
void Serialize(JSHeapBroker* broker);
+ void SerializeOnBackground(JSHeapBroker* broker);
private:
- bool serialized_ = false;
+  // After Serialize is called the class is partially serialized and in the
+  // kSerializedOnMainThread state. It then becomes kFullySerialized once
+ // SerializeOnBackground is called.
+ enum class State { kUnserialized, kSerializedOnMainThread, kFullySerialized };
+ State state_;
+
#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
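
NativeContextData thus replaces its serialized_ boolean with a three-valued state: Serialize() on the main thread moves it to kSerializedOnMainThread and SerializeOnBackground() completes it to kFullySerialized, while the accessors shown above only require that serialization has at least started. A small sketch of those transitions with the actual serialization bodies elided (the class name and checks are illustrative):

#include <cassert>

class ToyNativeContextData {
 public:
  enum class State { kUnserialized, kSerializedOnMainThread, kFullySerialized };

  void Serialize() {  // main-thread step
    if (state_ != State::kUnserialized) return;
    state_ = State::kSerializedOnMainThread;
    // ... serialize fields that must be read on the main thread ...
  }

  void SerializeOnBackground() {  // background step
    if (state_ == State::kFullySerialized) return;
    assert(state_ == State::kSerializedOnMainThread);
    state_ = State::kFullySerialized;
    // ... serialize fields that are safe to read off-thread ...
  }

  // Accessors such as scope_info()/function_maps() only check that
  // serialization has started, mirroring CHECK_NE(state_, kUnserialized).
  bool HasStartedSerialization() const {
    return state_ != State::kUnserialized;
  }

 private:
  State state_ = State::kUnserialized;
};

int main() {
  ToyNativeContextData data;
  assert(!data.HasStartedSerialization());
  data.Serialize();
  assert(data.HasStartedSerialization());
  data.SerializeOnBackground();
  assert(data.HasStartedSerialization());
  return 0;
}
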
@@ -766,9 +849,7 @@ class NativeContextData : public ContextData {
class NameData : public HeapObjectData {
public:
NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
+ : HeapObjectData(broker, storage, object) {}
};
class StringData : public NameData {
@@ -781,7 +862,7 @@ class StringData : public NameData {
bool is_external_string() const { return is_external_string_; }
bool is_seq_string() const { return is_seq_string_; }
- ObjectData* GetCharAsString(
+ ObjectData* GetCharAsStringOrUndefined(
JSHeapBroker* broker, uint32_t index,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
@@ -814,9 +895,7 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
to_number_(TryStringToDouble(broker->local_isolate(), object)),
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
+ chars_as_strings_(broker->zone()) {}
class InternalizedStringData : public StringData {
public:
@@ -827,8 +906,9 @@ class InternalizedStringData : public StringData {
}
};
-ObjectData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
+ObjectData* StringData::GetCharAsStringOrUndefined(JSHeapBroker* broker,
+ uint32_t index,
+ SerializationPolicy policy) {
if (index >= static_cast<uint32_t>(length())) return nullptr;
for (auto const& p : chars_as_strings_) {
@@ -902,7 +982,6 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
DCHECK_EQ(kData, details.kind());
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -968,8 +1047,9 @@ class AllocationSiteData : public HeapObjectData {
class BigIntData : public HeapObjectData {
public:
- BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
- : HeapObjectData(broker, storage, object),
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : HeapObjectData(broker, storage, object, kind),
as_uint64_(object->AsUint64(nullptr)) {}
uint64_t AsUint64() const { return as_uint64_; }
@@ -993,12 +1073,12 @@ struct PropertyDescriptor {
FieldIndex field_index;
ObjectData* field_owner = nullptr;
ObjectData* field_type = nullptr;
- bool is_unboxed_double_field = false;
};
class MapData : public HeapObjectData {
public:
- MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object);
+ MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
InstanceType instance_type() const { return instance_type_; }
int instance_size() const { return instance_size_; }
@@ -1157,8 +1237,8 @@ void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
}
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object)
- : ObjectData(broker, storage, object, kSerializedHeapObject),
+ Handle<HeapObject> object, ObjectDataKind kind)
+ : ObjectData(broker, storage, object, kind),
boolean_value_(object->BooleanValue(broker->isolate())),
// We have to use a raw cast below instead of AsMap() because of
// recursion. AsMap() would call IsMap(), which accesses the
@@ -1166,7 +1246,10 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
// meta map (whose map is itself), this member has not yet been
// initialized.
map_(broker->GetOrCreateData(object->map())) {
- CHECK_EQ(broker->mode(), JSHeapBroker::kSerializing);
+ CHECK_IMPLIES(kind == kSerializedHeapObject,
+ broker->mode() == JSHeapBroker::kSerializing);
+ CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
+ kind == kBackgroundSerializedHeapObject);
}
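// Editor's note -- illustration only, not part of the diff. The two
// CHECK_IMPLIES statements above admit the following combinations of data
// kind and broker mode at construction time:
//
//   kSerializedHeapObject            -> only while broker->mode() == kSerializing
//   kBackgroundSerializedHeapObject  -> kSerializing or kSerialized
//
// i.e. once the broker has reached kSerialized, the only HeapObjectData that
// may still be created is background-serialized data.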
InstanceType HeapObjectData::GetMapInstanceType() const {
@@ -1192,8 +1275,7 @@ bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
return map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(map->elements_kind()) &&
map->prototype().IsJSArray() &&
- isolate->IsAnyInitialArrayPrototype(
- handle(JSArray::cast(map->prototype()), isolate)) &&
+ isolate->IsAnyInitialArrayPrototype(JSArray::cast(map->prototype())) &&
Protectors::IsNoElementsIntact(isolate);
}
@@ -1203,8 +1285,9 @@ bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
}
} // namespace
-MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object)
- : HeapObjectData(broker, storage, object),
+MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind),
instance_type_(object->instance_type()),
instance_size_(object->instance_size()),
bit_field_(object->bit_field()),
@@ -1238,7 +1321,6 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()),
- has_attached_optimized_code_(object->HasAttachedOptimizedCode()),
PrototypeRequiresRuntimeLookup_(
object->PrototypeRequiresRuntimeLookup()) {}
@@ -1254,18 +1336,11 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
- DCHECK_NULL(feedback_cell_);
- DCHECK_NULL(feedback_vector_);
- DCHECK_NULL(code_);
context_ = broker->GetOrCreateData(function->context());
native_context_ = broker->GetOrCreateData(function->native_context());
shared_ = broker->GetOrCreateData(function->shared());
- feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
- feedback_vector_ = has_feedback_vector()
- ? broker->GetOrCreateData(function->feedback_vector())
- : nullptr;
- code_ = broker->GetOrCreateData(function->code());
+
initial_map_ = has_initial_map()
? broker->GetOrCreateData(function->initial_map())
: nullptr;
@@ -1288,6 +1363,29 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
}
}
+void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
+ DCHECK(serialized_);
+ if (serialized_code_and_feedback_) return;
+ serialized_code_and_feedback_ = true;
+
+ TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ DCHECK_NULL(feedback_cell_);
+ DCHECK_NULL(feedback_vector_);
+ DCHECK_NULL(code_);
+ if (!FLAG_turbo_direct_heap_access) {
+ // This is conditionalized because Code objects are never serialized now.
+ // We only need to represent the code object in serialized data when
+ // we're unable to perform direct heap accesses.
+ code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
+ }
+ feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
+ feedback_vector_ = has_feedback_vector()
+ ? broker->GetOrCreateData(function->feedback_vector())
+ : nullptr;
+}
+
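// Editor's note -- illustrative sketch, not part of the diff. Code and
// feedback serialization is now a separate, second step after Serialize().
// A caller holding the corresponding ref wrapper (hypothetical variable
// `function`, assuming a JSFunctionRef exposing matching Serialize* methods)
// would request both explicitly:
//
//   function.Serialize();                 // context, shared, initial map, ...
//   function.SerializeCodeAndFeedback();  // code (only without direct heap
//                                         // access), feedback cell and vector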
void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
if (serialized_elements_kind_generalizations_) return;
serialized_elements_kind_generalizations_ = true;
@@ -1314,12 +1412,78 @@ class DescriptorArrayData : public HeapObjectData {
Handle<DescriptorArray> object)
: HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
- ZoneMap<int, PropertyDescriptor>& contents() { return contents_; }
+ ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_owner;
+ }
+
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).details;
+ }
+
+ ObjectData* GetPropertyKey(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).key;
+ }
+
+ FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_index;
+ }
+
+ ObjectData* GetFieldType(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_type;
+ }
+
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).value;
+ }
+
+ bool serialized_descriptor(InternalIndex descriptor_index) const {
+ return contents_.find(descriptor_index.as_int()) != contents_.end();
+ }
+
+ void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map,
+ InternalIndex descriptor_index);
private:
ZoneMap<int, PropertyDescriptor> contents_;
};
+void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
+ Handle<Map> map,
+ InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
+ if (contents_.find(descriptor_index.as_int()) != contents_.end()) return;
+
+ Isolate* const isolate = broker->isolate();
+ auto descriptors = Handle<DescriptorArray>::cast(object());
+ CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
+
+ PropertyDescriptor d;
+ d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
+ MaybeObject value = descriptors->GetValue(descriptor_index);
+ HeapObject obj;
+ if (value.GetHeapObjectIfStrong(&obj)) {
+ d.value = broker->GetOrCreateData(obj);
+ }
+ d.details = descriptors->GetDetails(descriptor_index);
+ if (d.details.location() == kField) {
+ d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
+ d.field_owner =
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
+ d.field_type =
+ broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
+ }
+ contents_[descriptor_index.as_int()] = d;
+
+ if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
+ // Recurse on the owner map.
+ d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ }
+
+ TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
+ << this << " (" << contents_.size()
+ << " total)");
+}
+
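// Editor's note -- illustration only, not part of the diff. Per-descriptor
// data now lives on DescriptorArrayData and is copied lazily, one
// InternalIndex at a time; MapData::SerializeOwnDescriptor (further down in
// this diff) simply forwards to it:
//
//   if (!instance_descriptors()->should_access_heap()) {
//     instance_descriptors()->AsDescriptorArray()->SerializeDescriptor(
//         broker, map, descriptor_index);
//   }
//
// serialized_descriptor(descriptor_index) reports whether a given slot has
// been copied, so the Get* accessors above can be guarded by it.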
class FeedbackCellData : public HeapObjectData {
public:
FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
@@ -1334,7 +1498,11 @@ class FeedbackCellData : public HeapObjectData {
FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
Handle<FeedbackCell> object)
: HeapObjectData(broker, storage, object),
- value_(broker->GetOrCreateData(object->value())) {}
+ value_(object->value().IsFeedbackVector()
+ ? broker->GetOrCreateData(object->value())
+ : nullptr) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
class FeedbackVectorData : public HeapObjectData {
public:
@@ -1365,7 +1533,9 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
Handle<FeedbackVector> object)
: HeapObjectData(broker, storage, object),
invocation_count_(object->invocation_count()),
- closure_feedback_cell_array_(broker->zone()) {}
+ closure_feedback_cell_array_(broker->zone()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
int index) const {
@@ -1555,51 +1725,18 @@ class BytecodeArrayData : public FixedArrayBaseData {
return incoming_new_target_or_generator_register_;
}
- Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
- return constant_pool_[index]->object();
- }
-
- bool IsConstantAtIndexSmi(int index) const {
- return constant_pool_[index]->is_smi();
- }
-
- Smi GetConstantAtIndexAsSmi(int index) const {
- return *(Handle<Smi>::cast(constant_pool_[index]->object()));
- }
-
- void SerializeForCompilation(JSHeapBroker* broker) {
- if (is_serialized_for_compilation_) return;
-
- // Convinience cast: object() is already a canonical persistent handle.
- Handle<BytecodeArray> bytecodes = Handle<BytecodeArray>::cast(object());
-
- DCHECK(constant_pool_.empty());
- Handle<FixedArray> constant_pool(bytecodes->constant_pool(),
- broker->isolate());
- constant_pool_.reserve(constant_pool->length());
- for (int i = 0; i < constant_pool->length(); i++) {
- constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
- }
-
- is_serialized_for_compilation_ = true;
- }
-
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
register_count_(object->register_count()),
parameter_count_(object->parameter_count()),
incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()),
- constant_pool_(broker->zone()) {}
+ object->incoming_new_target_or_generator_register()) {}
private:
int const register_count_;
int const parameter_count_;
interpreter::Register const incoming_new_target_or_generator_register_;
-
- bool is_serialized_for_compilation_ = false;
- ZoneVector<ObjectData*> constant_pool_;
};
class JSArrayData : public JSObjectData {
@@ -1608,7 +1745,10 @@ class JSArrayData : public JSObjectData {
Handle<JSArray> object);
void Serialize(JSHeapBroker* broker);
- ObjectData* length() const { return length_; }
+ ObjectData* length() const {
+ CHECK(serialized_);
+ return length_;
+ }
ObjectData* GetOwnElement(
JSHeapBroker* broker, uint32_t index,
@@ -1630,6 +1770,8 @@ JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
: JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
void JSArrayData::Serialize(JSHeapBroker* broker) {
+ CHECK(!FLAG_turbo_direct_heap_access);
+
if (serialized_) return;
serialized_ = true;
@@ -1969,7 +2111,12 @@ class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
: HeapObjectData(broker, storage, object),
- inlined_bytecode_size_(object->inlined_bytecode_size()) {}
+ inlined_bytecode_size_(object->inlined_bytecode_size() > 0 &&
+ !object->marked_for_deoptimization()
+ ? object->inlined_bytecode_size()
+ : 0) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
@@ -1988,16 +2135,21 @@ class CodeData : public HeapObjectData {
return InstanceTypeChecker::Is##Name(instance_type); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
#undef DEFINE_IS
-#define DEFINE_AS(Name) \
- Name##Data* ObjectData::As##Name() { \
- CHECK(Is##Name()); \
- CHECK_EQ(kind_, kSerializedHeapObject); \
- return static_cast<Name##Data*>(this); \
+#define DEFINE_AS(Name) \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK(Is##Name()); \
+ CHECK(kind_ == kSerializedHeapObject || \
+ kind_ == kBackgroundSerializedHeapObject); \
+ return static_cast<Name##Data*>(this); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
@@ -2014,7 +2166,7 @@ HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
-const JSObjectField& JSObjectData::GetInobjectField(int property_index) const {
+ObjectData* JSObjectData::GetInobjectField(int property_index) const {
CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
return inobject_fields_[property_index];
}
@@ -2023,7 +2175,10 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {
return cow_or_empty_elements_tenured_;
}
-ObjectData* JSObjectData::elements() const { return elements_; }
+ObjectData* JSObjectData::elements() const {
+ CHECK(serialized_elements_);
+ return elements_;
+}
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
@@ -2086,14 +2241,6 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
}
}
-ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
- DescriptorArrayData* descriptor_array =
- instance_descriptors()->AsDescriptorArray();
- auto data = descriptor_array->contents().find(descriptor_index.as_int());
- if (data == descriptor_array->contents().end()) return nullptr;
- return data->second.value;
-}
-
void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
InternalIndex descriptor_index) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
@@ -2104,42 +2251,11 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad));
}
- ZoneMap<int, PropertyDescriptor>& contents =
- instance_descriptors()->AsDescriptorArray()->contents();
- CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
- if (contents.find(descriptor_index.as_int()) != contents.end()) return;
-
- Isolate* const isolate = broker->isolate();
- auto descriptors =
- Handle<DescriptorArray>::cast(instance_descriptors()->object());
- CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
-
- PropertyDescriptor d;
- d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
- MaybeObject value = descriptors->GetValue(descriptor_index);
- HeapObject obj;
- if (value.GetHeapObjectIfStrong(&obj)) {
- d.value = broker->GetOrCreateData(obj);
- }
- d.details = descriptors->GetDetails(descriptor_index);
- if (d.details.location() == kField) {
- d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
- d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
- d.field_type =
- broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
- d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
- }
- contents[descriptor_index.as_int()] = d;
-
- if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
- // Recurse on the owner map.
- d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ if (!instance_descriptors()->should_access_heap()) {
+ DescriptorArrayData* descriptors =
+ instance_descriptors()->AsDescriptorArray();
+ descriptors->SerializeDescriptor(broker, map, descriptor_index);
}
-
- TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
- << instance_descriptors() << " ("
- << contents.size() << " total)");
}
void MapData::SerializeRootMap(JSHeapBroker* broker) {
@@ -2191,6 +2307,8 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
}
DCHECK_NULL(elements_);
+ DCHECK(!serialized_elements_);
+ serialized_elements_ = true;
elements_ = broker->GetOrCreateData(elements_object);
DCHECK(elements_->IsFixedArrayBase());
@@ -2238,33 +2356,24 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
// this field.
DCHECK_EQ(field_index.property_index(),
static_cast<int>(inobject_fields_.size()));
- if (boilerplate->IsUnboxedDoubleField(field_index)) {
- uint64_t value_bits =
- boilerplate->RawFastDoublePropertyAsBitsAt(field_index);
- inobject_fields_.push_back(JSObjectField{value_bits});
- } else {
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
- isolate);
- // In case of double fields we use a sentinel NaN value to mark
- // uninitialized fields. A boilerplate value with such a field may migrate
- // from its double to a tagged representation. If the double is unboxed,
- // the raw double is converted to a heap number, otherwise the (boxed)
- // double ceases to be mutable, and becomes a normal heap number. The
- // sentinel value carries no special meaning when it occurs in a heap
- // number, so we would like to recover the uninitialized value. We check
- // for the sentinel here, specifically, since migrations might have been
- // triggered as part of boilerplate serialization.
- if (!details.representation().IsDouble() && value->IsHeapNumber() &&
- HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
- value = isolate->factory()->uninitialized_value();
- }
- ObjectData* value_data = broker->GetOrCreateData(value);
- if (value_data->IsJSObject() && !value_data->should_access_heap()) {
- value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
- depth - 1);
- }
- inobject_fields_.push_back(JSObjectField{value_data});
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ // In case of double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its double to a tagged representation. The sentinel value carries
+ // no special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value. We check for the sentinel here,
+ // specifically, since migrations might have been triggered as part of
+ // boilerplate serialization.
+ if (!details.representation().IsDouble() && value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
}
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ if (value_data->IsJSObject() && !value_data->should_access_heap()) {
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
+ }
+ inobject_fields_.push_back(value_data);
}
TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
@@ -2272,24 +2381,22 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
map()->AsMap()->SerializeOwnDescriptors(broker);
}
- if (IsJSArray()) AsJSArray()->Serialize(broker);
+ if (IsJSArray() && !FLAG_turbo_direct_heap_access) {
+ AsJSArray()->Serialize(broker);
+ }
}
-void JSRegExpData::SerializeAsRegExpBoilerplate(JSHeapBroker* broker) {
- if (serialized_as_reg_exp_boilerplate_) return;
- serialized_as_reg_exp_boilerplate_ = true;
-
- TraceScope tracer(broker, this, "JSRegExpData::SerializeAsRegExpBoilerplate");
- Handle<JSRegExp> boilerplate = Handle<JSRegExp>::cast(object());
+void RegExpBoilerplateDescriptionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return; // Only serialize once.
+ serialized_ = true;
- SerializeElements(broker);
+ TraceScope tracer(broker, this,
+ "RegExpBoilerplateDescriptionData::Serialize");
+ auto boilerplate = Handle<RegExpBoilerplateDescription>::cast(object());
- raw_properties_or_hash_ =
- broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
data_ = broker->GetOrCreateData(boilerplate->data());
source_ = broker->GetOrCreateData(boilerplate->source());
- flags_ = broker->GetOrCreateData(boilerplate->flags());
- last_index_ = broker->GetOrCreateData(boilerplate->last_index());
+ flags_ = boilerplate->flags();
}
#ifdef DEBUG
@@ -2374,6 +2481,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
+ is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
@@ -2427,6 +2535,12 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
DCHECK_NOT_NULL(local_isolate_);
local_isolate_->heap()->AttachPersistentHandles(
info->DetachPersistentHandles());
+
+ if (FLAG_turbo_direct_heap_access) {
+ // Ensure any serialization that happens on the background has been
+ // performed.
+ target_native_context().SerializeOnBackground();
+ }
}
void JSHeapBroker::DetachLocalIsolate(OptimizedCompilationInfo* info) {
@@ -2547,7 +2661,8 @@ void JSHeapBroker::SetTargetNativeContextRef(
(mode() == kSerializing &&
target_native_context_->object().equals(native_context) &&
target_native_context_->data_->kind() == kUnserializedHeapObject));
- target_native_context_ = NativeContextRef(this, native_context);
+ target_native_context_ =
+ NativeContextRef(this, CanonicalPersistentHandle(*native_context));
}
void JSHeapBroker::CollectArrayAndObjectPrototypes() {
@@ -2637,18 +2752,21 @@ void JSHeapBroker::InitializeAndStartSerializing(
// Throw away the dummy data that we created while disabled.
refs_->Clear();
- refs_ = nullptr;
-
refs_ =
zone()->New<RefsMap>(kInitialRefsBucketCount, AddressMatcher(), zone());
SetTargetNativeContextRef(native_context);
target_native_context().Serialize();
+ if (!FLAG_turbo_direct_heap_access) {
+ // Perform full native context serialization now if we can't do it later on
+ // the background thread.
+ target_native_context().SerializeOnBackground();
+ }
CollectArrayAndObjectPrototypes();
Factory* const f = isolate()->factory();
- {
+ if (!FLAG_turbo_direct_heap_access) {
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
@@ -2676,56 +2794,125 @@ void JSHeapBroker::InitializeAndStartSerializing(
TRACE(this, "Finished serializing standard objects");
}
+ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ ObjectData* return_value =
+ TryGetOrCreateData(object, true, background_serialization);
+ DCHECK_NOT_NULL(return_value);
+ return return_value;
+}
+
// clang-format off
-ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
- RefsMap::Entry* entry = refs_->LookupOrInsert(object.address());
- ObjectData* object_data = entry->value;
-
- if (object_data == nullptr) {
- ObjectData** data_storage = &(entry->value);
- // TODO(neis): Remove these Allow* once we serialize everything upfront.
- AllowHandleDereference handle_dereference;
- if (object->IsSmi()) {
- object_data = zone()->New<ObjectData>(this, data_storage, object, kSmi);
- } else if (IsReadOnlyHeapObject(*object)) {
- object_data = zone()->New<ObjectData>(this, data_storage, object,
- kUnserializedReadOnlyHeapObject);
+ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
+ bool crash_on_error,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ RefsMap::Entry* entry = refs_->Lookup(object.address());
+ if (entry != nullptr) return entry->value;
+
+ if (mode() == JSHeapBroker::kDisabled) {
+ entry = refs_->LookupOrInsert(object.address());
+ ObjectData** storage = &(entry->value);
+ if (*storage == nullptr) {
+ entry->value = zone()->New<ObjectData>(
+ this, storage, object,
+ object->IsSmi() ? kSmi : kUnserializedHeapObject);
+ }
+ return *storage;
+ }
+
+ CHECK(mode() == JSHeapBroker::kSerializing ||
+ mode() == JSHeapBroker::kSerialized);
+
+ ObjectData* object_data;
+ if (object->IsSmi()) {
+ entry = refs_->LookupOrInsert(object.address());
+ object_data = zone()->New<ObjectData>(this, &(entry->value), object, kSmi);
+ } else if (IsReadOnlyHeapObject(*object)) {
+ entry = refs_->LookupOrInsert(object.address());
+ object_data = zone()->New<ObjectData>(this, &(entry->value), object,
+ kUnserializedReadOnlyHeapObject);
// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
// FLAG_turbo_direct_heap_access.
-#define CREATE_DATA_FOR_DIRECT_READ(name) \
- } else if (object->Is##name()) { \
- if (FLAG_turbo_direct_heap_access) { \
- object_data = zone()->New<ObjectData>( \
- this, data_storage, object, kNeverSerializedHeapObject); \
- } else { \
- CHECK_EQ(mode(), kSerializing); \
- AllowHandleAllocation handle_allocation; \
- object_data = zone()->New<name##Data>(this, data_storage, \
- Handle<name>::cast(object)); \
- }
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
+#define CREATE_DATA_FOR_DIRECT_READ(name) \
+ } else if (object->Is##name()) { \
+ if (FLAG_turbo_direct_heap_access) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<ObjectData>( \
+ this, &(entry->value), object, kNeverSerializedHeapObject); \
+ } else if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ }
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
#undef CREATE_DATA_FOR_DIRECT_READ
-#define CREATE_DATA_FOR_SERIALIZATION(name) \
- } else if (object->Is##name()) { \
- CHECK_EQ(mode(), kSerializing); \
- AllowHandleAllocation handle_allocation; \
- object_data = zone()->New<name##Data>(this, data_storage, \
- Handle<name>::cast(object));
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
+#define CREATE_DATA_FOR_POSSIBLE_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (mode() == kSerialized && \
+ background_serialization != \
+ ObjectRef::BackgroundSerialization::kAllowed) { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ } \
+ entry = refs_->LookupOrInsert(object.address()); \
+ ObjectDataKind kind = (background_serialization == \
+ ObjectRef::BackgroundSerialization::kAllowed) \
+ ? kBackgroundSerializedHeapObject \
+ : kSerializedHeapObject; \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object), \
+ kind);
+ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(
+ CREATE_DATA_FOR_POSSIBLE_SERIALIZATION)
+#undef CREATE_DATA_FOR_POSSIBLE_SERIALIZATION
+#define CREATE_DATA_FOR_BACKGROUND_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (FLAG_turbo_direct_heap_access) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>( \
+ this, &(entry->value), Handle<name>::cast(object), \
+ kBackgroundSerializedHeapObject); \
+ } else if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
+ }
+ HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(
+ CREATE_DATA_FOR_BACKGROUND_SERIALIZATION)
#undef CREATE_DATA_FOR_SERIALIZATION
- } else {
- UNREACHABLE();
+#define CREATE_DATA_FOR_SERIALIZATION(name) \
+ } else if (object->Is##name()) { \
+ if (mode() == kSerializing) { \
+ entry = refs_->LookupOrInsert(object.address()); \
+ object_data = zone()->New<name##Data>(this, &(entry->value), \
+ Handle<name>::cast(object)); \
+ } else { \
+ CHECK(!crash_on_error); \
+ return nullptr; \
}
- // At this point the entry pointer is not guaranteed to be valid as
- // the refs_ hash hable could be resized by one of the constructors above.
- DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
+#undef CREATE_DATA_FOR_SERIALIZATION
+ } else {
+ UNREACHABLE();
}
+ // At this point the entry pointer is not guaranteed to be valid as
+  // the refs_ hash table could be resized by one of the constructors above.
+ DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
return object_data;
}
// clang-format on
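// Editor's note -- illustrative sketch, not part of the diff. The difference
// between the two entry points above: GetOrCreateData() DCHECKs that data was
// produced, while TryGetOrCreateData() may return nullptr once the broker is
// already in the kSerialized state and the object is not eligible for
// background serialization. Callers on the concurrent path therefore look
// like:
//
//   ObjectData* data = broker->TryGetOrCreateData(handle);
//   if (data == nullptr) {
//     // e.g. TRACE_BROKER_MISSING(...) and bail out, as in
//     // FeedbackCellRef::value() and DescriptorArrayRef::GetStrongValue().
//     return base::nullopt;
//   }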
-ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
- return GetOrCreateData(CanonicalPersistentHandle(object));
+ObjectData* JSHeapBroker::GetOrCreateData(
+ Object object,
+ ObjectRef::BackgroundSerialization background_serialization) {
+ return GetOrCreateData(CanonicalPersistentHandle(object),
+ background_serialization);
}
#define DEFINE_IS_AND_AS(Name) \
@@ -2735,9 +2922,24 @@ ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
return Name##Ref(broker(), data()); \
}
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
+bool JSHeapBroker::StackHasOverflowed() const {
+ DCHECK_IMPLIES(local_isolate_ == nullptr,
+ ThreadId::Current() == isolate_->thread_id());
+ return (local_isolate_ != nullptr)
+ ? StackLimitCheck::HasOverflowed(local_isolate_)
+ : StackLimitCheck(isolate_).HasOverflowed();
+}
+
+bool JSHeapBroker::ObjectMayBeUninitialized(Handle<Object> object) const {
+ return !IsMainThread() && object->IsHeapObject() &&
+ isolate()->heap()->IsPendingAllocation(HeapObject::cast(*object));
+}
+
bool ObjectRef::IsSmi() const { return data()->is_smi(); }
int ObjectRef::AsSmi() const {
@@ -2900,7 +3102,13 @@ OddballType MapRef::oddball_type() const {
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
if (data_->should_access_heap()) {
- return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index));
+ FeedbackCell cell = object()->closure_feedback_cell(index);
+ Handle<FeedbackCell> cell_handle =
+ broker()->CanonicalPersistentHandle(cell);
+ // These should all be available because we request the cell for each
+ // CreateClosure bytecode.
+ ObjectData* cell_data = broker()->GetOrCreateData(cell_handle);
+ return FeedbackCellRef(broker(), cell_data);
}
return FeedbackCellRef(
@@ -2908,24 +3116,6 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
}
-double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
- if (data_->should_access_heap()) {
- return object()->RawFastDoublePropertyAt(index);
- }
- JSObjectData* object_data = data()->AsJSObject();
- CHECK(index.is_inobject());
- return object_data->GetInobjectField(index.property_index()).AsDouble();
-}
-
-uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
- if (data_->should_access_heap()) {
- return object()->RawFastDoublePropertyAsBitsAt(index);
- }
- JSObjectData* object_data = data()->AsJSObject();
- CHECK(index.is_inobject());
- return object_data->GetInobjectField(index.property_index()).AsBitsOfDouble();
-}
-
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
if (data_->should_access_heap()) {
return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
@@ -2933,9 +3123,8 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
}
JSObjectData* object_data = data()->AsJSObject();
CHECK(index.is_inobject());
- return ObjectRef(
- broker(),
- object_data->GetInobjectField(index.property_index()).AsObject());
+ return ObjectRef(broker(),
+ object_data->GetInobjectField(index.property_index()));
}
bool AllocationSiteRef::IsFastLiteral() const {
@@ -2961,7 +3150,7 @@ void JSObjectRef::SerializeElements() {
void JSObjectRef::EnsureElementsTenured() {
if (data_->should_access_heap()) {
- Handle<FixedArrayBase> object_elements = elements().object();
+ Handle<FixedArrayBase> object_elements = elements().value().object();
if (ObjectInYoungGeneration(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
@@ -2977,12 +3166,13 @@ void JSObjectRef::EnsureElementsTenured() {
}
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) {
return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents().at(descriptor_index.as_int()).field_index;
+ return descriptors->GetFieldIndexFor(descriptor_index);
}
int MapRef::GetInObjectPropertyOffset(int i) const {
@@ -2994,27 +3184,13 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
PropertyDetails MapRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return object()
- ->instance_descriptors(kRelaxedLoad)
- .GetDetails(descriptor_index);
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents().at(descriptor_index.as_int()).details;
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyDetails(descriptor_index);
}
NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return NameRef(broker(), broker()->CanonicalPersistentHandle(
- object()
- ->instance_descriptors(kRelaxedLoad)
- .GetKey(descriptor_index)));
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return NameRef(broker(),
- descriptors->contents().at(descriptor_index.as_int()).key);
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyKey(descriptor_index);
}
bool MapRef::IsFixedCowArrayMap() const {
@@ -3028,20 +3204,21 @@ bool MapRef::IsPrimitiveMap() const {
}
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- Handle<Map> owner(
- object()->FindFieldOwner(broker()->isolate(), descriptor_index),
- broker()->isolate());
- return MapRef(broker(), owner);
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // TODO(solanes, v8:7790): Consider caching the result of the field owner on
+    // the descriptor array. It would be useful for the same map as well as any
+ // other map sharing that descriptor array.
+ return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner(
+ broker()->isolate(), descriptor_index)));
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return MapRef(
- broker(),
- descriptors->contents().at(descriptor_index.as_int()).field_owner);
+ return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index));
}
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) {
Handle<FieldType> field_type(object()
->instance_descriptors(kRelaxedLoad)
@@ -3051,21 +3228,22 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
}
DescriptorArrayData* descriptors =
data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return ObjectRef(
- broker(),
- descriptors->contents().at(descriptor_index.as_int()).field_type);
+ return ObjectRef(broker(), descriptors->GetFieldType(descriptor_index));
}
-bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
+base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
+ uint32_t index, SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- return object()->IsUnboxedDoubleField(
- FieldIndex::ForDescriptor(*object(), descriptor_index));
+ // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
+ // concurrent inlining when we have the infrastructure to safely do so.
+ if (broker()->is_concurrent_inlining()) return base::nullopt;
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
+ return GetOwnElementFromHeap(broker(), object(), index, true);
}
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return descriptors->contents()
- .at(descriptor_index.as_int())
- .is_unboxed_double_field;
+ ObjectData* element =
+ data()->AsString()->GetCharAsStringOrUndefined(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
}
base::Optional<int> StringRef::length() const {
@@ -3142,40 +3320,6 @@ Float64 FixedDoubleArrayRef::get(int i) const {
}
}
-uint8_t BytecodeArrayRef::get(int index) const { return object()->get(index); }
-
-Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
- return object()->GetFirstBytecodeAddress();
-}
-
-Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
- if (data_->should_access_heap()) {
- return broker()->CanonicalPersistentHandle(
- object()->constant_pool().get(index));
- }
- return data()->AsBytecodeArray()->GetConstantAtIndex(index,
- broker()->isolate());
-}
-
-bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
- if (data_->should_access_heap()) {
- return object()->constant_pool().get(index).IsSmi();
- }
- return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
-}
-
-Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
- if (data_->should_access_heap()) {
- return Smi::cast(object()->constant_pool().get(index));
- }
- return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
-}
-
-void BytecodeArrayRef::SerializeForCompilation() {
- if (data_->should_access_heap()) return;
- data()->AsBytecodeArray()->SerializeForCompilation(broker());
-}
-
Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
}
@@ -3273,8 +3417,6 @@ BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR_C(HeapNumber, double, value)
-BIMODAL_ACCESSOR(JSArray, Object, length)
-
BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
@@ -3284,7 +3426,6 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
-BIMODAL_ACCESSOR_C(JSFunction, bool, HasAttachedOptimizedCode)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
@@ -3293,14 +3434,9 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
-BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
-BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
-BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
-BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
-
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
Map::Bits2::ElementsKindBits)
BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
@@ -3329,11 +3465,9 @@ BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR(Map, HeapObject, prototype)
BIMODAL_ACCESSOR_C(Map, InstanceType, instance_type)
BIMODAL_ACCESSOR(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR(Map, HeapObject, GetBackPointer)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
-BIMODAL_ACCESSOR_C(Code, unsigned, inlined_bytecode_size)
-
#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
BIMODAL_ACCESSOR(NativeContext, type, name)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
@@ -3344,6 +3478,10 @@ BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, FixedArray, data)
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, String, source)
+BIMODAL_ACCESSOR_C(RegExpBoilerplateDescription, int, flags)
+
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
@@ -3474,24 +3612,39 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
return ObjectRef ::data()->AsSharedFunctionInfo()->GetInlineability();
}
-BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
-
-base::Optional<ObjectRef> MapRef::GetStrongValue(
- InternalIndex descriptor_index) const {
+base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
if (data_->should_access_heap()) {
- MaybeObject value =
- object()->instance_descriptors(kRelaxedLoad).GetValue(descriptor_index);
- HeapObject object;
- if (value.GetHeapObjectIfStrong(&object)) {
- return ObjectRef(broker(), broker()->CanonicalPersistentHandle((object)));
+ // Note that we use the synchronized accessor.
+ Object value = object()->value(kAcquireLoad);
+ if (!value.IsFeedbackVector()) return base::nullopt;
+ auto vector_handle = broker()->CanonicalPersistentHandle(value);
+ ObjectData* vector = broker()->TryGetOrCreateData(vector_handle);
+ if (vector) {
+ return FeedbackVectorRef(broker(), vector);
}
+ TRACE_BROKER_MISSING(
+ broker(),
+ "Unable to retrieve FeedbackVector from FeedbackCellRef " << *this);
return base::nullopt;
}
- ObjectData* value = data()->AsMap()->GetStrongValue(descriptor_index);
- if (!value) {
- return base::nullopt;
+ ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value();
+ return FeedbackVectorRef(broker(), vector->AsFeedbackVector());
+}
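// Editor's note -- illustration only, not part of the diff. value() now
// returns base::Optional, since a cell may not (yet) hold a feedback vector;
// callers test the optional instead of asking an ObjectRef whether it
// IsFeedbackVector() (hypothetical FeedbackCellRef `cell`):
//
//   base::Optional<FeedbackVectorRef> vector = cell.value();
//   if (!vector.has_value()) return base::nullopt;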
+
+base::Optional<ObjectRef> MapRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetStrongValue(descriptor_index);
+}
+
+DescriptorArrayRef MapRef::instance_descriptors() const {
+ if (data_->should_access_heap()) {
+ return DescriptorArrayRef(
+ broker(), broker()->CanonicalPersistentHandle(
+ object()->instance_descriptors(kRelaxedLoad)));
}
- return ObjectRef(broker(), value);
+
+ return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
}
void MapRef::SerializeRootMap() {
@@ -3513,8 +3666,51 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
return base::nullopt;
}
+bool JSTypedArrayRef::is_on_heap() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. with
+ // release-store.
+ return object()->is_on_heap(kAcquireLoad);
+ }
+ return data()->AsJSTypedArray()->data_ptr();
+}
+
+size_t JSTypedArrayRef::length() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ return object()->length();
+ }
+ return data()->AsJSTypedArray()->length();
+}
+
+HeapObjectRef JSTypedArrayRef::buffer() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ Handle<JSArrayBuffer> value =
+ broker()->CanonicalPersistentHandle(object()->buffer());
+ return JSObjectRef{broker(), value};
+ }
+ return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+}
+
void* JSTypedArrayRef::data_ptr() const {
- if (data_->should_access_heap()) {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. protected by
+ // the is_on_heap release/acquire semantics (external_pointer store
+ // happens-before base_pointer store, and this external_pointer load
+ // happens-after base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
return object()->DataPtr();
}
return data()->AsJSTypedArray()->data_ptr();
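// Editor's note -- illustration only, not part of the diff. The intended read
// protocol on the direct-heap-access path, per the comments above: the acquire
// load happens in is_on_heap(), so callers check it first, which the
// CHECK(!is_on_heap()) preconditions enforce (hypothetical JSTypedArrayRef
// `view`):
//
//   if (!view.is_on_heap()) {         // acquire load of the base pointer
//     size_t len = view.length();     // immutable after initialization
//     void* data = view.data_ptr();   // happens-after the base-pointer load
//   }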
@@ -3707,23 +3903,14 @@ Maybe<double> ObjectRef::OddballToNumber() const {
}
}
-base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
+base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
- if (!(IsJSObject() || IsString())) return base::nullopt;
if (data_->should_access_heap()) {
- // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optmization for
- // concurrent inlining when we have the infrastructure to safely do so.
- if (broker()->is_concurrent_inlining() && IsString()) return base::nullopt;
CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
return GetOwnElementFromHeap(broker(), object(), index, true);
}
- ObjectData* element = nullptr;
- if (IsJSObject()) {
- element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- } else if (IsString()) {
- element = data()->AsString()->GetCharAsString(broker(), index, policy);
- }
+ ObjectData* element =
+ data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
if (element == nullptr) return base::nullopt;
return ObjectRef(broker(), element);
}
@@ -3742,25 +3929,81 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
return ObjectRef(broker(), property);
}
-base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- uint32_t index, SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- if (!object()->elements().IsCowArray()) return base::nullopt;
- return GetOwnElementFromHeap(broker(), object(), index, false);
- }
+ObjectRef JSArrayRef::GetBoilerplateLength() const {
+ // Safe to read concurrently because:
+ // - boilerplates are immutable after initialization.
+ // - boilerplates are published into the feedback vector.
+ return length_unsafe();
+}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- data()->AsJSObject()->SerializeElements(broker());
- } else if (!data()->AsJSObject()->serialized_elements()) {
- TRACE(broker(), "'elements' on " << this);
- return base::nullopt;
+ObjectRef JSArrayRef::length_unsafe() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ Object o = object()->length(broker()->isolate(), kRelaxedLoad);
+ return ObjectRef{broker(), broker()->CanonicalPersistentHandle(o)};
+ } else {
+ return ObjectRef{broker(), data()->AsJSArray()->length()};
}
- if (!elements().map().IsFixedCowArrayMap()) return base::nullopt;
+}
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
+base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
+ FixedArrayBaseRef elements_ref, uint32_t index,
+ SerializationPolicy policy) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // `elements` are currently still serialized as members of JSObjectRef.
+ // TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
+ // JSObject is no longer serialized.
+ static_assert(std::is_base_of<JSObject, JSArray>::value, "");
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
+
+ // The elements_ref is passed in by callers to make explicit that it is
+ // also used outside of this function, and must match the `elements` used
+ // inside this function.
+ DCHECK(elements_ref.equals(elements().value()));
+
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. But consistency is guaranteed at runtime due to the
+ // `elements` equality check in the caller.
+ ElementsKind elements_kind = GetElementsKind();
+
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
+
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // equality check in the caller thus also guards the value of `length`.
+ ObjectRef length_ref = length_unsafe();
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref.IsSmi()) return {};
+
+ base::Optional<Object> result =
+ ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(),
+ elements_kind, length_ref.AsSmi(), index);
+
+ if (!result.has_value()) return {};
+
+ return ObjectRef{broker(),
+ broker()->CanonicalPersistentHandle(result.value())};
+ } else {
+ DCHECK(!data_->should_access_heap());
+ DCHECK(!FLAG_turbo_direct_heap_access);
+
+ // Just to clarify that `elements_ref` is not used on this path.
+ // GetOwnElement accesses the serialized `elements` field on its own.
+ USE(elements_ref);
+
+ if (!elements().value().map().IsFixedCowArrayMap()) return base::nullopt;
+
+ ObjectData* element =
+ data()->AsJSArray()->GetOwnElement(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
+ }
}
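// Editor's note -- illustrative sketch, not part of the diff. The intended
// calling pattern for the concurrent path (hypothetical JSArrayRef `array`):
// the caller reads `elements` exactly once and passes that same ref back in,
// so the elements/length consistency argument in the comments above applies:
//
//   base::Optional<FixedArrayBaseRef> elements = array.elements();
//   if (!elements.has_value()) return base::nullopt;
//   base::Optional<ObjectRef> element = array.GetOwnCowElement(
//       elements.value(), index, SerializationPolicy::kAssumeSerialized);
//   if (!element.has_value()) return base::nullopt;  // not COW, or raced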
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3784,30 +4027,12 @@ ObjectRef SourceTextModuleRef::import_meta() const {
}
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+ BackgroundSerialization background_serialization,
bool check_type)
: broker_(broker) {
- switch (broker->mode()) {
- // We may have to create data in JSHeapBroker::kSerialized as well since we
- // read the data from read only heap objects directly instead of serializing
- // them.
- case JSHeapBroker::kSerialized:
- case JSHeapBroker::kSerializing:
- data_ = broker->GetOrCreateData(object);
- break;
- case JSHeapBroker::kDisabled: {
- RefsMap::Entry* entry = broker->refs_->LookupOrInsert(object.address());
- ObjectData** storage = &(entry->value);
- if (*storage == nullptr) {
- entry->value = broker->zone()->New<ObjectData>(
- broker, storage, object,
- object->IsSmi() ? kSmi : kUnserializedHeapObject);
- }
- data_ = *storage;
- break;
- }
- case JSHeapBroker::kRetired:
- UNREACHABLE();
- }
+ CHECK_NE(broker->mode(), JSHeapBroker::kRetired);
+
+ data_ = broker->GetOrCreateData(object, background_serialization);
if (!data_) { // TODO(mslekova): Remove once we're on the background thread.
object->Print();
}
@@ -3873,12 +4098,17 @@ ElementsKind JSObjectRef::GetElementsKind() const {
return map().elements_kind();
}
-FixedArrayBaseRef JSObjectRef::elements() const {
+base::Optional<FixedArrayBaseRef> JSObjectRef::elements() const {
if (data_->should_access_heap()) {
return FixedArrayBaseRef(
broker(), broker()->CanonicalPersistentHandle(object()->elements()));
}
- return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
+ const JSObjectData* d = data()->AsJSObject();
+ if (!d->serialized_elements()) {
+ TRACE(broker(), "'elements' on " << this);
+ return base::nullopt;
+ }
+ return FixedArrayBaseRef(broker(), d->elements());
}
int FixedArrayBaseRef::length() const {
@@ -3897,12 +4127,59 @@ Float64 FixedDoubleArrayData::Get(int i) const {
return contents_[i];
}
+PropertyDetails DescriptorArrayRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ return object()->GetDetails(descriptor_index);
+ }
+ return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
+}
+
+NameRef DescriptorArrayRef::GetPropertyKey(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ NameRef result(broker(), broker()->CanonicalPersistentHandle(
+ object()->GetKey(descriptor_index)));
+ CHECK(result.IsUniqueName());
+ return result;
+ }
+ return NameRef(broker(),
+ data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
+}
+
+base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ HeapObject heap_object;
+ if (object()
+ ->GetValue(descriptor_index)
+ .GetHeapObjectIfStrong(&heap_object)) {
+ // Since the descriptors in the descriptor array can be changed in-place
+ // via DescriptorArray::Replace, we might get a value that we haven't seen
+ // before.
+ ObjectData* data = broker()->TryGetOrCreateData(
+ broker()->CanonicalPersistentHandle(heap_object));
+ if (data) return ObjectRef(broker(), data);
+
+ TRACE_BROKER_MISSING(broker(), "strong value for descriptor array "
+ << *this << " at index "
+ << descriptor_index.as_int());
+ // Fall through to the base::nullopt below.
+ }
+ return base::nullopt;
+ }
+ ObjectData* value =
+ data()->AsDescriptorArray()->GetStrongValue(descriptor_index);
+ if (!value) return base::nullopt;
+ return ObjectRef(broker(), value);
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- if (value().IsFeedbackVector()) {
- FeedbackVectorRef vector = value().AsFeedbackVector();
+ if (value()) {
+ FeedbackVectorRef vector = *value();
if (vector.serialized()) {
- return value().AsFeedbackVector().shared_function_info();
+ return vector.shared_function_info();
}
}
return base::nullopt;
@@ -3924,36 +4201,23 @@ bool NameRef::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-ObjectRef JSRegExpRef::data() const {
- IF_ACCESS_FROM_HEAP(Object, data);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->data());
-}
-
-ObjectRef JSRegExpRef::flags() const {
- IF_ACCESS_FROM_HEAP(Object, flags);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->flags());
-}
-
-ObjectRef JSRegExpRef::last_index() const {
- IF_ACCESS_FROM_HEAP(Object, last_index);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->last_index());
-}
-
-ObjectRef JSRegExpRef::raw_properties_or_hash() const {
- IF_ACCESS_FROM_HEAP(Object, raw_properties_or_hash);
- return ObjectRef(broker(),
- ObjectRef::data()->AsJSRegExp()->raw_properties_or_hash());
-}
-
-ObjectRef JSRegExpRef::source() const {
- IF_ACCESS_FROM_HEAP(Object, source);
- return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
-}
-
-void JSRegExpRef::SerializeAsRegExpBoilerplate() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
+void RegExpBoilerplateDescriptionRef::Serialize() {
+ if (data_->should_access_heap()) {
+ // Even if the regexp boilerplate object itself is no longer serialized,
+ // both `data` and `source` fields still are and thus we need to make sure
+ // to visit them.
+ // TODO(jgruber,v8:7790): Remove once these are no longer serialized types.
+ STATIC_ASSERT(IsSerializedHeapObject<FixedArray>());
+ FixedArrayRef data_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->data())};
+ STATIC_ASSERT(IsSerializedHeapObject<String>());
+ StringRef source_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->source())};
+ } else {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ HeapObjectRef::data()->AsRegExpBoilerplateDescription()->Serialize(
+ broker());
+ }
}
Handle<Object> ObjectRef::object() const {
@@ -3983,6 +4247,8 @@ Handle<Object> ObjectRef::object() const {
#endif // DEBUG
HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
@@ -4015,11 +4281,13 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object)
- : ContextData(broker, storage, object), function_maps_(broker->zone()) {}
+ : ContextData(broker, storage, object),
+ state_(State::kUnserialized),
+ function_maps_(broker->zone()) {}
void NativeContextData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+ if (state_ != State::kUnserialized) return;
+ state_ = State::kSerializedOnMainThread;
TraceScope tracer(broker, this, "NativeContextData::Serialize");
Handle<NativeContext> context = Handle<NativeContext>::cast(object());
@@ -4028,14 +4296,16 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(name##_); \
name##_ = broker->GetOrCreateData(context->name()); \
if (!name##_->should_access_heap()) { \
- if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
if (name##_->IsMap() && \
!InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
name##_->AsMap()->SerializeConstructor(broker); \
} \
+ if (name##_->IsJSFunction()) { \
+ name##_->AsJSFunction()->Serialize(broker); \
+ } \
}
BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- if (!broker->isolate()->bootstrapper()->IsActive()) {
+ if (!broker->is_isolate_bootstrapping()) {
BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
}
#undef SERIALIZE_MEMBER
@@ -4048,15 +4318,39 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
broker);
}
+ scope_info_ = broker->GetOrCreateData(context->scope_info());
+}
+
+void NativeContextData::SerializeOnBackground(JSHeapBroker* broker) {
+ if (state_ == State::kFullySerialized) return;
+ DCHECK_EQ(state_, State::kSerializedOnMainThread);
+  state_ = State::kFullySerialized;
+
+ UnparkedScopeIfNeeded unparked_scope(broker);
+ TraceScope tracer(broker, this, "NativeContextData::SerializeOnBackground");
+ Handle<NativeContext> context = Handle<NativeContext>::cast(object());
+
+ constexpr auto kAllowed = ObjectRef::BackgroundSerialization::kAllowed;
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name(), kAllowed); \
+ if (!name##_->should_access_heap()) { \
+ DCHECK(!name##_->IsJSFunction()); \
+ }
+ BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ if (!broker->is_isolate_bootstrapping()) {
+ BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ }
+#undef SERIALIZE_MEMBER
+
DCHECK(function_maps_.empty());
int const first = Context::FIRST_FUNCTION_MAP_INDEX;
int const last = Context::LAST_FUNCTION_MAP_INDEX;
function_maps_.reserve(last + 1 - first);
for (int i = first; i <= last; ++i) {
- function_maps_.push_back(broker->GetOrCreateData(context->get(i)));
+ function_maps_.push_back(
+ broker->GetOrCreateData(context->get(i), kAllowed));
}
-
- scope_info_ = broker->GetOrCreateData(context->scope_info());
}
void JSFunctionRef::Serialize() {
@@ -4065,6 +4359,12 @@ void JSFunctionRef::Serialize() {
data()->AsJSFunction()->Serialize(broker());
}
+void JSFunctionRef::SerializeCodeAndFeedback() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSFunction()->SerializeCodeAndFeedback(broker());
+}
+
bool JSBoundFunctionRef::serialized() const {
if (data_->should_access_heap()) return true;
return data()->AsJSBoundFunction()->serialized();
@@ -4075,6 +4375,20 @@ bool JSFunctionRef::serialized() const {
return data()->AsJSFunction()->serialized();
}
+bool JSFunctionRef::serialized_code_and_feedback() const {
+ if (data_->should_access_heap()) return true;
+ return data()->AsJSFunction()->serialized_code_and_feedback();
+}
+
+CodeRef JSFunctionRef::code() const {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ return CodeRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->code(kAcquireLoad)));
+ }
+
+ return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
+}
+
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -4091,9 +4405,11 @@ base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (data_->should_access_heap()) {
if (object()->IsApiFunction()) {
- return FunctionTemplateInfoRef(
- broker(), broker()->CanonicalPersistentHandle(
- object()->function_data(kAcquireLoad)));
+ ObjectData* data =
+ broker()->TryGetOrCreateData(broker()->CanonicalPersistentHandle(
+ object()->function_data(kAcquireLoad)));
+ if (data == nullptr) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), data, true);
}
return base::nullopt;
}
@@ -4123,6 +4439,7 @@ void JSObjectRef::SerializeObjectCreateMap() {
}
void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
@@ -4133,10 +4450,10 @@ bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) return true;
ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors();
if (!maybe_desc_array_data) return false;
+ if (maybe_desc_array_data->should_access_heap()) return true;
DescriptorArrayData* desc_array_data =
maybe_desc_array_data->AsDescriptorArray();
- return desc_array_data->contents().find(descriptor_index.as_int()) !=
- desc_array_data->contents().end();
+ return desc_array_data->serialized_descriptor(descriptor_index);
}
void MapRef::SerializeBackPointer() {
@@ -4169,10 +4486,28 @@ void NativeContextRef::Serialize() {
data()->AsNativeContext()->Serialize(broker());
}
-void JSTypedArrayRef::Serialize() {
+void NativeContextRef::SerializeOnBackground() {
if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker());
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ data()->AsNativeContext()->SerializeOnBackground(broker());
+}
+
+void JSTypedArrayRef::Serialize() {
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ // Even if the typed array object itself is no longer serialized (besides
+ // the JSObject parts), the `buffer` field still is and thus we need to
+ // make sure to visit it.
+ // TODO(jgruber,v8:7790): Remove once JSObject is no longer serialized.
+ static_assert(
+ std::is_base_of<JSObject, decltype(object()->buffer())>::value, "");
+ STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
+ JSObjectRef data_ref{
+ broker(), broker()->CanonicalPersistentHandle(object()->buffer())};
+ } else {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSTypedArray()->Serialize(broker());
+ }
}
bool JSTypedArrayRef::serialized() const {
@@ -4180,16 +4515,22 @@ bool JSTypedArrayRef::serialized() const {
return data()->AsJSTypedArray()->serialized();
}
+bool JSTypedArrayRef::ShouldHaveBeenSerialized() const {
+ if (FLAG_turbo_direct_heap_access) return false;
+ return ObjectRef::ShouldHaveBeenSerialized();
+}
+
bool JSBoundFunctionRef::Serialize() {
if (data_->should_access_heap()) return true;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
return data()->AsJSBoundFunction()->Serialize(broker());
}
-void PropertyCellRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsPropertyCell()->Serialize(broker());
+bool PropertyCellRef::Serialize() const {
+ if (data_->should_access_heap()) return true;
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ return data()->AsPropertyCell()->Serialize(broker());
}
void FunctionTemplateInfoRef::SerializeCallCode() {
@@ -4285,7 +4626,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4330,6 +4670,8 @@ bool GlobalAccessFeedback::immutable() const {
base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
if (IsPropertyCell()) {
+ bool cell_serialized = property_cell().Serialize();
+ CHECK(cell_serialized); // Can't fail on the main thread.
return property_cell().value();
} else if (IsScriptContextSlot() && immutable()) {
return script_context().get(slot_index());
@@ -4652,7 +4994,10 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
// The wanted name belongs (or did belong) to a property on the global
// object and the feedback is the cell holding its value.
PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value));
- cell.Serialize();
+ ObjectRef(
+ this,
+ CanonicalPersistentHandle(
+ Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad)));
return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
}
@@ -4727,9 +5072,9 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
return NewInsufficientFeedback(nexus.kind());
}
- JSRegExpRef regexp(this, handle(object, isolate()));
- regexp.SerializeAsRegExpBoilerplate();
- return *zone()->New<RegExpLiteralFeedback>(regexp, nexus.kind());
+ RegExpBoilerplateDescriptionRef boilerplate(this, handle(object, isolate()));
+ boilerplate.Serialize();
+ return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
@@ -4753,6 +5098,12 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
base::Optional<HeapObjectRef> target_ref;
{
+ // TODO(mvstanton): this read has a special danger when done on the
+ // background thread, because the CallIC has a site in generated code
+ // where a JSFunction is installed in this slot without store ordering.
+ // Therefore, we will need to check {maybe_target} to ensure that it
+ // has been store ordered by the heap's mechanism for store-ordering
+ // batches of new objects.
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
if (maybe_target->GetHeapObject(&target_object)) {
@@ -5123,42 +5474,18 @@ TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
return *static_cast<TemplateObjectFeedback const*>(this);
}
-bool JSHeapBroker::StackHasOverflowed() const {
- DCHECK_IMPLIES(local_isolate_ == nullptr,
- ThreadId::Current() == isolate_->thread_id());
- return (local_isolate_ != nullptr)
- ? StackLimitCheck::HasOverflowed(local_isolate_)
- : StackLimitCheck(isolate_).HasOverflowed();
-}
-
-OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
- : array_(bytecode_array) {}
-
-int OffHeapBytecodeArray::length() const { return array_.length(); }
-
-int OffHeapBytecodeArray::parameter_count() const {
- return array_.parameter_count();
-}
-
-uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
-
-void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
-
-Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
- return array_.GetFirstBytecodeAddress();
-}
-
-Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
- int index, Isolate* isolate) const {
- return array_.GetConstantAtIndex(index);
-}
-
-bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
- return array_.IsConstantAtIndexSmi(index);
-}
+unsigned CodeRef::GetInlinedBytecodeSize() const {
+ if (data_->should_access_heap()) {
+ unsigned value = object()->inlined_bytecode_size();
+ if (value > 0) {
+ // Don't report inlined bytecode size if the code object was already
+ // deoptimized.
+ value = object()->marked_for_deoptimization() ? 0 : value;
+ }
+ return value;
+ }
-Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
- return array_.GetConstantAtIndexAsSmi(index);
+ return ObjectRef::data()->AsCode()->inlined_bytecode_size();
}
#undef BIMODAL_ACCESSOR
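A pattern that recurs throughout the js-heap-broker.cc hunks above is that accessors which used to assume serialized data (for example elements(), DescriptorArrayRef::GetStrongValue(), FeedbackCellRef::shared_function_info()) now return base::Optional and trace a broker miss, leaving the caller to bail out. The following is a minimal standalone sketch of that shape in plain C++17 with std::optional; the names are invented and none of the real V8 types or macros are used.

#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

class BrokerSketch {
 public:
  void Serialize(const std::string& key, int value) { cache_[key] = value; }

  // Mirrors the optional-returning accessors above: nullopt means the data was
  // never serialized, which is traced rather than treated as a bug.
  std::optional<int> Get(const std::string& key) const {
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      std::cerr << "broker missing: " << key << "\n";
      return std::nullopt;
    }
    return it->second;
  }

 private:
  std::unordered_map<std::string, int> cache_;
};

// Caller side: a missing value means "skip this optimization", not a crash.
bool TryOptimize(const BrokerSketch& broker, const std::string& key) {
  std::optional<int> data = broker.Get(key);
  if (!data.has_value()) return false;  // analogous to returning NoChange()
  return *data > 0;
}

int main() {
  BrokerSketch broker;
  broker.Serialize("a", 1);
  std::cout << TryOptimize(broker, "a") << " " << TryOptimize(broker, "b") << "\n";  // 1 0
}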
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 97d54c39c1..cc86b1451c 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -104,6 +104,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
+ bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_native_context_independent() const {
return code_kind_ == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
@@ -148,9 +149,21 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Handle<Object> GetRootHandle(Object object);
// Never returns nullptr.
- ObjectData* GetOrCreateData(Handle<Object>);
+ ObjectData* GetOrCreateData(
+ Handle<Object>,
+ ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
// Like the previous but wraps argument in handle first (for convenience).
- ObjectData* GetOrCreateData(Object);
+ ObjectData* GetOrCreateData(
+ Object, ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
+
+ // Gets data only if we have it. However, thin wrappers will be created for
+ // smis, read-only objects and never-serialized objects.
+ ObjectData* TryGetOrCreateData(
+ Handle<Object>, bool crash_on_error = false,
+ ObjectRef::BackgroundSerialization background_serialization =
+ ObjectRef::BackgroundSerialization::kDisallowed);
// Check if {object} is any native context's %ArrayPrototype% or
// %ObjectPrototype%.
@@ -300,6 +313,16 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
friend class HeapObjectRef;
friend class ObjectRef;
friend class ObjectData;
+ friend class PropertyCellData;
+
+ bool IsMainThread() const {
+ return local_isolate() == nullptr || local_isolate()->is_main_thread();
+ }
+
+ // If this returns false, the object is guaranteed to be fully initialized and
+ // thus safe to read from a memory safety perspective. The converse does not
+ // necessarily hold.
+ bool ObjectMayBeUninitialized(Handle<Object> object) const;
bool CanUseFeedback(const FeedbackNexus& nexus) const;
const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const;
@@ -369,6 +392,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
+ bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
@@ -443,23 +467,6 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
-class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
- public:
- explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array);
-
- int length() const override;
- int parameter_count() const override;
- uint8_t get(int index) const override;
- void set(int index, uint8_t value) override;
- Address GetFirstBytecodeAddress() const override;
- Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const override;
- bool IsConstantAtIndexSmi(int index) const override;
- Smi GetConstantAtIndexAsSmi(int index) const override;
-
- private:
- BytecodeArrayRef array_;
-};
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
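The header above splits object-data lookup into GetOrCreateData, which is documented to never return nullptr, and TryGetOrCreateData, which may. A rough standalone sketch of that split, with made-up names and none of the real broker machinery:

#include <cassert>
#include <map>
#include <string>

class DataCache {
 public:
  struct Entry { int value = 0; };

  // Like GetOrCreateData: creation is assumed to always be possible here, so
  // callers never have to handle a null result.
  Entry* GetOrCreate(const std::string& key) {
    return &entries_[key];  // default-constructs on first use
  }

  // Like TryGetOrCreateData: returns nullptr when the entry does not exist and
  // creating it is not allowed in the current context (e.g. off-thread).
  Entry* TryGet(const std::string& key, bool crash_on_error = false) {
    auto it = entries_.find(key);
    if (it == entries_.end()) {
      assert(!crash_on_error && "entry was required but is missing");
      return nullptr;
    }
    return &it->second;
  }

 private:
  std::map<std::string, Entry> entries_;
};

int main() {
  DataCache cache;
  cache.GetOrCreate("x")->value = 7;
  DataCache::Entry* hit = cache.TryGet("x");
  DataCache::Entry* miss = cache.TryGet("y");
  return (hit != nullptr && miss == nullptr) ? 0 : 1;
}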
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 7ebc383ea5..3b45b9d82b 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -29,8 +29,10 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckClosure: {
FeedbackCellRef cell(broker(), FeedbackCellOf(node->op()));
- FeedbackVectorRef feedback_vector = cell.value().AsFeedbackVector();
- feedback_vector.Serialize();
+ base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
+ if (feedback_vector.has_value()) {
+ feedback_vector->Serialize();
+ }
break;
}
case IrOpcode::kHeapConstant: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index a09644ff9a..b38199bfff 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -44,7 +44,6 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
<< feedback_vector << " (missing data)");
return false;
}
-
TRACE("Considering " << shared << " for inlining with " << feedback_vector);
return true;
}
@@ -57,12 +56,13 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- if (!function.serialized()) {
+ if (!function.serialized() || !function.serialized_code_and_feedback()) {
TRACE_BROKER_MISSING(
broker, "data for " << function << " (cannot consider for inlining)");
TRACE("Cannot consider " << function << " for inlining (missing data)");
return false;
}
+
return CanConsiderForInlining(broker, function.shared(),
function.feedback_vector());
}
@@ -111,12 +111,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info =
- feedback_cell.shared_function_info().value();
+ SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
out.shared_info = shared_info;
- if (feedback_cell.value().IsFeedbackVector() &&
- CanConsiderForInlining(broker(), shared_info,
- feedback_cell.value().AsFeedbackVector())) {
+ if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -129,9 +126,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
SharedFunctionInfoRef shared_info(broker(), p.shared_info());
out.shared_info = shared_info;
- if (feedback_cell.value().IsFeedbackVector() &&
- CanConsiderForInlining(broker(), shared_info,
- feedback_cell.value().AsFeedbackVector())) {
+ if (feedback_cell.value().has_value() &&
+ CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
out.bytecode[0] = shared_info.GetBytecodeArray();
}
out.num_functions = 1;
@@ -142,6 +138,12 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (mode() == kWasmOnly) {
+ return (node->opcode() == IrOpcode::kJSWasmCall)
+ ? inliner_.ReduceJSWasmCall(node)
+ : NoChange();
+ }
+ DCHECK_EQ(mode(), kJSOnly);
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute) {
@@ -165,8 +167,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
bool can_inline_candidate = false, candidate_is_small = true;
candidate.total_size = 0;
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
+ FrameStateInfo const& frame_info = frame_state.frame_state_info();
Handle<SharedFunctionInfo> frame_shared_info;
for (int i = 0; i < candidate.num_functions; ++i) {
if (!candidate.bytecode[i].has_value()) {
@@ -202,10 +204,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
unsigned inlined_bytecode_size = 0;
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
- if (function.HasAttachedOptimizedCode()) {
- inlined_bytecode_size = function.code().inlined_bytecode_size();
- candidate.total_size += inlined_bytecode_size;
- }
+ inlined_bytecode_size = function.code().GetInlinedBytecodeSize();
+ candidate.total_size += inlined_bytecode_size;
}
candidate_is_small = candidate_is_small &&
IsSmall(bytecode.length() + inlined_bytecode_size);
@@ -335,19 +335,18 @@ Node* JSInliningHeuristic::DuplicateStateValuesAndRename(Node* state_values,
namespace {
-bool CollectFrameStateUniqueUses(Node* node, Node* frame_state,
+bool CollectFrameStateUniqueUses(Node* node, FrameState frame_state,
NodeAndIndex* uses_buffer, size_t* use_count,
size_t max_uses) {
// Only accumulate states that are not shared with other users.
if (frame_state->UseCount() > 1) return true;
- if (frame_state->InputAt(kFrameStateStackInput) == node) {
+ if (frame_state.stack() == node) {
if (*use_count >= max_uses) return false;
- uses_buffer[*use_count] = {frame_state, kFrameStateStackInput};
+ uses_buffer[*use_count] = {frame_state, FrameState::kFrameStateStackInput};
(*use_count)++;
}
- if (!CollectStateValuesOwnedUses(node,
- frame_state->InputAt(kFrameStateLocalsInput),
- uses_buffer, use_count, max_uses)) {
+ if (!CollectStateValuesOwnedUses(node, frame_state.locals(), uses_buffer,
+ use_count, max_uses)) {
return false;
}
return true;
@@ -355,28 +354,28 @@ bool CollectFrameStateUniqueUses(Node* node, Node* frame_state,
} // namespace
-Node* JSInliningHeuristic::DuplicateFrameStateAndRename(Node* frame_state,
- Node* from, Node* to,
- StateCloneMode mode) {
+FrameState JSInliningHeuristic::DuplicateFrameStateAndRename(
+ FrameState frame_state, Node* from, Node* to, StateCloneMode mode) {
// Only rename in states that are not shared with other users. This needs to
// be in sync with the condition in {DuplicateFrameStateAndRename}.
if (frame_state->UseCount() > 1) return frame_state;
- Node* copy = mode == kChangeInPlace ? frame_state : nullptr;
- if (frame_state->InputAt(kFrameStateStackInput) == from) {
+ Node* copy =
+ mode == kChangeInPlace ? static_cast<Node*>(frame_state) : nullptr;
+ if (frame_state.stack() == from) {
if (!copy) {
copy = graph()->CloneNode(frame_state);
}
- copy->ReplaceInput(kFrameStateStackInput, to);
+ copy->ReplaceInput(FrameState::kFrameStateStackInput, to);
}
- Node* locals = frame_state->InputAt(kFrameStateLocalsInput);
+ Node* locals = frame_state.locals();
Node* new_locals = DuplicateStateValuesAndRename(locals, from, to, mode);
if (new_locals != locals) {
if (!copy) {
copy = graph()->CloneNode(frame_state);
}
- copy->ReplaceInput(kFrameStateLocalsInput, new_locals);
+ copy->ReplaceInput(FrameState::kFrameStateLocalsInput, new_locals);
}
- return copy ? copy : frame_state;
+ return copy != nullptr ? FrameState{copy} : frame_state;
}
bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
@@ -538,14 +537,15 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
Node* checkpoint_state = nullptr;
if (checkpoint) {
checkpoint_state = checkpoint->InputAt(0);
- if (!CollectFrameStateUniqueUses(callee, checkpoint_state, replaceable_uses,
- &replaceable_uses_count, kMaxUses)) {
+ if (!CollectFrameStateUniqueUses(callee, FrameState{checkpoint_state},
+ replaceable_uses, &replaceable_uses_count,
+ kMaxUses)) {
return false;
}
}
// Collect the uses to check case 3.
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
if (!CollectFrameStateUniqueUses(callee, frame_state, replaceable_uses,
&replaceable_uses_count, kMaxUses)) {
return false;
@@ -582,15 +582,15 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
if (checkpoint) {
// Duplicate the checkpoint.
- Node* new_checkpoint_state = DuplicateFrameStateAndRename(
- checkpoint_state, callee, target,
+ FrameState new_checkpoint_state = DuplicateFrameStateAndRename(
+ FrameState{checkpoint_state}, callee, target,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
effect = graph()->NewNode(checkpoint->op(), new_checkpoint_state, effect,
control);
}
// Duplicate the call.
- Node* new_lazy_frame_state = DuplicateFrameStateAndRename(
+ FrameState new_lazy_frame_state = DuplicateFrameStateAndRename(
frame_state, callee, target,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
inputs[0] = target;
@@ -670,6 +670,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
bool small_function) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
+ DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
@@ -788,9 +789,11 @@ void JSInliningHeuristic::PrintCandidates() {
os << ", bytecode size: " << candidate.bytecode[i]->length();
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
- if (function.HasAttachedOptimizedCode()) {
+ unsigned inlined_bytecode_size =
+ function.code().GetInlinedBytecodeSize();
+ if (inlined_bytecode_size > 0) {
os << ", existing opt code's inlined bytecode size: "
- << function.code().inlined_bytecode_size();
+ << inlined_bytecode_size;
}
}
} else {
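Several hunks above replace raw Node* frame states indexed with kFrameState*Input constants by a FrameState wrapper with named accessors (stack(), locals(), outer_frame_state()). A standalone sketch of that wrapper idea, with invented types and input indices:

#include <array>
#include <cstddef>

struct Node {
  std::array<Node*, 6> inputs{};
  Node* InputAt(std::size_t i) const { return inputs[i]; }
};

class FrameStateView {
 public:
  // Illustrative positions only; the real constants live on FrameState.
  static constexpr std::size_t kLocalsInput = 1;
  static constexpr std::size_t kStackInput = 2;
  static constexpr std::size_t kOuterInput = 5;

  explicit FrameStateView(Node* node) : node_(node) {}

  Node* stack() const { return node_->InputAt(kStackInput); }
  Node* locals() const { return node_->InputAt(kLocalsInput); }
  FrameStateView outer() const { return FrameStateView{node_->InputAt(kOuterInput)}; }

  Node* raw() const { return node_; }  // escape hatch, like converting back to Node*

 private:
  Node* node_;
};

int main() {
  Node outer, inner, stack, locals;
  inner.inputs[FrameStateView::kStackInput] = &stack;
  inner.inputs[FrameStateView::kLocalsInput] = &locals;
  inner.inputs[FrameStateView::kOuterInput] = &outer;
  FrameStateView view(&inner);
  return (view.stack() == &stack && view.outer().raw() == &outer) ? 0 : 1;
}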
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index a613dacaaa..02280deaf3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -13,17 +13,20 @@ namespace compiler {
class JSInliningHeuristic final : public AdvancedReducer {
public:
+ enum Mode { kJSOnly, kWasmOnly };
+
JSInliningHeuristic(Editor* editor, Zone* local_zone,
OptimizedCompilationInfo* info, JSGraph* jsgraph,
JSHeapBroker* broker,
- SourcePositionTable* source_positions)
+ SourcePositionTable* source_positions, Mode mode)
: AdvancedReducer(editor),
inliner_(editor, local_zone, info, jsgraph, broker, source_positions),
candidates_(local_zone),
seen_(local_zone),
source_positions_(source_positions),
jsgraph_(jsgraph),
- broker_(broker) {}
+ broker_(broker),
+ mode_(mode) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -78,8 +81,8 @@ class JSInliningHeuristic final : public AdvancedReducer {
bool TryReuseDispatch(Node* node, Node* callee, Node** if_successes,
Node** calls, Node** inputs, int input_count);
enum StateCloneMode { kCloneState, kChangeInPlace };
- Node* DuplicateFrameStateAndRename(Node* frame_state, Node* from, Node* to,
- StateCloneMode mode);
+ FrameState DuplicateFrameStateAndRename(FrameState frame_state, Node* from,
+ Node* to, StateCloneMode mode);
Node* DuplicateStateValuesAndRename(Node* state_values, Node* from, Node* to,
StateCloneMode mode);
Candidate CollectFunctions(Node* node, int functions_size);
@@ -91,6 +94,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const { return jsgraph_->isolate(); }
SimplifiedOperatorBuilder* simplified() const;
+ Mode mode() const { return mode_; }
JSInliner inliner_;
Candidates candidates_;
@@ -99,6 +103,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
int total_inlined_bytecode_size_ = 0;
+ const Mode mode_;
};
} // namespace compiler
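The Mode parameter added above lets the same heuristic class be instantiated once for JS calls and once for JS-to-Wasm calls, with each instance ignoring the opcodes the other one owns. A self-contained sketch of that dispatch, with made-up opcode and reduction types:

#include <iostream>

enum class Opcode { kJSCall, kJSWasmCall, kOther };
enum class Mode { kJSOnly, kWasmOnly };

struct Reduction { bool changed = false; };

class InliningHeuristicSketch {
 public:
  explicit InliningHeuristicSketch(Mode mode) : mode_(mode) {}

  Reduction Reduce(Opcode op) const {
    if (mode_ == Mode::kWasmOnly) {
      // Only JS->Wasm calls are handled; everything else is left alone.
      return (op == Opcode::kJSWasmCall) ? Reduction{true} : Reduction{false};
    }
    // kJSOnly: never touches kJSWasmCall.
    if (op != Opcode::kJSCall) return Reduction{false};
    return Reduction{true};
  }

 private:
  Mode mode_;
};

int main() {
  InliningHeuristicSketch js(Mode::kJSOnly), wasm(Mode::kWasmOnly);
  std::cout << js.Reduce(Opcode::kJSWasmCall).changed << " "
            << wasm.Reduce(Opcode::kJSWasmCall).changed << "\n";  // 0 1
}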
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 050e26799e..5da0c9c181 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -20,6 +20,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/parsing/parse-info.h"
@@ -60,8 +61,8 @@ class JSCallAccessor {
Node* new_target() const { return JSConstructNode{call_}.new_target(); }
- Node* frame_state() const {
- return NodeProperties::GetFrameStateInput(call_);
+ FrameState frame_state() const {
+ return FrameState{NodeProperties::GetFrameStateInput(call_)};
}
int argument_count() const {
@@ -80,11 +81,25 @@ class JSCallAccessor {
Node* call_;
};
+Reduction JSInliner::InlineJSWasmCall(Node* call, Node* new_target,
+ Node* context, Node* frame_state,
+ StartNode start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls) {
+ JSWasmCallNode n(call);
+ return InlineCall(
+ call, new_target, context, frame_state, start, end, exception_target,
+ uncaught_subcalls,
+ static_cast<int>(n.Parameters().signature()->parameter_count()));
+}
+
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end,
+ Node* frame_state, StartNode start, Node* end,
Node* exception_target,
- const NodeVector& uncaught_subcalls) {
- JSCallAccessor c(call);
+ const NodeVector& uncaught_subcalls,
+ int argument_count) {
+ DCHECK_IMPLIES(IrOpcode::IsInlineeOpcode(call->opcode()),
+ argument_count == JSCallAccessor(call).argument_count());
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
@@ -92,16 +107,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
- int const inlinee_new_target_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 3;
- int const inlinee_arity_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 2;
- int const inlinee_context_index =
- static_cast<int>(start->op()->ValueOutputCount()) - 1;
+ int const inlinee_new_target_index = start.NewTargetOutputIndex();
+ int const inlinee_arity_index = start.ArgCountOutputIndex();
+ int const inlinee_context_index = start.ContextOutputIndex();
// {inliner_inputs} counts the target, receiver/new_target, and arguments; but
// not feedback vector, context, effect or control.
- const int inliner_inputs = c.argument_count() +
+ const int inliner_inputs = argument_count +
JSCallOrConstructNode::kExtraInputCount -
JSCallOrConstructNode::kFeedbackVectorInputCount;
// Iterate over all uses of the start node.
@@ -120,7 +132,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Replace(use, new_target);
} else if (index == inlinee_arity_index) {
// The projection is requesting the number of arguments.
- Replace(use, jsgraph()->Constant(c.argument_count()));
+ Replace(use, jsgraph()->Constant(argument_count));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
@@ -231,12 +243,10 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
}
}
-Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count,
- BailoutId bailout_id,
- FrameStateType frame_state_type,
- SharedFunctionInfoRef shared,
- Node* context) {
+FrameState JSInliner::CreateArtificialFrameState(
+ Node* node, FrameState outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ SharedFunctionInfoRef shared, Node* context) {
const int parameter_count_with_receiver =
parameter_count + JSCallOrConstructNode::kReceiverOrNewTargetInputCount;
const FrameStateFunctionInfo* state_info =
@@ -259,9 +269,9 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
if (context == nullptr) context = jsgraph()->UndefinedConstant();
- return graph()->NewNode(op, params_node, node0, node0, context,
- node->InputAt(JSCallOrConstructNode::TargetIndex()),
- outer_frame_state);
+ return FrameState{graph()->NewNode(
+ op, params_node, node0, node0, context,
+ node->InputAt(JSCallOrConstructNode::TargetIndex()), outer_frame_state)};
}
namespace {
@@ -374,8 +384,70 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
UNREACHABLE();
}
+Reduction JSInliner::ReduceJSWasmCall(Node* node) {
+ // Create the subgraph for the inlinee.
+ Node* start_node;
+ Node* end;
+ {
+ Graph::SubgraphScope scope(graph());
+
+ graph()->SetEnd(nullptr);
+
+ JSWasmCallNode n(node);
+ const JSWasmCallParameters& wasm_call_params = n.Parameters();
+
+ // Create a nested frame state inside the frame state attached to the
+ // call; this will ensure that lazy deoptimizations at this point will
+ // still return the result of the Wasm function call.
+ Node* continuation_frame_state =
+ CreateJSWasmCallBuiltinContinuationFrameState(
+ jsgraph(), n.context(), n.frame_state(),
+ wasm_call_params.signature());
+ JSWasmCallData js_wasm_call_data(wasm_call_params.signature());
+ BuildInlinedJSToWasmWrapper(
+ graph()->zone(), jsgraph(), wasm_call_params.signature(),
+ wasm_call_params.module(), source_positions_,
+ StubCallMode::kCallBuiltinPointer, wasm::WasmFeatures::FromFlags(),
+ &js_wasm_call_data, continuation_frame_state);
+
+ // Extract the inlinee start/end nodes.
+ start_node = graph()->start();
+ end = graph()->end();
+ }
+ StartNode start{start_node};
+
+ Node* exception_target = nullptr;
+ NodeProperties::IsExceptionalCall(node, &exception_target);
+
+ // If we are inlining into a surrounding exception handler, we collect all
+ // potentially throwing nodes within the inlinee that are not handled locally
+ // by the inlinee itself. They are later wired into the surrounding handler.
+ NodeVector uncaught_subcalls(local_zone_);
+ if (exception_target != nullptr) {
+ // Find all uncaught 'calls' in the inlinee.
+ AllNodes inlined_nodes(local_zone_, end, graph());
+ for (Node* subnode : inlined_nodes.reachable) {
+ // Every possibly throwing node should get {IfSuccess} and {IfException}
+ // projections, unless there already is local exception handling.
+ if (subnode->op()->HasProperty(Operator::kNoThrow)) continue;
+ if (!NodeProperties::IsExceptionalCall(subnode)) {
+ DCHECK_EQ(2, subnode->op()->ControlOutputCount());
+ uncaught_subcalls.push_back(subnode);
+ }
+ }
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* new_target = jsgraph()->UndefinedConstant();
+
+ return InlineJSWasmCall(node, new_target, context, frame_state, start, end,
+ exception_target, uncaught_subcalls);
+}
+
Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ DCHECK_NE(node->opcode(), IrOpcode::kJSWasmCall);
JSCallAccessor call(node);
// Determine the call target.
@@ -405,9 +477,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// To ensure inlining always terminates, we have an upper limit on inlining
// the nested calls.
int nesting_level = 0;
- for (Node* frame_state = call.frame_state();
+ for (FrameState frame_state = FrameState{call.frame_state()};
frame_state->opcode() == IrOpcode::kFrameState;
- frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
+ frame_state = frame_state.outer_frame_state()) {
nesting_level++;
if (nesting_level > kMaxDepthForInlining) {
TRACE("Not inlining "
@@ -433,15 +505,19 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
shared_info->object());
}
- TRACE("Inlining " << *shared_info << " into " << outer_shared_info
- << ((exception_target != nullptr) ? " (inside try-block)"
- : ""));
// Determine the target's feedback vector and its context.
Node* context;
FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
- CHECK(broker()->IsSerializedForCompilation(
- *shared_info, feedback_cell.value().AsFeedbackVector()));
+ if (!broker()->IsSerializedForCompilation(*shared_info,
+ *feedback_cell.value())) {
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because it wasn't serialized for compilation.");
+ return NoChange();
+ }
+ TRACE("Inlining " << *shared_info << " into " << outer_shared_info
+ << ((exception_target != nullptr) ? " (inside try-block)"
+ : ""));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
@@ -454,7 +530,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
source_positions_->GetSourcePosition(node));
// Create the subgraph for the inlinee.
- Node* start;
+ Node* start_node;
Node* end;
{
// Run the BytecodeGraphBuilder to create the subgraph.
@@ -470,15 +546,16 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
{
CallFrequency frequency = call.frequency();
BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_cell,
- BailoutId::None(), jsgraph(), frequency,
+ BytecodeOffset::None(), jsgraph(), frequency,
source_positions_, inlining_id, info_->code_kind(),
flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
- start = graph()->start();
+ start_node = graph()->start();
end = graph()->end();
}
+ StartNode start{start_node};
// If we are inlining into a surrounding exception handler, we collect all
// potentially throwing nodes within the inlinee that are not handled locally
@@ -498,7 +575,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
- Node* frame_state = call.frame_state();
+ FrameState frame_state = call.frame_state();
Node* new_target = jsgraph()->UndefinedConstant();
// Inline {JSConstruct} requires some additional magic.
@@ -523,7 +600,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Control control = n.control();
Node* frame_state_inside = CreateArtificialFrameState(
node, frame_state, n.ArgumentCount(),
- BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
+ BytecodeOffset::ConstructStubCreate(), FrameStateType::kConstructStub,
*shared_info, context);
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
@@ -577,8 +654,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
- node, frame_state, n.ArgumentCount(), BailoutId::ConstructStubInvoke(),
- FrameStateType::kConstructStub, *shared_info, context);
+ node, frame_state, n.ArgumentCount(),
+ BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
+ *shared_info, context);
}
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
@@ -600,19 +678,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
// Insert argument adaptor frame if required. The callees formal parameter
- // count (i.e. value outputs of start node minus target, receiver, new target,
- // arguments count and context) have to match the number of arguments passed
+  // count has to match the number of arguments passed

// to the call.
int parameter_count = shared_info->internal_formal_parameter_count();
- DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
+ DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
- node, frame_state, call.argument_count(), BailoutId::None(),
+ node, frame_state, call.argument_count(), BytecodeOffset::None(),
FrameStateType::kArgumentsAdaptor, *shared_info);
}
return InlineCall(node, new_target, context, frame_state, start, end,
- exception_target, uncaught_subcalls);
+ exception_target, uncaught_subcalls, call.argument_count());
}
Graph* JSInliner::graph() const { return jsgraph()->graph(); }
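ReduceJSCall and the new ReduceJSWasmCall above stop computing the start node's new-target/argument-count/context projection indices by hand and read them from a StartNode wrapper instead. A standalone sketch of what such a wrapper encapsulates, assuming the output layout described in the removed comment (formal parameters preceded by target and receiver, followed by new target, argument count and context); the arithmetic is illustrative, not the real StartNode implementation:

class StartNodeView {
 public:
  explicit StartNodeView(int value_output_count)
      : value_output_count_(value_output_count) {}

  // The last three value outputs, as in the hand-written arithmetic that the
  // hunk above removes.
  int NewTargetOutputIndex() const { return value_output_count_ - 3; }
  int ArgCountOutputIndex() const { return value_output_count_ - 2; }
  int ContextOutputIndex() const { return value_output_count_ - 1; }

  // Everything except the target, receiver, new-target, argument-count and
  // context outputs is a formal parameter (matches the DCHECK in ReduceJSCall).
  int FormalParameterCountWithoutReceiver() const {
    return value_output_count_ - 5;
  }

 private:
  int value_output_count_;
};

int main() {
  StartNodeView start(/*value_output_count=*/8);  // e.g. 3 formal parameters
  return (start.FormalParameterCountWithoutReceiver() == 3 &&
          start.ContextOutputIndex() == 7) ? 0 : 1;
}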
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 0648c86f62..e1e1bdfa0a 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class BailoutId;
+class BytecodeOffset;
class OptimizedCompilationInfo;
namespace compiler {
@@ -41,6 +41,8 @@ class JSInliner final : public AdvancedReducer {
// using the above generic reducer interface of the inlining machinery.
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSWasmCall(Node* node);
+
private:
Zone* zone() const { return local_zone_; }
CommonOperatorBuilder* common() const;
@@ -61,16 +63,20 @@ class JSInliner final : public AdvancedReducer {
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);
- Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
- int parameter_count, BailoutId bailout_id,
- FrameStateType frame_state_type,
- SharedFunctionInfoRef shared,
- Node* context = nullptr);
+ FrameState CreateArtificialFrameState(
+ Node* node, FrameState outer_frame_state, int parameter_count,
+ BytecodeOffset bailout_id, FrameStateType frame_state_type,
+ SharedFunctionInfoRef shared, Node* context = nullptr);
Reduction InlineCall(Node* call, Node* new_target, Node* context,
- Node* frame_state, Node* start, Node* end,
+ Node* frame_state, StartNode start, Node* end,
Node* exception_target,
- const NodeVector& uncaught_subcalls);
+ const NodeVector& uncaught_subcalls, int argument_count);
+
+ Reduction InlineJSWasmCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, StartNode start, Node* end,
+ Node* exception_target,
+ const NodeVector& uncaught_subcalls);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 68d9fa05f0..2d105e55a8 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -802,9 +802,9 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
Node* node, Node* lookup_start_object, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode, Node* key,
PropertyCellRef const& property_cell, Node* effect) {
- Node* control = NodeProperties::GetControlInput(node);
- if (effect == nullptr) {
- effect = NodeProperties::GetEffectInput(node);
+ if (!property_cell.Serialize()) {
+ TRACE_BROKER_MISSING(broker(), "usable data for " << property_cell);
+ return NoChange();
}
ObjectRef property_cell_value = property_cell.value();
@@ -819,6 +819,11 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
PropertyCellType property_cell_type = property_details.cell_type();
DCHECK_EQ(kData, property_details.kind());
+ Node* control = NodeProperties::GetControlInput(node);
+ if (effect == nullptr) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+
// We have additional constraints for stores.
if (access_mode == AccessMode::kStore) {
DCHECK_EQ(receiver, lookup_start_object);
@@ -923,10 +928,6 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
DCHECK_EQ(receiver, lookup_start_object);
DCHECK(!property_details.IsReadOnly());
switch (property_details.cell_type()) {
- case PropertyCellType::kUndefined: {
- UNREACHABLE();
- break;
- }
case PropertyCellType::kConstant: {
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
@@ -997,6 +998,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
jsgraph()->Constant(property_cell), value, effect, control);
break;
}
+ case PropertyCellType::kUndefined:
+ UNREACHABLE();
}
}
@@ -1950,26 +1953,36 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
NumberMatcher mkey(key);
if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
- base::Optional<ObjectRef> element =
- receiver_ref.GetOwnConstantElement(index);
- if (!element.has_value() && receiver_ref.IsJSArray()) {
- // We didn't find a constant element, but if the receiver is a cow-array
- // we can exploit the fact that any future write to the element will
- // replace the whole elements storage.
- element = receiver_ref.AsJSArray().GetOwnCowElement(index);
- if (element.has_value()) {
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- FixedArrayRef array_elements =
- receiver_ref.AsJSArray().elements().AsFixedArray();
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), elements,
- jsgraph()->Constant(array_elements));
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kCowArrayElementsChanged),
- check, effect, control);
+ base::Optional<ObjectRef> element;
+
+ if (receiver_ref.IsJSObject()) {
+ element = receiver_ref.AsJSObject().GetOwnConstantElement(index);
+ if (!element.has_value() && receiver_ref.IsJSArray()) {
+ // We didn't find a constant element, but if the receiver is a cow-array
+ // we can exploit the fact that any future write to the element will
+ // replace the whole elements storage.
+ JSArrayRef array_ref = receiver_ref.AsJSArray();
+ base::Optional<FixedArrayBaseRef> array_elements = array_ref.elements();
+ if (array_elements.has_value()) {
+ element = array_ref.GetOwnCowElement(*array_elements, index);
+ if (element.has_value()) {
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), elements,
+ jsgraph()->Constant(*array_elements));
+ effect = graph()->NewNode(
+ simplified()->CheckIf(
+ DeoptimizeReason::kCowArrayElementsChanged),
+ check, effect, control);
+ }
+ }
}
+ } else if (receiver_ref.IsString()) {
+ element = receiver_ref.AsString().GetCharAsStringOrUndefined(index);
}
+
if (element.has_value()) {
Node* value = access_mode == AccessMode::kHas
? jsgraph()->TrueConstant()
@@ -2469,43 +2482,40 @@ JSNativeContextSpecialization::BuildPropertyStore(
value = effect =
graph()->NewNode(simplified()->CheckNumber(FeedbackSource()), value,
effect, control);
- if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a HeapNumber for the new property.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
- Type::OtherInternal());
- a.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
- FieldAccess value_field_access =
- AccessBuilder::ForHeapNumberValue();
- value_field_access.const_field_info = field_access.const_field_info;
- a.Store(value_field_access, value);
- value = effect = a.Finish();
-
- field_access.type = Type::Any();
- field_access.machine_type = MachineType::TaggedPointer();
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- // We just store directly to the HeapNumber.
- FieldAccess const storage_access = {
- kTaggedBase,
- field_index.offset(),
- name.object(),
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
- access_info.GetConstFieldInfo(),
- access_mode == AccessMode::kStoreInLiteral};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access),
- storage, effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
- }
+ if (access_info.HasTransitionMap()) {
+ // Allocate a HeapNumber for the new property.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
+ Type::OtherInternal());
+ a.Store(AccessBuilder::ForMap(),
+ MapRef(broker(), factory()->heap_number_map()));
+ FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue();
+ value_field_access.const_field_info = field_access.const_field_info;
+ a.Store(value_field_access, value);
+ value = effect = a.Finish();
+
+ field_access.type = Type::Any();
+ field_access.machine_type = MachineType::TaggedPointer();
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ } else {
+ // We just store directly to the HeapNumber.
+ FieldAccess const storage_access = {
+ kTaggedBase,
+ field_index.offset(),
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ access_info.GetConstFieldInfo(),
+ access_mode == AccessMode::kStoreInLiteral};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access), storage,
+ effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
}
if (store_to_existing_constant_field) {
DCHECK(!access_info.HasTransitionMap());
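The reshaped ReduceElementLoadFromHeapConstant above distinguishes plain JSObject receivers, COW-backed JSArrays (where the folded constant must stay guarded against the backing store being swapped out), and strings. A standalone model of that control flow in plain C++17 with stand-in types; it only illustrates the shape of the decision, not the real graph construction:

#include <cstddef>
#include <optional>
#include <string>
#include <variant>
#include <vector>

struct CowArray { const std::vector<int>* elements = nullptr; };
using Receiver = std::variant<std::vector<int>, CowArray, std::string>;

struct Folded {
  int value;
  const std::vector<int>* guard_elements = nullptr;  // nullptr: no guard needed
};

std::optional<Folded> TryFoldElementLoad(const Receiver& receiver, std::size_t index) {
  if (auto* plain = std::get_if<std::vector<int>>(&receiver)) {
    if (index < plain->size()) return Folded{(*plain)[index]};
    return std::nullopt;
  }
  if (auto* cow = std::get_if<CowArray>(&receiver)) {
    if (cow->elements == nullptr) return std::nullopt;       // elements() miss
    if (index >= cow->elements->size()) return std::nullopt;
    // Fold the value, but remember that generated code must re-check that the
    // array still points at this exact backing store (the ReferenceEqual plus
    // CheckIf deopt in the hunk above).
    return Folded{(*cow->elements)[index], cow->elements};
  }
  const auto& str = std::get<std::string>(receiver);
  if (index < str.size()) return Folded{str[index]};         // analogous to the string character case
  return std::nullopt;
}

int main() {
  std::vector<int> backing{10, 20, 30};
  CowArray array{&backing};
  auto folded = TryFoldElementLoad(Receiver{array}, 1);
  return (folded && folded->value == 20 && folded->guard_elements == &backing) ? 0 : 1;
}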
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index da3af62bf2..e565f1dfce 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -676,6 +676,50 @@ ForInParameters const& ForInParametersOf(const Operator* op) {
return OpParameter<ForInParameters>(op);
}
+JSWasmCallParameters const& JSWasmCallParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSWasmCall, op->opcode());
+ return OpParameter<JSWasmCallParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, JSWasmCallParameters const& p) {
+ return os << p.module() << ", " << p.signature() << ", " << p.feedback();
+}
+
+size_t hash_value(JSWasmCallParameters const& p) {
+ return base::hash_combine(p.module(), p.signature(),
+ FeedbackSource::Hash()(p.feedback()));
+}
+
+bool operator==(JSWasmCallParameters const& lhs,
+ JSWasmCallParameters const& rhs) {
+ return lhs.module() == rhs.module() && lhs.signature() == rhs.signature() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+int JSWasmCallParameters::arity_without_implicit_args() const {
+ return static_cast<int>(signature_->parameter_count());
+}
+
+int JSWasmCallParameters::input_count() const {
+ return static_cast<int>(signature_->parameter_count()) +
+ JSWasmCallNode::kExtraInputCount;
+}
+
+// static
+Type JSWasmCallNode::TypeForWasmReturnType(const wasm::ValueType& type) {
+ switch (type.kind()) {
+ case wasm::kI32:
+ return Type::Signed32();
+ case wasm::kI64:
+ return Type::BigInt();
+ case wasm::kF32:
+ case wasm::kF64:
+ return Type::Number();
+ default:
+ UNREACHABLE();
+ }
+}
+
#define CACHED_OP_LIST(V) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
@@ -874,6 +918,17 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallWasm(
+ const wasm::WasmModule* wasm_module,
+ const wasm::FunctionSig* wasm_signature, FeedbackSource const& feedback) {
+ JSWasmCallParameters parameters(wasm_module, wasm_signature, feedback);
+ return zone()->New<Operator1<JSWasmCallParameters>>(
+ IrOpcode::kJSWasmCall, Operator::kNoProperties, // opcode
+ "JSWasmCall", // name
+ parameters.input_count(), 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::ConstructForwardVarargs(
size_t arity, uint32_t start_index) {
ConstructForwardVarargsParameters parameters(arity, start_index);
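JSWasmCallParameters above follows the usual operator-parameter recipe: a small value type plus operator==, hash_value and operator<< so the operator can be compared, hashed and printed during compilation. A generic standalone sketch of that recipe; the field types and the hash combiner are arbitrary stand-ins, not V8's base::hash_combine:

#include <cstddef>
#include <functional>
#include <ostream>

struct CallParamsSketch {
  const void* module = nullptr;     // stands in for a module pointer
  const void* signature = nullptr;  // stands in for a signature pointer
  int feedback_slot = -1;
};

inline bool operator==(const CallParamsSketch& a, const CallParamsSketch& b) {
  return a.module == b.module && a.signature == b.signature &&
         a.feedback_slot == b.feedback_slot;
}

inline std::size_t hash_value(const CallParamsSketch& p) {
  std::size_t h = std::hash<const void*>{}(p.module);
  h = h * 31 + std::hash<const void*>{}(p.signature);
  h = h * 31 + std::hash<int>{}(p.feedback_slot);
  return h;
}

inline std::ostream& operator<<(std::ostream& os, const CallParamsSketch& p) {
  return os << p.module << ", " << p.signature << ", " << p.feedback_slot;
}

int main() {
  CallParamsSketch a{nullptr, nullptr, 1}, b{nullptr, nullptr, 1};
  return (a == b && hash_value(a) == hash_value(b)) ? 0 : 1;
}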
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 518eff7fc0..46258f3bb1 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/codegen/tnode.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
#include "src/compiler/node-properties.h"
@@ -25,6 +26,10 @@ class ArrayBoilerplateDescription;
class FeedbackCell;
class SharedFunctionInfo;
+namespace wasm {
+class ValueType;
+}
+
namespace compiler {
// Forward declarations.
@@ -816,6 +821,35 @@ size_t hash_value(ForInParameters const&);
std::ostream& operator<<(std::ostream&, ForInParameters const&);
const ForInParameters& ForInParametersOf(const Operator* op);
+class JSWasmCallParameters {
+ public:
+ explicit JSWasmCallParameters(const wasm::WasmModule* module,
+ const wasm::FunctionSig* signature,
+ FeedbackSource const& feedback)
+ : module_(module), signature_(signature), feedback_(feedback) {
+ DCHECK_NOT_NULL(module);
+ DCHECK_NOT_NULL(signature);
+ }
+
+ const wasm::WasmModule* module() const { return module_; }
+ const wasm::FunctionSig* signature() const { return signature_; }
+ FeedbackSource const& feedback() const { return feedback_; }
+ int input_count() const;
+ int arity_without_implicit_args() const;
+
+ private:
+ const wasm::WasmModule* const module_;
+ const wasm::FunctionSig* const signature_;
+ const FeedbackSource feedback_;
+};
+
+JSWasmCallParameters const& JSWasmCallParametersOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ JSWasmCallParameters const&);
+size_t hash_value(JSWasmCallParameters const&);
+bool operator==(JSWasmCallParameters const&, JSWasmCallParameters const&);
+
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -925,6 +959,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
+ const Operator* CallWasm(const wasm::WasmModule* wasm_module,
+ const wasm::FunctionSig* wasm_signature,
+ FeedbackSource const& feedback);
+
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
CallFrequency const& frequency = CallFrequency(),
@@ -1247,7 +1285,8 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstruct ||
node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
- node->opcode() == IrOpcode::kJSConstructWithSpread);
+ node->opcode() == IrOpcode::kJSConstructWithSpread ||
+ node->opcode() == IrOpcode::kJSWasmCall);
}
#define INPUTS(V) \
@@ -1259,8 +1298,8 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
// Besides actual arguments, JSCall nodes (and variants) also take the
// following. Note that we rely on the fact that all variants (JSCall,
// JSCallWithArrayLike, JSCallWithSpread, JSConstruct,
- // JSConstructWithArrayLike, JSConstructWithSpread) have the same underlying
- // node layout.
+ // JSConstructWithArrayLike, JSConstructWithSpread, JSWasmCall) have the same
+ // underlying node layout.
static constexpr int kTargetInputCount = 1;
static constexpr int kReceiverOrNewTargetInputCount = 1;
static constexpr int kFeedbackVectorInputCount = 1;
@@ -1355,6 +1394,35 @@ using JSCallNode = JSCallNodeBase<IrOpcode::kJSCall>;
using JSCallWithSpreadNode = JSCallNodeBase<IrOpcode::kJSCallWithSpread>;
using JSCallWithArrayLikeNode = JSCallNodeBase<IrOpcode::kJSCallWithArrayLike>;
+class JSWasmCallNode final : public JSCallOrConstructNode {
+ public:
+ explicit constexpr JSWasmCallNode(Node* node) : JSCallOrConstructNode(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSWasmCall);
+ }
+
+ const JSWasmCallParameters& Parameters() const {
+ return OpParameter<JSWasmCallParameters>(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Target, target, 0, Object) \
+ V(Receiver, receiver, 1, Object)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+
+ static constexpr int kReceiverInputCount = 1;
+ STATIC_ASSERT(kReceiverInputCount ==
+ JSCallOrConstructNode::kReceiverOrNewTargetInputCount);
+
+ int ArgumentCount() const override {
+ // Note: The count reported by this function depends only on the parameter
+ // count, thus adding/removing inputs will not affect it.
+ return Parameters().arity_without_implicit_args();
+ }
+
+ static Type TypeForWasmReturnType(const wasm::ValueType& type);
+};
+
template <int kOpcode>
class JSConstructNodeBase final : public JSCallOrConstructNode {
public:
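
For readers skimming the new JSWasmCall plumbing above: the node reuses the JSCallOrConstructNode layout, so its value inputs are the target, the receiver, the wasm arguments, and the trailing feedback vector, while ArgumentCount() reports only the wasm signature's parameter count (arity_without_implicit_args). A minimal standalone sketch of that bookkeeping; TotalValueInputs is a hypothetical helper, not V8 API, and the constants simply mirror the ones declared in the hunk above.

    // Standalone sketch, not V8 code: how a JSWasmCall node's value-input
    // count relates to the argument count its parameters report.
    #include <cassert>

    constexpr int kTargetInputCount = 1;          // mirrors JSCallOrConstructNode
    constexpr int kReceiverInputCount = 1;        // receiver slot
    constexpr int kFeedbackVectorInputCount = 1;  // trailing feedback vector

    // Hypothetical helper: total value inputs for a wasm callee declaring
    // `wasm_param_count` parameters (i.e. arity_without_implicit_args()).
    int TotalValueInputs(int wasm_param_count) {
      return kTargetInputCount + kReceiverInputCount + wasm_param_count +
             kFeedbackVectorInputCount;
    }

    int main() {
      // A wasm function with two parameters: two explicit arguments plus the
      // three implicit inputs (target, receiver, feedback vector).
      assert(TotalValueInputs(2) == 5);
      return 0;
    }
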
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index a8dce04d44..008aacdb39 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -594,6 +594,8 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
PropertyCellRef string_length_protector(
broker(), factory()->string_length_protector());
+ string_length_protector.SerializeAsProtector();
+
if (string_length_protector.value().AsSmi() ==
Protectors::kProtectorValid) {
// We can just deoptimize if the {length} is out-of-bounds. Besides
@@ -1569,15 +1571,6 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
-
-#ifndef V8_NO_ARGUMENTS_ADAPTOR
-bool NeedsArgumentAdaptorFrame(SharedFunctionInfoRef shared, int arity) {
- static const int sentinel = kDontAdaptArgumentsSentinel;
- const int num_decl_parms = shared.internal_formal_parameter_count();
- return (num_decl_parms != arity && num_decl_parms != sentinel);
-}
-#endif
-
} // namespace
Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
@@ -1722,7 +1715,10 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
shared = SharedFunctionInfoRef(broker(), ccp.shared_info());
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
- shared = cell.value().AsFeedbackVector().shared_function_info();
+ base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
+ if (feedback_vector.has_value()) {
+ shared = feedback_vector->shared_function_info();
+ }
}
if (shared.has_value()) {
@@ -1762,7 +1758,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
int formal_count = shared->internal_formal_parameter_count();
if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
@@ -1781,22 +1776,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
flags | CallDescriptor::kCanUseRoots)));
-#else
- if (NeedsArgumentAdaptorFrame(*shared, arity)) {
- node->RemoveInput(n.FeedbackVectorIndex());
- // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(
- graph()->zone(), 4,
- jsgraph()->Constant(shared->internal_formal_parameter_count()));
- NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 1 + arity, flags)));
-#endif
} else if (shared->HasBuiltinId() &&
Builtins::IsCpp(shared->builtin_id())) {
// Patch {node} to a direct CEntry call.
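
The ReduceJSCall hunk above now always uses the no-arguments-adaptor scheme: when the callee declares more formal parameters than the call site supplies (and does not use kDontAdaptArgumentsSentinel), the lowered call is built for 1 + formal_count parameters, presumably padding the missing arguments (not shown in this hunk); otherwise the call keeps its own arity. A standalone sketch of that arity decision; AdaptedArity is a hypothetical helper and the sentinel value here is only a placeholder.

    // Standalone sketch of the arity decision, not V8 code.
    #include <algorithm>
    #include <cassert>

    constexpr int kDontAdaptArgumentsSentinel = -1;  // placeholder value

    // Hypothetical helper: how many arguments the lowered call carries.
    int AdaptedArity(int formal_count, int arity) {
      if (formal_count == kDontAdaptArgumentsSentinel) return arity;
      return std::max(formal_count, arity);
    }

    int main() {
      assert(AdaptedArity(3, 1) == 3);  // under-applied: use the formal count
      assert(AdaptedArity(3, 5) == 5);  // over-applied: keep the call site's arity
      assert(AdaptedArity(kDontAdaptArgumentsSentinel, 2) == 2);
      return 0;
    }
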
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index ea316513d8..4f1565d0a9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -77,21 +77,6 @@ MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
return zone->New<MachineSignature>(return_count, param_count, types);
}
-int CallDescriptor::GetFirstUnusedStackSlot() const {
- int slots_above_sp = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- LinkageLocation operand = GetInputLocation(i);
- if (!operand.IsRegister()) {
- int new_candidate =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > slots_above_sp) {
- slots_above_sp = new_candidate;
- }
- }
- }
- return slots_above_sp;
-}
-
int CallDescriptor::GetStackParameterDelta(
CallDescriptor const* tail_caller) const {
// In the IsTailCallForTierUp case, the callee has
@@ -100,8 +85,8 @@ int CallDescriptor::GetStackParameterDelta(
// inputs to the TailCall node, since they already exist on the stack.
if (IsTailCallForTierUp()) return 0;
- int callee_slots_above_sp = GetFirstUnusedStackSlot();
- int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
+ int callee_slots_above_sp = GetOffsetToReturns();
+ int tail_caller_slots_above_sp = tail_caller->GetOffsetToReturns();
int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
if (ShouldPadArguments(stack_param_delta)) {
if (callee_slots_above_sp % 2 != 0) {
@@ -119,10 +104,43 @@ int CallDescriptor::GetStackParameterDelta(
return stack_param_delta;
}
+int CallDescriptor::GetFirstUnusedStackSlot() const {
+ int start_of_args = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ LinkageLocation operand = GetInputLocation(i);
+ if (!operand.IsRegister()) {
+ // Reverse, since arguments have negative offsets in the frame.
+ int reverse_location =
+ -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+ DCHECK_GE(reverse_location, 0);
+ start_of_args = std::max(start_of_args, reverse_location);
+ }
+ }
+ return start_of_args;
+}
+
int CallDescriptor::GetOffsetToReturns() const {
- int offset = static_cast<int>(StackParameterCount());
- if (ShouldPadArguments(offset)) offset++;
- return offset;
+ // If there are return stack slots, return the first slot of the last one.
+ constexpr int kNoReturnSlot = std::numeric_limits<int>::max();
+ int end_of_returns = kNoReturnSlot;
+ for (size_t i = 0; i < ReturnCount(); ++i) {
+ LinkageLocation operand = GetReturnLocation(i);
+ if (!operand.IsRegister()) {
+ // Reverse, since returns have negative offsets in the frame.
+ int reverse_location = -operand.GetLocation() - 1;
+ DCHECK_GE(reverse_location, 0);
+ end_of_returns = std::min(end_of_returns, reverse_location);
+ }
+ }
+ if (end_of_returns != kNoReturnSlot) return end_of_returns;
+
+ // Otherwise, return the first unused slot before the parameters, with any
+ // additional padding slot if it exists.
+ end_of_returns = GetFirstUnusedStackSlot();
+ if (ShouldPadArguments(end_of_returns)) end_of_returns++;
+
+ DCHECK_EQ(end_of_returns == 0, StackParameterCount() == 0);
+ return end_of_returns;
}
int CallDescriptor::GetTaggedParameterSlots() const {
@@ -138,11 +156,12 @@ int CallDescriptor::GetTaggedParameterSlots() const {
bool CallDescriptor::CanTailCall(const CallDescriptor* callee) const {
if (ReturnCount() != callee->ReturnCount()) return false;
- const int stack_param_delta = callee->GetStackParameterDelta(this);
+ const int stack_returns_delta =
+ GetOffsetToReturns() - callee->GetOffsetToReturns();
for (size_t i = 0; i < ReturnCount(); ++i) {
if (GetReturnLocation(i).IsCallerFrameSlot() &&
callee->GetReturnLocation(i).IsCallerFrameSlot()) {
- if (GetReturnLocation(i).AsCallerFrameSlot() - stack_param_delta !=
+ if (GetReturnLocation(i).AsCallerFrameSlot() + stack_returns_delta !=
callee->GetReturnLocation(i).AsCallerFrameSlot()) {
return false;
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 5e3a1163cc..4aecb7c3a8 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -389,12 +389,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool UsesOnlyRegisters() const;
- // Returns the first stack slot that is not used by the stack parameters.
- int GetFirstUnusedStackSlot() const;
-
int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
- // Returns the number of slots to the first return value slot.
+ // Returns the first stack slot that is not used by the stack parameters,
+ // which is the return slot area, or a padding slot for frame alignment.
+ int GetFirstUnusedStackSlot() const;
+
+ // If there are return stack slots, returns the first slot of the last one.
+  // Otherwise, returns the first unused slot before the parameters. This is the
+ // slot where returns would go if there were any.
int GetOffsetToReturns() const;
int GetTaggedParameterSlots() const;
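
The rewritten GetFirstUnusedStackSlot/GetOffsetToReturns above lean on one piece of arithmetic: caller-frame stack operands carry negative location indices, so -location + size_in_pointers - 1 is one past the highest slot an operand occupies, and the first unused slot is the maximum of that over all non-register parameters. A standalone sketch of just that computation, assuming the encoding implied by the code above; FakeStackOperand and FirstUnusedStackSlot are illustrative stand-ins, not V8 types.

    // Standalone sketch of the slot arithmetic, not V8 code.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct FakeStackOperand {
      int location;          // negative index, as caller frame slots are encoded
      int size_in_pointers;  // 1 for a single slot, 2 for a double-wide value
    };

    int FirstUnusedStackSlot(const std::vector<FakeStackOperand>& params) {
      int start_of_args = 0;
      for (const FakeStackOperand& op : params) {
        // One past the highest slot this operand occupies.
        int reverse_location = -op.location + op.size_in_pointers - 1;
        start_of_args = std::max(start_of_args, reverse_location);
      }
      return start_of_args;
    }

    int main() {
      // Two single-slot parameters (locations -1 and -2) occupy slots 0 and 1,
      // so the first unused slot is 2.
      assert(FirstUnusedStackSlot({{-1, 1}, {-2, 1}}) == 2);
      // A two-slot parameter at location -3 occupies slots 2 and 3, so the
      // first unused slot is 4.
      assert(FirstUnusedStackSlot({{-1, 1}, {-3, 2}}) == 4);
      return 0;
    }
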
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 228375349d..2a0189ae12 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -1081,8 +1081,9 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
// Make sure we don't resurrect dead {replacement} nodes.
// Skip lowering if the type of the {replacement} node is not a subtype
// of the original {node}'s type.
- // TODO(tebbi): We should insert a {TypeGuard} for the intersection of
- // these two types here once we properly handle {Type::None} everywhere.
+ // TODO(turbofan): We should insert a {TypeGuard} for the intersection
+ // of these two types here once we properly handle {Type::None}
+ // everywhere.
if (!replacement->IsDead() && NodeProperties::GetType(replacement)
.Is(NodeProperties::GetType(node))) {
ReplaceWithValue(node, replacement, effect);
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 11c610fc88..582eebd8f5 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -542,7 +542,44 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
return loop_tree;
}
-Node* LoopTree::HeaderNode(Loop* loop) {
+bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
+ const LoopTree::Loop* loop) {
+ // Look for returns and if projections that are outside the loop but whose
+ // control input is inside the loop.
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ for (Node* node : loop_tree->LoopNodes(loop)) {
+ for (Node* use : node->uses()) {
+ if (!loop_tree->Contains(loop, use)) {
+ bool unmarked_exit;
+ switch (node->opcode()) {
+ case IrOpcode::kLoopExit:
+ unmarked_exit = (node->InputAt(1) != loop_node);
+ break;
+ case IrOpcode::kLoopExitValue:
+ case IrOpcode::kLoopExitEffect:
+ unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
+ break;
+ default:
+ unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
+ }
+ if (unmarked_exit) {
+ if (FLAG_trace_turbo_loop) {
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ PrintF(
+ "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
+ "(%s) is inside loop, but its use %i (%s) is outside.\n",
+ loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+Node* LoopTree::HeaderNode(const Loop* loop) {
Node* first = *HeaderNodes(loop).begin();
if (first->opcode() == IrOpcode::kLoop) return first;
DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
@@ -551,6 +588,54 @@ Node* LoopTree::HeaderNode(Loop* loop) {
return header;
}
+Node* NodeCopier::map(Node* node, uint32_t copy_index) {
+ DCHECK_LT(copy_index, copy_count_);
+ if (node_map_.Get(node) == 0) return node;
+ return copies_->at(node_map_.Get(node) + copy_index);
+}
+
+void NodeCopier::Insert(Node* original, const NodeVector& new_copies) {
+ DCHECK_EQ(new_copies.size(), copy_count_);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ copies_->insert(copies_->end(), new_copies.begin(), new_copies.end());
+}
+
+void NodeCopier::Insert(Node* original, Node* copy) {
+ DCHECK_EQ(copy_count_, 1);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ copies_->push_back(copy);
+}
+
+void NodeCopier::CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead,
+ NodeRange nodes,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins) {
+ // Copy all the nodes first.
+ for (Node* original : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(original));
+ NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", original);
+ node_map_.Set(original, copies_->size() + 1);
+ copies_->push_back(original);
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = graph->CloneNode(original);
+ copies_->push_back(copy);
+ }
+ }
+
+ // Fix inputs of the copies.
+ for (Node* original : nodes) {
+ for (uint32_t copy_index = 0; copy_index < copy_count_; copy_index++) {
+ Node* copy = map(original, copy_index);
+ for (int i = 0; i < copy->InputCount(); i++) {
+ copy->ReplaceInput(i, map(original->InputAt(i), copy_index));
+ }
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
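
NodeCopier::CopyNodes above is a two-pass region copy: clone every node in the range first, then walk the clones and redirect any input that also lives in the range to its clone, leaving inputs from outside the region untouched. A self-contained sketch of that pattern on a toy node type; TinyNode and CopyRegion are illustrative, not V8 code.

    // Self-contained sketch of the two-pass copy, not V8 code.
    #include <cassert>
    #include <unordered_map>
    #include <vector>

    struct TinyNode {
      std::vector<TinyNode*> inputs;
    };

    // Clone `region`; inputs pointing outside the region are left untouched.
    std::vector<TinyNode*> CopyRegion(const std::vector<TinyNode*>& region) {
      std::unordered_map<TinyNode*, TinyNode*> map;
      std::vector<TinyNode*> copies;
      for (TinyNode* original : region) {  // pass 1: clone every node
        TinyNode* copy = new TinyNode(*original);
        map[original] = copy;
        copies.push_back(copy);
      }
      for (TinyNode* copy : copies) {  // pass 2: redirect in-region inputs
        for (TinyNode*& input : copy->inputs) {
          auto it = map.find(input);
          if (it != map.end()) input = it->second;
        }
      }
      return copies;
    }

    int main() {
      TinyNode a;
      TinyNode b{{&a}};            // b uses a; both are inside the region
      TinyNode outside;
      TinyNode c{{&outside, &b}};  // c has one external and one internal input
      std::vector<TinyNode*> copies = CopyRegion({&a, &b, &c});
      assert(copies[1]->inputs[0] == copies[0]);  // b' now uses a'
      assert(copies[2]->inputs[0] == &outside);   // external edge untouched
      assert(copies[2]->inputs[1] == copies[1]);  // internal edge redirected to b'
      for (TinyNode* n : copies) delete n;
      return 0;
    }
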
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 043833a54c..3cce611be9 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -7,7 +7,11 @@
#include "src/base/iterator.h"
#include "src/common/globals.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-origin-table.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"
@@ -41,11 +45,11 @@ class LoopTree : public ZoneObject {
public:
Loop* parent() const { return parent_; }
const ZoneVector<Loop*>& children() const { return children_; }
- size_t HeaderSize() const { return body_start_ - header_start_; }
- size_t BodySize() const { return exits_start_ - body_start_; }
- size_t ExitsSize() const { return exits_end_ - exits_start_; }
- size_t TotalSize() const { return exits_end_ - header_start_; }
- size_t depth() const { return static_cast<size_t>(depth_); }
+ uint32_t HeaderSize() const { return body_start_ - header_start_; }
+ uint32_t BodySize() const { return exits_start_ - body_start_; }
+ uint32_t ExitsSize() const { return exits_end_ - exits_start_; }
+ uint32_t TotalSize() const { return exits_end_ - header_start_; }
+ uint32_t depth() const { return depth_; }
private:
friend class LoopTree;
@@ -77,7 +81,7 @@ class LoopTree : public ZoneObject {
// Check if the {loop} contains the {node}, either directly or by containing
// a nested loop that contains {node}.
- bool Contains(Loop* loop, Node* node) {
+ bool Contains(const Loop* loop, Node* node) {
for (Loop* c = ContainingLoop(node); c != nullptr; c = c->parent_) {
if (c == loop) return true;
}
@@ -87,40 +91,51 @@ class LoopTree : public ZoneObject {
// Return the list of outer loops.
const ZoneVector<Loop*>& outer_loops() const { return outer_loops_; }
+ // Return a new vector containing the inner loops.
+ ZoneVector<const Loop*> inner_loops() const {
+ ZoneVector<const Loop*> inner_loops(zone_);
+ for (const Loop& loop : all_loops_) {
+ if (loop.children().empty()) {
+ inner_loops.push_back(&loop);
+ }
+ }
+ return inner_loops;
+ }
+
// Return the unique loop number for a given loop. Loop numbers start at {1}.
- int LoopNum(Loop* loop) const {
+ int LoopNum(const Loop* loop) const {
return 1 + static_cast<int>(loop - &all_loops_[0]);
}
// Return a range which can iterate over the header nodes of {loop}.
- NodeRange HeaderNodes(Loop* loop) {
+ NodeRange HeaderNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->header_start_,
&loop_nodes_[0] + loop->body_start_);
}
// Return the header control node for a loop.
- Node* HeaderNode(Loop* loop);
+ Node* HeaderNode(const Loop* loop);
// Return a range which can iterate over the body nodes of {loop}.
- NodeRange BodyNodes(Loop* loop) {
+ NodeRange BodyNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->body_start_,
&loop_nodes_[0] + loop->exits_start_);
}
   // Return a range which can iterate over the exit nodes of {loop}.
- NodeRange ExitNodes(Loop* loop) {
+ NodeRange ExitNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->exits_start_,
&loop_nodes_[0] + loop->exits_end_);
}
// Return a range which can iterate over the nodes of {loop}.
- NodeRange LoopNodes(Loop* loop) {
+ NodeRange LoopNodes(const Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->header_start_,
&loop_nodes_[0] + loop->exits_end_);
}
// Return the node that represents the control, i.e. the loop node itself.
- Node* GetLoopControl(Loop* loop) {
+ Node* GetLoopControl(const Loop* loop) {
// TODO(turbofan): make the loop control node always first?
for (Node* node : HeaderNodes(loop)) {
if (node->opcode() == IrOpcode::kLoop) return node;
@@ -161,8 +176,49 @@ class V8_EXPORT_PRIVATE LoopFinder {
// Build a loop tree for the entire graph.
static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
Zone* temp_zone);
+
+ static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop);
};
+// Copies a range of nodes any number of times.
+class NodeCopier {
+ public:
+ // {max}: The maximum number of nodes that this copier will track, including
+ // The original nodes and all copies.
+ // {p}: A vector that holds the original nodes and all copies.
+ // {copy_count}: How many times the nodes should be copied.
+ NodeCopier(Graph* graph, uint32_t max, NodeVector* p, uint32_t copy_count)
+ : node_map_(graph, max), copies_(p), copy_count_(copy_count) {
+ DCHECK_GT(copy_count, 0);
+ }
+
+ // Returns the mapping of {node} in the {copy_index}'th copy, or {node} itself
+ // if it is not present in the mapping. The copies are 0-indexed.
+ Node* map(Node* node, uint32_t copy_index);
+
+ // Helper version of {map} for one copy.
+ V8_INLINE Node* map(Node* node) { return map(node, 0); }
+
+ // Insert a new mapping from {original} to {new_copies} into the copier.
+ void Insert(Node* original, const NodeVector& new_copies);
+
+ // Helper version of {Insert} for one copy.
+ void Insert(Node* original, Node* copy);
+
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins);
+
+ bool Marked(Node* node) { return node_map_.Get(node) > 0; }
+
+ private:
+ // Maps a node to its index in the {copies_} vector.
+ NodeMarker<size_t> node_map_;
+ // The vector which contains the mapped nodes.
+ NodeVector* copies_;
+ // How many copies of the nodes should be generated.
+ const uint32_t copy_count_;
+};
} // namespace compiler
} // namespace internal
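
How a client drives NodeCopier is easiest to see in the loop-peeling changes further down in this diff; condensed, the peeler does roughly the following, with copy_count 1 because a single peeled iteration is produced. This is illustrative only: tmp_zone_, graph_, dead, loop and kAssumedLoopEntryIndex belong to the peeler, not to NodeCopier.

    // Condensed from the LoopPeeler::Peel hunk later in this diff.
    NodeVector pairs(tmp_zone_);
    NodeCopier copier(graph_, /*max=*/5 + loop->TotalSize() * 2, &pairs,
                      /*copy_count=*/1);
    // Header nodes map to their loop-entry inputs instead of fresh clones...
    for (Node* node : loop_tree_->HeaderNodes(loop)) {
      copier.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
    }
    // ...so cloning the body automatically rewires it to the loop entry.
    copier.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
                     source_positions_, node_origins_);
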
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 3cbf7b583a..e666f8c642 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -3,9 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/loop-peeling.h"
+
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/loop-analysis.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
@@ -103,59 +105,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-struct Peeling {
- // Maps a node to its index in the {pairs} vector.
- NodeMarker<size_t> node_map;
- // The vector which contains the mapped nodes.
- NodeVector* pairs;
-
- Peeling(Graph* graph, size_t max, NodeVector* p)
- : node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
-
- Node* map(Node* node) {
- if (node_map.Get(node) == 0) return node;
- return pairs->at(node_map.Get(node));
- }
-
- void Insert(Node* original, Node* copy) {
- node_map.Set(original, 1 + pairs->size());
- pairs->push_back(original);
- pairs->push_back(copy);
- }
-
- void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins) {
- NodeVector inputs(tmp_zone_);
- // Copy all the nodes first.
- for (Node* node : nodes) {
- SourcePositionTable::Scope position(
- source_positions, source_positions->GetSourcePosition(node));
- NodeOriginTable::Scope origin_scope(node_origins, "copy nodes", node);
- inputs.clear();
- for (Node* input : node->inputs()) {
- inputs.push_back(map(input));
- }
- Node* copy = graph->NewNode(node->op(), node->InputCount(), &inputs[0]);
- if (NodeProperties::IsTyped(node)) {
- NodeProperties::SetType(copy, NodeProperties::GetType(node));
- }
- Insert(node, copy);
- }
-
- // Fix remaining inputs of the copies.
- for (Node* original : nodes) {
- Node* copy = pairs->at(node_map.Get(original));
- for (int i = 0; i < copy->InputCount(); i++) {
- copy->ReplaceInput(i, map(original->InputAt(i)));
- }
- }
- }
-
- bool Marked(Node* node) { return node_map.Get(node) > 0; }
-};
-
-
class PeeledIterationImpl : public PeeledIteration {
public:
NodeVector node_pairs_;
@@ -173,43 +122,6 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-bool LoopPeeler::CanPeel(LoopTree::Loop* loop) {
- // Look for returns and if projections that are outside the loop but whose
- // control input is inside the loop.
- Node* loop_node = loop_tree_->GetLoopControl(loop);
- for (Node* node : loop_tree_->LoopNodes(loop)) {
- for (Node* use : node->uses()) {
- if (!loop_tree_->Contains(loop, use)) {
- bool unmarked_exit;
- switch (node->opcode()) {
- case IrOpcode::kLoopExit:
- unmarked_exit = (node->InputAt(1) != loop_node);
- break;
- case IrOpcode::kLoopExitValue:
- case IrOpcode::kLoopExitEffect:
- unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
- break;
- default:
- unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
- }
- if (unmarked_exit) {
- if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree_->GetLoopControl(loop);
- PrintF(
- "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
- "(%s) is inside "
- "loop, but its use %i (%s) is outside.\n",
- loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- }
- return false;
- }
- }
- }
- }
- return true;
-}
-
PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
if (!CanPeel(loop)) return nullptr;
@@ -217,19 +129,19 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// Construct the peeled iteration.
//============================================================================
PeeledIterationImpl* iter = tmp_zone_->New<PeeledIterationImpl>(tmp_zone_);
- size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
- Peeling peeling(graph_, estimated_peeled_size, &iter->node_pairs_);
+ uint32_t estimated_peeled_size = 5 + loop->TotalSize() * 2;
+ NodeCopier copier(graph_, estimated_peeled_size, &iter->node_pairs_, 1);
Node* dead = graph_->NewNode(common_->Dead());
// Map the loop header nodes to their entry values.
for (Node* node : loop_tree_->HeaderNodes(loop)) {
- peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
+ copier.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
}
// Copy all the nodes of loop body for the peeled iteration.
- peeling.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
- source_positions_, node_origins_);
+ copier.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
+ source_positions_, node_origins_);
//============================================================================
// Replace the entry to the loop with the output of the peeled iteration.
@@ -242,7 +154,7 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// from the peeled iteration.
NodeVector inputs(tmp_zone_);
for (int i = 1; i < loop_node->InputCount(); i++) {
- inputs.push_back(peeling.map(loop_node->InputAt(i)));
+ inputs.push_back(copier.map(loop_node->InputAt(i)));
}
Node* merge =
graph_->NewNode(common_->Merge(backedges), backedges, &inputs[0]);
@@ -252,7 +164,7 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
if (node->opcode() == IrOpcode::kLoop) continue; // already done.
inputs.clear();
for (int i = 0; i < backedges; i++) {
- inputs.push_back(peeling.map(node->InputAt(1 + i)));
+ inputs.push_back(copier.map(node->InputAt(1 + i)));
}
for (Node* input : inputs) {
if (input != inputs[0]) { // Non-redundant phi.
@@ -269,9 +181,9 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
// Only one backedge, simply replace the input to loop with output of
// peeling.
for (Node* node : loop_tree_->HeaderNodes(loop)) {
- node->ReplaceInput(0, peeling.map(node->InputAt(1)));
+ node->ReplaceInput(0, copier.map(node->InputAt(1)));
}
- new_entry = peeling.map(loop_node->InputAt(1));
+ new_entry = copier.map(loop_node->InputAt(1));
}
loop_node->ReplaceInput(0, new_entry);
@@ -282,18 +194,18 @@ PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
switch (exit->opcode()) {
case IrOpcode::kLoopExit:
// Change the loop exit node to a merge node.
- exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
+ exit->ReplaceInput(1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(exit, common_->Merge(2));
break;
case IrOpcode::kLoopExitValue:
// Change exit marker to phi.
- exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(
exit, common_->Phi(LoopExitValueRepresentationOf(exit->op()), 2));
break;
case IrOpcode::kLoopExitEffect:
// Change effect exit marker to effect phi.
- exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, copier.map(exit->InputAt(0)));
NodeProperties::ChangeOp(exit, common_->EffectPhi(2));
break;
default:
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 730900af54..af7b5f6ce0 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -43,7 +43,9 @@ class V8_EXPORT_PRIVATE LoopPeeler {
tmp_zone_(tmp_zone),
source_positions_(source_positions),
node_origins_(node_origins) {}
- bool CanPeel(LoopTree::Loop* loop);
+ bool CanPeel(LoopTree::Loop* loop) {
+ return LoopFinder::HasMarkedExits(loop_tree_, loop);
+ }
PeeledIteration* Peel(LoopTree::Loop* loop);
void PeelInnerLoopsOfTree();
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 5eeb5dc248..16ff3ff936 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -582,7 +582,7 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
node->opcode() == IrOpcode::kStore &&
- CanBeTaggedPointer(
+ IsAnyTagged(
StoreRepresentationOf(node->op()).representation())) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
@@ -977,7 +977,7 @@ class MachineRepresentationChecker {
return IsAnyCompressed(actual);
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
- // TODO(tebbi): At the moment, the machine graph doesn't contain
+ // TODO(turbofan): At the moment, the machine graph doesn't contain
// reliable information if a node is kTaggedSigned, kTaggedPointer or
// kTagged, and often this is context-dependent. We should at least
// check for obvious violations: kTaggedSigned where we expect
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 918caaf8fd..2220cdb82f 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -145,6 +145,19 @@ class Word64Adapter {
MachineOperatorReducer* r_;
};
+namespace {
+
+// TODO(jgruber): Consider replacing all uses of this function by
+// std::numeric_limits<T>::quiet_NaN().
+template <class T>
+T SilenceNaN(T x) {
+ DCHECK(std::isnan(x));
+ // Do some calculation to make a signalling NaN quiet.
+ return x - x;
+}
+
+} // namespace
+
MachineOperatorReducer::MachineOperatorReducer(Editor* editor,
MachineGraph* mcgraph,
bool allow_signalling_nan)
@@ -465,14 +478,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat32(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN - x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat32(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat32(m.left().ResolvedValue() -
@@ -499,6 +508,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) { // x + NaN => NaN
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
+ }
+ if (m.left().IsNaN()) { // NaN + x => NaN
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
+ }
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
return ReplaceFloat64(m.left().ResolvedValue() +
m.right().ResolvedValue());
@@ -512,14 +527,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN - x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().ResolvedValue() -
@@ -555,9 +566,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Changed(node);
}
if (m.right().IsNaN()) { // x * NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
return ReplaceFloat64(m.left().ResolvedValue() *
@@ -576,14 +585,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node()); // x / 1.0 => x
// TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().ResolvedValue() -
- m.right().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN / x => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().ResolvedValue() -
- m.left().ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
return ReplaceFloat64(
@@ -610,10 +615,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceFloat64(std::numeric_limits<double>::quiet_NaN());
}
if (m.right().IsNaN()) { // x % NaN => NaN
- return Replace(m.right().node());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) { // NaN % x => NaN
- return Replace(m.left().node());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
return ReplaceFloat64(
@@ -660,10 +665,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Atan2: {
Float64BinopMatcher m(node);
if (m.right().IsNaN()) {
- return Replace(m.right().node());
+ return ReplaceFloat64(SilenceNaN(m.right().ResolvedValue()));
}
if (m.left().IsNaN()) {
- return Replace(m.left().node());
+ return ReplaceFloat64(SilenceNaN(m.left().ResolvedValue()));
}
if (m.IsFoldable()) {
return ReplaceFloat64(base::ieee754::atan2(m.left().ResolvedValue(),
@@ -732,21 +737,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
m.right().ResolvedValue()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
- } else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
- node->ReplaceInput(0, Float64Constant(1.0));
- node->ReplaceInput(1, Float64Mul(m.left().node(), m.left().node()));
- NodeProperties::ChangeOp(node, machine()->Float64Div());
- return Changed(node);
} else if (m.right().Is(2.0)) { // x ** 2.0 => x * x
node->ReplaceInput(1, m.left().node());
NodeProperties::ChangeOp(node, machine()->Float64Mul());
return Changed(node);
- } else if (m.right().Is(-0.5)) {
- // x ** 0.5 => 1 / (if x <= -Infinity then Infinity else sqrt(0.0 + x))
- node->ReplaceInput(0, Float64Constant(1.0));
- node->ReplaceInput(1, Float64PowHalf(m.left().node()));
- NodeProperties::ChangeOp(node, machine()->Float64Div());
- return Changed(node);
} else if (m.right().Is(0.5)) {
// x ** 0.5 => if x <= -Infinity then Infinity else sqrt(0.0 + x)
return Replace(Float64PowHalf(m.left().node()));
@@ -781,8 +775,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Float32Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
- // Do some calculation to make guarantee the value is a quiet NaN.
- return ReplaceFloat64(m.ResolvedValue() + m.ResolvedValue());
+ return ReplaceFloat64(SilenceNaN(m.ResolvedValue()));
}
return ReplaceFloat64(m.ResolvedValue());
}
@@ -856,10 +849,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
- // Do some calculation to make guarantee the value is a quiet NaN.
- return ReplaceFloat32(
- DoubleToFloat32(m.ResolvedValue() + m.ResolvedValue()));
+ if (!allow_signalling_nan_ && m.IsNaN()) {
+ return ReplaceFloat32(DoubleToFloat32(SilenceNaN(m.ResolvedValue())));
}
return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
}
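
The new SilenceNaN helper above folds the repeated "x - x" trick into one place: any IEEE-754 arithmetic operation on a NaN operand yields a quiet NaN, so subtracting a possibly signalling NaN from itself is a cheap way to quieten it before embedding it as a constant. A standalone illustration of that property, not V8 code; the assert only checks the result is still a NaN.

    // Standalone illustration: arithmetic on a NaN operand yields a quiet NaN,
    // which is all the reducer needs before folding the constant.
    #include <cassert>
    #include <cmath>
    #include <limits>

    template <class T>
    T SilenceNaN(T x) {
      assert(std::isnan(x));
      return x - x;  // any arithmetic op quietens a signalling NaN
    }

    int main() {
      double snan = std::numeric_limits<double>::signaling_NaN();
      double quiet = SilenceNaN(snan);
      assert(std::isnan(quiet));  // still a NaN, now safe to embed as a constant
      return 0;
    }
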
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 9a90fa6f7c..f90f9345a3 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator.h"
-#include <type_traits>
+#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
@@ -207,6 +207,17 @@ ShiftKind ShiftKindOf(Operator const* op) {
return OpParameter<ShiftKind>(op);
}
+size_t hash_value(TruncateKind kind) { return static_cast<size_t>(kind); }
+
+std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
+ switch (kind) {
+ case TruncateKind::kArchitectureDefault:
+ return os << "kArchitectureDefault";
+ case TruncateKind::kSetOverflowToMin:
+ return os << "kSetOverflowToMin";
+ }
+}
+
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
#define PURE_BINARY_OP_LIST_32(V) \
@@ -382,6 +393,9 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F64x2Floor, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Trunc, Operator::kNoProperties, 1, 0, 1) \
V(F64x2NearestInt, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2ConvertLowI32x4S, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2ConvertLowI32x4U, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2PromoteLowF32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -409,8 +423,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F32x4Floor, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Trunc, Operator::kNoProperties, 1, 0, 1) \
V(F32x4NearestInt, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4DemoteF64x2Zero, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Abs, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
@@ -423,6 +439,9 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ExtMulLowI32x4S, Operator::kCommutative, 2, 0, 1) \
V(I64x2ExtMulHighI32x4S, Operator::kCommutative, 2, 0, 1) \
@@ -464,6 +483,8 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4ExtAddPairwiseI16x8S, Operator::kNoProperties, 1, 0, 1) \
V(I32x4ExtAddPairwiseI16x8U, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4TruncSatF64x2SZero, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4TruncSatF64x2UZero, Operator::kNoProperties, 1, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -542,11 +563,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V128AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
@@ -653,6 +673,14 @@ ShiftKind ShiftKindOf(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
+#define ATOMIC_PAIR_BINOP_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
+ V(Exchange)
+
#define SIMD_LANE_OP_LIST(V) \
V(F64x2, 2) \
V(F32x4, 4) \
@@ -673,397 +701,551 @@ ShiftKind ShiftKindOf(Operator const* op) {
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
-template <IrOpcode::Value op, int value_input_count, int effect_input_count,
- int control_input_count, int value_output_count,
- int effect_output_count, int control_output_count>
-struct CachedOperator : public Operator {
- CachedOperator(Operator::Properties properties, const char* mnemonic)
- : Operator(op, properties, mnemonic, value_input_count,
- effect_input_count, control_input_count, value_output_count,
- effect_output_count, control_output_count) {}
-};
-
-template <IrOpcode::Value op, int value_input_count, int control_input_count,
- int value_output_count>
-struct CachedPureOperator : public Operator {
- CachedPureOperator(Operator::Properties properties, const char* mnemonic)
- : Operator(op, Operator::kPure | properties, mnemonic, value_input_count,
- 0, control_input_count, value_output_count, 0, 0) {}
-};
-
-template <class Op>
-const Operator* GetCachedOperator() {
- STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
- static const Op op;
- return &op;
-}
-
-template <class Op>
-const Operator* GetCachedOperator(Operator::Properties properties,
- const char* mnemonic) {
-#ifdef DEBUG
- static Operator::Properties const initial_properties = properties;
- static const char* const initial_mnemonic = mnemonic;
- DCHECK_EQ(properties, initial_properties);
- DCHECK_EQ(mnemonic, initial_mnemonic);
-#endif
- STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
- static const Op op(properties, mnemonic);
- return &op;
-}
-
struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
explicit StackSlotOperator(int size, int alignment)
- : Operator1(IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
- "StackSlot", 0, 0, 0, 1, 0, 0,
- StackSlotRepresentation(size, alignment)) {}
-};
-
-template <int size, int alignment>
-struct CachedStackSlotOperator : StackSlotOperator {
- CachedStackSlotOperator() : StackSlotOperator(size, alignment) {}
+ : Operator1<StackSlotRepresentation>(
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
+ "StackSlot", 0, 0, 0, 1, 0, 0,
+ StackSlotRepresentation(size, alignment)) {}
};
+struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
- const OptionalOperator MachineOperatorBuilder::Name() { \
- return OptionalOperator( \
- flags_ & k##Name, \
- GetCachedOperator< \
- CachedPureOperator<IrOpcode::k##Name, value_input_count, \
- control_input_count, output_count>>(properties, \
- #Name)); \
- }
-PURE_OPTIONAL_OP_LIST(PURE)
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
+ value_input_count, 0, control_input_count, output_count, 0, \
+ 0) {} \
+ }; \
+ Name##Operator k##Name;
+ MACHINE_PURE_OP_LIST(PURE)
+ struct NormalWord32SarOperator final : public Operator1<ShiftKind> {
+ NormalWord32SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord32Sar, Operator::kPure,
+ "Word32Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kNormal) {}
+ };
+ NormalWord32SarOperator kNormalWord32Sar;
+ struct ShiftOutZerosWord32SarOperator final : public Operator1<ShiftKind> {
+ ShiftOutZerosWord32SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord32Sar, Operator::kPure,
+ "Word32Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kShiftOutZeros) {}
+ };
+ ShiftOutZerosWord32SarOperator kShiftOutZerosWord32Sar;
+ struct NormalWord64SarOperator final : public Operator1<ShiftKind> {
+ NormalWord64SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord64Sar, Operator::kPure,
+ "Word64Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kNormal) {}
+ };
+ NormalWord64SarOperator kNormalWord64Sar;
+ struct ShiftOutZerosWord64SarOperator final : public Operator1<ShiftKind> {
+ ShiftOutZerosWord64SarOperator()
+ : Operator1<ShiftKind>(IrOpcode::kWord64Sar, Operator::kPure,
+ "Word64Sar", 2, 0, 0, 1, 0, 0,
+ ShiftKind::kShiftOutZeros) {}
+ };
+ ShiftOutZerosWord64SarOperator kShiftOutZerosWord64Sar;
+
+ struct ArchitectureDefaultTruncateFloat32ToUint32Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat32ToUint32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToUint32,
+ Operator::kPure, "TruncateFloat32ToUint32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat32ToUint32Operator
+ kArchitectureDefaultTruncateFloat32ToUint32;
+ struct SetOverflowToMinTruncateFloat32ToUint32Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat32ToUint32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToUint32,
+ Operator::kPure, "TruncateFloat32ToUint32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat32ToUint32Operator
+ kSetOverflowToMinTruncateFloat32ToUint32;
+
+ struct ArchitectureDefaultTruncateFloat32ToInt32Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat32ToInt32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToInt32,
+ Operator::kPure, "TruncateFloat32ToInt32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat32ToInt32Operator
+ kArchitectureDefaultTruncateFloat32ToInt32;
+ struct SetOverflowToMinTruncateFloat32ToInt32Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat32ToInt32Operator()
+ : Operator1<TruncateKind>(IrOpcode::kTruncateFloat32ToInt32,
+ Operator::kPure, "TruncateFloat32ToInt32", 1,
+ 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat32ToInt32Operator
+ kSetOverflowToMinTruncateFloat32ToInt32;
+
+ struct ArchitectureDefaultTruncateFloat64ToInt64Operator final
+ : public Operator1<TruncateKind> {
+ ArchitectureDefaultTruncateFloat64ToInt64Operator()
+ : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
+ "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0,
+ TruncateKind::kArchitectureDefault) {}
+ };
+ ArchitectureDefaultTruncateFloat64ToInt64Operator
+ kArchitectureDefaultTruncateFloat64ToInt64;
+ struct SetOverflowToMinTruncateFloat64ToInt64Operator final
+ : public Operator1<TruncateKind> {
+ SetOverflowToMinTruncateFloat64ToInt64Operator()
+ : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
+ "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0,
+ TruncateKind::kSetOverflowToMin) {}
+ };
+ SetOverflowToMinTruncateFloat64ToInt64Operator
+ kSetOverflowToMinTruncateFloat64ToInt64;
+ PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-#define OVERFLOW_OP(Name, properties) \
- const Operator* MachineOperatorBuilder::Name() { \
- return GetCachedOperator< \
- CachedOperator<IrOpcode::k##Name, 2, 0, 1, 2, 0, 0>>( \
- Operator::kEliminatable | Operator::kNoRead | properties, #Name); \
- }
-OVERFLOW_OP_LIST(OVERFLOW_OP)
+ struct PrefetchTemporalOperator final : public Operator {
+ PrefetchTemporalOperator()
+ : Operator(IrOpcode::kPrefetchTemporal,
+ Operator::kNoDeopt | Operator::kNoThrow, "PrefetchTemporal",
+ 2, 1, 1, 0, 1, 0) {}
+ };
+ PrefetchTemporalOperator kPrefetchTemporal;
+ struct PrefetchNonTemporalOperator final : public Operator {
+ PrefetchNonTemporalOperator()
+ : Operator(IrOpcode::kPrefetchNonTemporal,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "PrefetchNonTemporal", 2, 1, 1, 0, 1, 0) {}
+ };
+ PrefetchNonTemporalOperator kPrefetchNonTemporal;
+
+#define OVERFLOW_OP(Name, properties) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kEliminatable | Operator::kNoRead | properties, \
+ #Name, 2, 0, 1, 2, 0, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-template <ShiftKind kind>
-struct Word32SarOperator : Operator1<ShiftKind> {
- Word32SarOperator()
- : Operator1(IrOpcode::kWord32Sar, Operator::kPure, "Word32Sar", 2, 0, 0,
- 1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
- switch (kind) {
- case ShiftKind::kNormal:
- return GetCachedOperator<Word32SarOperator<ShiftKind::kNormal>>();
- case ShiftKind::kShiftOutZeros:
- return GetCachedOperator<Word32SarOperator<ShiftKind::kShiftOutZeros>>();
- }
-}
-
-template <ShiftKind kind>
-struct Word64SarOperator : Operator1<ShiftKind> {
- Word64SarOperator()
- : Operator1(IrOpcode::kWord64Sar, Operator::kPure, "Word64Sar", 2, 0, 0,
- 1, 0, 0, kind) {}
-};
-
-const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
- switch (kind) {
- case ShiftKind::kNormal:
- return GetCachedOperator<Word64SarOperator<ShiftKind::kNormal>>();
- case ShiftKind::kShiftOutZeros:
- return GetCachedOperator<Word64SarOperator<ShiftKind::kShiftOutZeros>>();
- }
-}
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct LoadOperator : public Operator1<LoadRepresentation> {
- LoadOperator()
- : Operator1(IrOpcode::kLoad, Operator::kEliminatable, "Load", 2, 1, 1, 1,
- 1, 0, LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct PoisonedLoadOperator : public Operator1<LoadRepresentation> {
- PoisonedLoadOperator()
- : Operator1(IrOpcode::kPoisonedLoad, Operator::kEliminatable,
- "PoisonedLoad", 2, 1, 1, 1, 1, 0,
- LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct UnalignedLoadOperator : public Operator1<LoadRepresentation> {
- UnalignedLoadOperator()
- : Operator1(IrOpcode::kUnalignedLoad, Operator::kEliminatable,
- "UnalignedLoad", 2, 1, 1, 1, 1, 0,
- LoadRepresentation(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
- ProtectedLoadOperator()
- : Operator1(IrOpcode::kProtectedLoad,
- Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2,
- 1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
-};
-
-template <MemoryAccessKind kind, LoadTransformation type>
-struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
- LoadTransformOperator()
- : Operator1(IrOpcode::kLoadTransform,
- kind == MemoryAccessKind::kProtected
- ? Operator::kNoDeopt | Operator::kNoThrow
- : Operator::kEliminatable,
- "LoadTransform", 2, 1, 1, 1, 1, 0,
- LoadTransformParameters{kind, type}) {}
-};
-
-template <MemoryAccessKind kind, MachineRepresentation rep, MachineSemantic sem,
- uint8_t laneidx>
-struct LoadLaneOperator : public Operator1<LoadLaneParameters> {
- LoadLaneOperator()
- : Operator1(
- IrOpcode::kLoadLane,
- kind == MemoryAccessKind::kProtected
- ? Operator::kNoDeopt | Operator::kNoThrow
- : Operator::kEliminatable,
- "LoadLane", 3, 1, 1, 1, 1, 0,
- LoadLaneParameters{kind, LoadRepresentation(rep, sem), laneidx}) {}
-};
-
-template <MachineRepresentation rep, WriteBarrierKind write_barrier_kind>
-struct StoreOperator : public Operator1<StoreRepresentation> {
- StoreOperator()
- : Operator1(IrOpcode::kStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Store", 3, 1, 1, 0, 1, 0,
- StoreRepresentation(rep, write_barrier_kind)) {}
-};
-
-template <MachineRepresentation rep>
-struct UnalignedStoreOperator : public Operator1<UnalignedStoreRepresentation> {
- UnalignedStoreOperator()
- : Operator1(IrOpcode::kUnalignedStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "UnalignedStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
-
-template <MachineRepresentation rep>
-struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
- ProtectedStoreOperator()
- : Operator1(IrOpcode::kProtectedStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Store", 3, 1, 1, 0, 1, 0,
- StoreRepresentation(rep, kNoWriteBarrier)) {}
-};
-
-template <MemoryAccessKind kind, MachineRepresentation rep, uint8_t laneidx>
-struct StoreLaneOperator : public Operator1<StoreLaneParameters> {
- StoreLaneOperator()
- : Operator1(IrOpcode::kStoreLane,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "StoreLane", 3, 1, 1, 0, 1, 0,
- StoreLaneParameters{kind, rep, laneidx}) {}
-};
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>(IrOpcode::kLoad, \
+ Operator::kEliminatable, "Load", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct PoisonedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ PoisonedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, Operator::kEliminatable, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 2, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
+ ProtectedLoad##Type##Operator kProtectedLoad##Type;
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word32AtomicLoadOperator : public Operator1<LoadRepresentation> {
- Word32AtomicLoadOperator()
- : Operator1(IrOpcode::kWord32AtomicLoad, Operator::kEliminatable,
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
- }
-};
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ struct KIND##LoadTransform##TYPE##Operator final \
+ : public Operator1<LoadTransformParameters> { \
+ KIND##LoadTransform##TYPE##Operator() \
+ : Operator1<LoadTransformParameters>( \
+ IrOpcode::kLoadTransform, \
+ MemoryAccessKind::k##KIND == MemoryAccessKind::kProtected \
+ ? Operator::kNoDeopt | Operator::kNoThrow \
+ : Operator::kEliminatable, \
+ #KIND "LoadTransform", 2, 1, 1, 1, 1, 0, \
+ LoadTransformParameters{MemoryAccessKind::k##KIND, \
+ LoadTransformation::k##TYPE}) {} \
+ }; \
+ KIND##LoadTransform##TYPE##Operator k##KIND##LoadTransform##TYPE;
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word64AtomicLoadOperator : public Operator1<LoadRepresentation> {
- Word64AtomicLoadOperator()
- : Operator1(IrOpcode::kWord64AtomicLoad, Operator::kEliminatable,
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType(rep, sem)) {
- }
-};
+#define LOAD_TRANSFORM(TYPE) \
+ LOAD_TRANSFORM_KIND(TYPE, Normal) \
+ LOAD_TRANSFORM_KIND(TYPE, Unaligned) \
+ LOAD_TRANSFORM_KIND(TYPE, Protected)
-template <MachineRepresentation rep>
-struct Word32AtomicStoreOperator : public Operator1<MachineRepresentation> {
- Word32AtomicStoreOperator()
- : Operator1(IrOpcode::kWord32AtomicStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Word32AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
+ LOAD_TRANSFORM_LIST(LOAD_TRANSFORM)
+#undef LOAD_TRANSFORM
+#undef LOAD_TRANSFORM_KIND
-template <MachineRepresentation rep>
-struct Word64AtomicStoreOperator : public Operator1<MachineRepresentation> {
- Word64AtomicStoreOperator()
- : Operator1(IrOpcode::kWord64AtomicStore,
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
- "Word64AtomicStore", 3, 1, 1, 0, 1, 0, rep) {}
-};
+#define STACKSLOT(Size, Alignment) \
+ struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
+ : public StackSlotOperator { \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
+ : StackSlotOperator(Size, Alignment) {} \
+ }; \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
+ kStackSlotOfSize##Size##OfAlignment##Alignment;
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
+#undef STACKSLOT
+
+#define STORE(Type) \
+ struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
+ explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ write_barrier_kind)) {} \
+ }; \
+ struct Store##Type##NoWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##NoWriteBarrier##Operator() \
+ : Store##Type##Operator(kNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##AssertNoWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##AssertNoWriteBarrier##Operator() \
+ : Store##Type##Operator(kAssertNoWriteBarrier) {} \
+ }; \
+ struct Store##Type##MapWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##MapWriteBarrier##Operator() \
+ : Store##Type##Operator(kMapWriteBarrier) {} \
+ }; \
+ struct Store##Type##PointerWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##PointerWriteBarrier##Operator() \
+ : Store##Type##Operator(kPointerWriteBarrier) {} \
+ }; \
+ struct Store##Type##EphemeronKeyWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##EphemeronKeyWriteBarrier##Operator() \
+ : Store##Type##Operator(kEphemeronKeyWriteBarrier) {} \
+ }; \
+ struct Store##Type##FullWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##FullWriteBarrier##Operator() \
+ : Store##Type##Operator(kFullWriteBarrier) {} \
+ }; \
+ struct UnalignedStore##Type##Operator final \
+ : public Operator1<UnalignedStoreRepresentation> { \
+ UnalignedStore##Type##Operator() \
+ : Operator1<UnalignedStoreRepresentation>( \
+ IrOpcode::kUnalignedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "UnalignedStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ struct ProtectedStore##Type##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit ProtectedStore##Type##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kProtectedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 3, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kNoWriteBarrier)) {} \
+ }; \
+ Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##AssertNoWriteBarrier##Operator \
+ kStore##Type##AssertNoWriteBarrier; \
+ Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
+ Store##Type##PointerWriteBarrier##Operator \
+ kStore##Type##PointerWriteBarrier; \
+ Store##Type##EphemeronKeyWriteBarrier##Operator \
+ kStore##Type##EphemeronKeyWriteBarrier; \
+ Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
+ UnalignedStore##Type##Operator kUnalignedStore##Type; \
+ ProtectedStore##Type##Operator kProtectedStore##Type;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
-#define ATOMIC_OP(op) \
- template <MachineRepresentation rep, MachineSemantic sem> \
- struct op##Operator : public Operator1<MachineType> { \
- op##Operator() \
- : Operator1(IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, \
- #op, 3, 1, 1, 1, 1, 0, MachineType(rep, sem)) {} \
- };
-ATOMIC_OP(Word32AtomicAdd)
-ATOMIC_OP(Word32AtomicSub)
-ATOMIC_OP(Word32AtomicAnd)
-ATOMIC_OP(Word32AtomicOr)
-ATOMIC_OP(Word32AtomicXor)
-ATOMIC_OP(Word32AtomicExchange)
-ATOMIC_OP(Word64AtomicAdd)
-ATOMIC_OP(Word64AtomicSub)
-ATOMIC_OP(Word64AtomicAnd)
-ATOMIC_OP(Word64AtomicOr)
-ATOMIC_OP(Word64AtomicXor)
-ATOMIC_OP(Word64AtomicExchange)
+#define ATOMIC_LOAD(Type) \
+ struct Word32AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ Word32AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_LOAD(Type) \
+ struct Word64AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ Word64AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+ ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_STORE(Type) \
+ struct Word32AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word32AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
+#define ATOMIC_STORE(Type) \
+ struct Word64AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word64AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
+#define ATOMIC_OP(op, type) \
+ struct op##type##Operator : public Operator1<MachineType> { \
+ op##type##Operator() \
+ : Operator1<MachineType>(IrOpcode::k##op, \
+ Operator::kNoDeopt | Operator::kNoThrow, #op, \
+ 3, 1, 1, 1, 1, 0, MachineType::type()) {} \
+ }; \
+ op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(Word32AtomicAdd, type) \
+ ATOMIC_OP(Word32AtomicSub, type) \
+ ATOMIC_OP(Word32AtomicAnd, type) \
+ ATOMIC_OP(Word32AtomicOr, type) \
+ ATOMIC_OP(Word32AtomicXor, type) \
+ ATOMIC_OP(Word32AtomicExchange, type)
+ ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#define ATOMIC64_OP_LIST(type) \
+ ATOMIC_OP(Word64AtomicAdd, type) \
+ ATOMIC_OP(Word64AtomicSub, type) \
+ ATOMIC_OP(Word64AtomicAnd, type) \
+ ATOMIC_OP(Word64AtomicOr, type) \
+ ATOMIC_OP(Word64AtomicXor, type) \
+ ATOMIC_OP(Word64AtomicExchange, type)
+ ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
+#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word32AtomicCompareExchangeOperator : public Operator1<MachineType> {
- Word32AtomicCompareExchangeOperator()
- : Operator1(IrOpcode::kWord32AtomicCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
- MachineType(rep, sem)) {}
-};
-
-template <MachineRepresentation rep, MachineSemantic sem>
-struct Word64AtomicCompareExchangeOperator : public Operator1<MachineType> {
- Word64AtomicCompareExchangeOperator()
- : Operator1(IrOpcode::kWord64AtomicCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, 0,
- MachineType(rep, sem)) {}
-};
-
-struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
-};
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word32AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word32AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord32AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicCompareExchange##Type##Operator \
+ kWord32AtomicCompareExchange##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word64AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word64AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord64AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicCompareExchange##Type##Operator \
+ kWord64AtomicCompareExchange##Type;
+ ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+ struct Word32AtomicPairLoadOperator : public Operator {
+ Word32AtomicPairLoadOperator()
+ : Operator(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
-};
+ struct Word32AtomicPairStoreOperator : public Operator {
+ Word32AtomicPairStoreOperator()
+ : Operator(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ };
+ Word32AtomicPairStoreOperator kWord32AtomicPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
Word32AtomicPair##op##Operator() \
: Operator(IrOpcode::kWord32AtomicPair##op, \
Operator::kNoDeopt | Operator::kNoThrow, \
- "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {} \
- };
-ATOMIC_PAIR_OP(Add)
-ATOMIC_PAIR_OP(Sub)
-ATOMIC_PAIR_OP(And)
-ATOMIC_PAIR_OP(Or)
-ATOMIC_PAIR_OP(Xor)
-ATOMIC_PAIR_OP(Exchange)
+                   "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {} \
+ }; \
+ Word32AtomicPair##op##Operator kWord32AtomicPair##op;
+ ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
#undef ATOMIC_PAIR_OP
+#undef ATOMIC_PAIR_BINOP_LIST
-struct Word32AtomicPairCompareExchangeOperator : public Operator {
- Word32AtomicPairCompareExchangeOperator()
- : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
-};
-
-struct MemoryBarrierOperator : public Operator {
- MemoryBarrierOperator()
- : Operator(IrOpcode::kMemoryBarrier,
- Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0, 1,
- 1, 0, 1, 0) {}
-};
+ struct Word32AtomicPairCompareExchangeOperator : public Operator {
+ Word32AtomicPairCompareExchangeOperator()
+ : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
-// The {BitcastWordToTagged} operator must not be marked as pure (especially
-// not idempotent), because otherwise the splitting logic in the Scheduler
-// might decide to split these operators, thus potentially creating live
-// ranges of allocation top across calls or other things that might allocate.
-// See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
-struct BitcastWordToTaggedOperator : public Operator {
- BitcastWordToTaggedOperator()
- : Operator(IrOpcode::kBitcastWordToTagged,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
-};
+ struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ MemoryBarrierOperator kMemoryBarrier;
+
+ // The {BitcastWordToTagged} operator must not be marked as pure (especially
+ // not idempotent), because otherwise the splitting logic in the Scheduler
+ // might decide to split these operators, thus potentially creating live
+ // ranges of allocation top across calls or other things that might allocate.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+ struct BitcastWordToTaggedOperator : public Operator {
+ BitcastWordToTaggedOperator()
+ : Operator(IrOpcode::kBitcastWordToTagged,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastWordToTaggedOperator kBitcastWordToTagged;
-struct BitcastTaggedToWordOperator : public Operator {
- BitcastTaggedToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
-};
+ struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastTaggedToWordOperator kBitcastTaggedToWord;
-struct BitcastMaybeObjectToWordOperator : public Operator {
- BitcastMaybeObjectToWordOperator()
- : Operator(IrOpcode::kBitcastTaggedToWord,
- Operator::kEliminatable | Operator::kNoWrite,
- "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
-};
+ struct BitcastMaybeObjectToWordOperator : public Operator {
+ BitcastMaybeObjectToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastMaybeObjectToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
-struct TaggedPoisonOnSpeculationOperator : public Operator {
- TaggedPoisonOnSpeculationOperator()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct TaggedPoisonOnSpeculation : public Operator {
+ TaggedPoisonOnSpeculation()
+ : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-struct Word32PoisonOnSpeculationOperator : public Operator {
- Word32PoisonOnSpeculationOperator()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct Word32PoisonOnSpeculation : public Operator {
+ Word32PoisonOnSpeculation()
+ : Operator(IrOpcode::kWord32PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-struct Word64PoisonOnSpeculationOperator : public Operator {
- Word64PoisonOnSpeculationOperator()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
-};
+ struct Word64PoisonOnSpeculation : public Operator {
+ Word64PoisonOnSpeculation()
+ : Operator(IrOpcode::kWord64PoisonOnSpeculation,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
+ };
+ Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
-};
+ struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
+ };
+ AbortCSAAssertOperator kAbortCSAAssert;
-struct DebugBreakOperator : public Operator {
- DebugBreakOperator()
- : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0, 1,
- 1, 0, 1, 0) {}
-};
+ struct DebugBreakOperator : public Operator {
+ DebugBreakOperator()
+ : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ DebugBreakOperator kDebugBreak;
-struct UnsafePointerAddOperator : public Operator {
- UnsafePointerAddOperator()
- : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
- "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
-};
+ struct UnsafePointerAddOperator final : public Operator {
+ UnsafePointerAddOperator()
+ : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
+ "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
+ };
+ UnsafePointerAddOperator kUnsafePointerAdd;
-template <StackCheckKind kind>
-struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
- StackPointerGreaterThanOperator()
- : Operator1(IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
- "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
+ struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
+ explicit StackPointerGreaterThanOperator(StackCheckKind kind)
+ : Operator1<StackCheckKind>(
+ IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
+ "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
+ };
+#define STACK_POINTER_GREATER_THAN(Kind) \
+ struct StackPointerGreaterThan##Kind##Operator final \
+ : public StackPointerGreaterThanOperator { \
+ StackPointerGreaterThan##Kind##Operator() \
+ : StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \
+ }; \
+ StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
+
+ STACK_POINTER_GREATER_THAN(JSFunctionEntry)
+ STACK_POINTER_GREATER_THAN(JSIterationBody)
+ STACK_POINTER_GREATER_THAN(CodeStubAssembler)
+ STACK_POINTER_GREATER_THAN(Wasm)
+#undef STACK_POINTER_GREATER_THAN
};
struct CommentOperator : public Operator1<const char*> {
explicit CommentOperator(const char* msg)
- : Operator1(IrOpcode::kComment, Operator::kNoThrow | Operator::kNoWrite,
- "Comment", 0, 1, 1, 0, 1, 0, msg) {}
+ : Operator1<const char*>(IrOpcode::kComment,
+ Operator::kNoThrow | Operator::kNoWrite,
+ "Comment", 0, 1, 1, 0, 1, 0, msg) {}
};
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
+ GetMachineOperatorGlobalCache)
+}
+
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: zone_(zone),
+ cache_(*GetMachineOperatorGlobalCache()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
@@ -1072,11 +1254,9 @@ MachineOperatorBuilder::MachineOperatorBuilder(
}
const Operator* MachineOperatorBuilder::UnalignedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- UnalignedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kUnalignedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1088,8 +1268,7 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
switch (rep) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
- return GetCachedOperator< \
- UnalignedStoreOperator<MachineRepresentation::kRep>>();
+ return &cache_.kUnalignedStore##kRep;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
@@ -1099,103 +1278,85 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
UNREACHABLE();
}
-template <TruncateKind kind>
-struct TruncateFloat32ToUint32Operator : Operator1<TruncateKind> {
- TruncateFloat32ToUint32Operator()
- : Operator1(IrOpcode::kTruncateFloat32ToUint32, Operator::kPure,
- "TruncateFloat32ToUint32", 1, 0, 0, 1, 0, 0, kind) {}
-};
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+MACHINE_PURE_OP_LIST(PURE)
+#undef PURE
-const Operator* MachineOperatorBuilder::TruncateFloat32ToUint32(
- TruncateKind kind) {
+const Operator* MachineOperatorBuilder::Word32Sar(ShiftKind kind) {
switch (kind) {
- case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<TruncateFloat32ToUint32Operator<
- TruncateKind::kArchitectureDefault>>();
- case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat32ToUint32Operator<TruncateKind::kSetOverflowToMin>>();
+ case ShiftKind::kNormal:
+ return &cache_.kNormalWord32Sar;
+ case ShiftKind::kShiftOutZeros:
+ return &cache_.kShiftOutZerosWord32Sar;
}
}
-template <TruncateKind kind>
-struct TruncateFloat32ToInt32Operator : Operator1<TruncateKind> {
- TruncateFloat32ToInt32Operator()
- : Operator1(IrOpcode::kTruncateFloat32ToInt32, Operator::kPure,
- "TruncateFloat32ToInt32", 1, 0, 0, 1, 0, 0, kind) {}
-};
+const Operator* MachineOperatorBuilder::Word64Sar(ShiftKind kind) {
+ switch (kind) {
+ case ShiftKind::kNormal:
+ return &cache_.kNormalWord64Sar;
+ case ShiftKind::kShiftOutZeros:
+ return &cache_.kShiftOutZerosWord64Sar;
+ }
+}
-const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
+const Operator* MachineOperatorBuilder::TruncateFloat32ToUint32(
TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<
- TruncateFloat32ToInt32Operator<TruncateKind::kArchitectureDefault>>();
+ return &cache_.kArchitectureDefaultTruncateFloat32ToUint32;
case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat32ToInt32Operator<TruncateKind::kSetOverflowToMin>>();
+ return &cache_.kSetOverflowToMinTruncateFloat32ToUint32;
}
}
-template <TruncateKind kind>
-struct TruncateFloat64ToInt64Operator : Operator1<TruncateKind> {
- TruncateFloat64ToInt64Operator()
- : Operator1(IrOpcode::kTruncateFloat64ToInt64, Operator::kPure,
- "TruncateFloat64ToInt64", 1, 0, 0, 1, 0, 0, kind) {}
-};
-
const Operator* MachineOperatorBuilder::TruncateFloat64ToInt64(
TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return GetCachedOperator<
- TruncateFloat64ToInt64Operator<TruncateKind::kArchitectureDefault>>();
+ return &cache_.kArchitectureDefaultTruncateFloat64ToInt64;
case TruncateKind::kSetOverflowToMin:
- return GetCachedOperator<
- TruncateFloat64ToInt64Operator<TruncateKind::kSetOverflowToMin>>();
+ return &cache_.kSetOverflowToMinTruncateFloat64ToInt64;
}
}
-size_t hash_value(TruncateKind kind) { return static_cast<size_t>(kind); }
-
-std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
+const Operator* MachineOperatorBuilder::TruncateFloat32ToInt32(
+ TruncateKind kind) {
switch (kind) {
case TruncateKind::kArchitectureDefault:
- return os << "kArchitectureDefault";
+ return &cache_.kArchitectureDefaultTruncateFloat32ToInt32;
case TruncateKind::kSetOverflowToMin:
- return os << "kSetOverflowToMin";
+ return &cache_.kSetOverflowToMinTruncateFloat32ToInt32;
}
}
-#define PURE(Name, properties, value_input_count, control_input_count, \
- output_count) \
- const Operator* MachineOperatorBuilder::Name() { \
- return GetCachedOperator< \
- CachedPureOperator<IrOpcode::k##Name, value_input_count, \
- control_input_count, output_count>>(properties, \
- #Name); \
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator(flags_ & k##Name, &cache_.k##Name); \
}
-MACHINE_PURE_OP_LIST(PURE)
+PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
const Operator* MachineOperatorBuilder::PrefetchTemporal() {
- return GetCachedOperator<
- CachedOperator<IrOpcode::kPrefetchTemporal, 2, 1, 1, 0, 1, 0>>(
- Operator::kNoDeopt | Operator::kNoThrow, "PrefetchTemporal");
+ return &cache_.kPrefetchTemporal;
}
const Operator* MachineOperatorBuilder::PrefetchNonTemporal() {
- return GetCachedOperator<
- CachedOperator<IrOpcode::kPrefetchNonTemporal, 2, 1, 1, 0, 1, 0>>(
- Operator::kNoDeopt | Operator::kNoThrow, "PrefetchNonTemporal");
+ return &cache_.kPrefetchNonTemporal;
}
+#define OVERFLOW_OP(Name, properties) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
+
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- LoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1203,11 +1364,9 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- PoisonedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kPoisonedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1215,11 +1374,9 @@ const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- ProtectedLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kProtectedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -1228,11 +1385,10 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
const Operator* MachineOperatorBuilder::LoadTransform(
MemoryAccessKind kind, LoadTransformation transform) {
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- if (kind == MemoryAccessKind::k##KIND && \
- transform == LoadTransformation::k##TYPE) { \
- return GetCachedOperator<LoadTransformOperator< \
- MemoryAccessKind::k##KIND, LoadTransformation::k##TYPE>>(); \
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ transform == LoadTransformation::k##TYPE) { \
+ return &cache_.k##KIND##LoadTransform##TYPE; \
}
#define LOAD_TRANSFORM(TYPE) \
LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1251,9 +1407,14 @@ const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
#define LOAD_LANE_KIND(TYPE, KIND, LANEIDX) \
if (kind == MemoryAccessKind::k##KIND && rep == MachineType::TYPE() && \
laneidx == LANEIDX) { \
- return GetCachedOperator<LoadLaneOperator< \
- MemoryAccessKind::k##KIND, MachineType::TYPE().representation(), \
- MachineType::TYPE().semantic(), LANEIDX>>(); \
+ return zone_->New<Operator1<LoadLaneParameters>>( \
+ IrOpcode::kLoadLane, \
+ MemoryAccessKind::k##KIND == MemoryAccessKind::kProtected \
+ ? Operator::kNoDeopt | Operator::kNoThrow \
+ : Operator::kEliminatable, \
+ "LoadLane", 3, 1, 1, 1, 1, 0, \
+ LoadLaneParameters{MemoryAccessKind::k##KIND, \
+ LoadRepresentation::TYPE(), LANEIDX}); \
}
#define LOAD_LANE_T(T, LANE) \
@@ -1282,11 +1443,15 @@ const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
MachineRepresentation rep,
uint8_t laneidx) {
-#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
- if (kind == MemoryAccessKind::k##KIND && \
- rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
- return GetCachedOperator<StoreLaneOperator< \
- MemoryAccessKind::k##KIND, MachineRepresentation::REP, LANEIDX>>(); \
+#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
+ return zone_->New<Operator1<StoreLaneParameters>>( \
+ IrOpcode::kStoreLane, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreLane", 3, 1, 1, 0, 1, 0, \
+ StoreLaneParameters{MemoryAccessKind::k##KIND, \
+ MachineRepresentation::REP, LANEIDX}); \
}
#define STORE_LANE_T(T, LANE) \
@@ -1315,9 +1480,9 @@ const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
-#define CASE_CACHED_SIZE(Size, Alignment) \
- if (size == Size && alignment == Alignment) { \
- return GetCachedOperator<CachedStackSlotOperator<Size, Alignment>>(); \
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
}
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
@@ -1333,28 +1498,22 @@ const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- switch (store_rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kNoWriteBarrier>>(); \
- case kAssertNoWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kAssertNoWriteBarrier>>(); \
- case kMapWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kMapWriteBarrier>>(); \
- case kPointerWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kPointerWriteBarrier>>(); \
- case kEphemeronKeyWriteBarrier: \
- return GetCachedOperator<StoreOperator<MachineRepresentation::kRep, \
- kEphemeronKeyWriteBarrier>>(); \
- case kFullWriteBarrier: \
- return GetCachedOperator< \
- StoreOperator<MachineRepresentation::kRep, kFullWriteBarrier>>(); \
- } \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &cache_.k##Store##kRep##NoWriteBarrier; \
+ case kAssertNoWriteBarrier: \
+ return &cache_.k##Store##kRep##AssertNoWriteBarrier; \
+ case kMapWriteBarrier: \
+ return &cache_.k##Store##kRep##MapWriteBarrier; \
+ case kPointerWriteBarrier: \
+ return &cache_.k##Store##kRep##PointerWriteBarrier; \
+ case kEphemeronKeyWriteBarrier: \
+ return &cache_.k##Store##kRep##EphemeronKeyWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &cache_.k##Store##kRep##FullWriteBarrier; \
+ } \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1368,10 +1527,9 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
const Operator* MachineOperatorBuilder::ProtectedStore(
MachineRepresentation rep) {
switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return GetCachedOperator< \
- ProtectedStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kProtectedStore##kRep; \
break;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1383,46 +1541,42 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
- return GetCachedOperator<UnsafePointerAddOperator>();
+ return &cache_.kUnsafePointerAdd;
}
const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
StackCheckKind kind) {
switch (kind) {
case StackCheckKind::kJSFunctionEntry:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kJSFunctionEntry>>();
+ return &cache_.kStackPointerGreaterThanJSFunctionEntry;
case StackCheckKind::kJSIterationBody:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kJSIterationBody>>();
+ return &cache_.kStackPointerGreaterThanJSIterationBody;
case StackCheckKind::kCodeStubAssembler:
- return GetCachedOperator<StackPointerGreaterThanOperator<
- StackCheckKind::kCodeStubAssembler>>();
+ return &cache_.kStackPointerGreaterThanCodeStubAssembler;
case StackCheckKind::kWasm:
- return GetCachedOperator<
- StackPointerGreaterThanOperator<StackCheckKind::kWasm>>();
+ return &cache_.kStackPointerGreaterThanWasm;
}
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
- return GetCachedOperator<BitcastWordToTaggedOperator>();
+ return &cache_.kBitcastWordToTagged;
}
const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
- return GetCachedOperator<BitcastTaggedToWordOperator>();
+ return &cache_.kBitcastTaggedToWord;
}
const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
- return GetCachedOperator<BitcastMaybeObjectToWordOperator>();
+ return &cache_.kBitcastMaybeObjectToWord;
}
const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return GetCachedOperator<AbortCSAAssertOperator>();
+ return &cache_.kAbortCSAAssert;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
- return GetCachedOperator<DebugBreakOperator>();
+ return &cache_.kDebugBreak;
}
const Operator* MachineOperatorBuilder::Comment(const char* msg) {
@@ -1430,16 +1584,14 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
}
const Operator* MachineOperatorBuilder::MemBarrier() {
- return GetCachedOperator<MemoryBarrierOperator>();
+ return &cache_.kMemoryBarrier;
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord32AtomicLoad##Type; \
}
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
@@ -1448,10 +1600,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicLoad(
const Operator* MachineOperatorBuilder::Word32AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return GetCachedOperator< \
- Word32AtomicStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord32AtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1459,11 +1610,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicStore(
}
const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
-#define EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicExchangeOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicExchange##kType; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1472,11 +1621,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator<Word32AtomicCompareExchangeOperator< \
- MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicCompareExchange##kType; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1484,11 +1631,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
-#define ADD(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicAddOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAdd##kType; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
@@ -1496,11 +1641,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
-#define SUB(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicSubOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicSub##kType; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
@@ -1508,11 +1651,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
-#define AND(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicAndOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAnd##kType; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
@@ -1520,11 +1661,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
-#define OR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicOrOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicOr##kType; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
@@ -1532,11 +1671,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
-#define XOR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word32AtomicXorOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicXor##kType; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
@@ -1545,11 +1682,9 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicLoadOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord64AtomicLoad##Type; \
}
ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
@@ -1558,10 +1693,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicLoad(
const Operator* MachineOperatorBuilder::Word64AtomicStore(
MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return GetCachedOperator< \
- Word64AtomicStoreOperator<MachineRepresentation::kRep>>(); \
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord64AtomicStore##kRep; \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -1569,11 +1703,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicStore(
}
const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
-#define ADD(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicAddOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAdd##kType; \
}
ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
@@ -1581,11 +1713,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
-#define SUB(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicSubOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicSub##kType; \
}
ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
@@ -1593,11 +1723,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
-#define AND(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicAndOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAnd##kType; \
}
ATOMIC_U64_TYPE_LIST(AND)
#undef AND
@@ -1605,11 +1733,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
-#define OR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicOrOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicOr##kType; \
}
ATOMIC_U64_TYPE_LIST(OR)
#undef OR
@@ -1617,11 +1743,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
-#define XOR(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicXorOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicXor##kType; \
}
ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
@@ -1629,11 +1753,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
-#define EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator< \
- Word64AtomicExchangeOperator<MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicExchange##kType; \
}
ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -1642,11 +1764,9 @@ const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
MachineType type) {
-#define COMPARE_EXCHANGE(Type) \
- if (type == MachineType::Type()) { \
- return GetCachedOperator<Word64AtomicCompareExchangeOperator< \
- MachineType::Type().representation(), \
- MachineType::Type().semantic()>>(); \
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicCompareExchange##kType; \
}
ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
@@ -1654,51 +1774,51 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
}
const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return GetCachedOperator<Word32AtomicPairLoadOperator>();
+ return &cache_.kWord32AtomicPairLoad;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return GetCachedOperator<Word32AtomicPairStoreOperator>();
+ return &cache_.kWord32AtomicPairStore;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
- return GetCachedOperator<Word32AtomicPairAddOperator>();
+ return &cache_.kWord32AtomicPairAdd;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
- return GetCachedOperator<Word32AtomicPairSubOperator>();
+ return &cache_.kWord32AtomicPairSub;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
- return GetCachedOperator<Word32AtomicPairAndOperator>();
+ return &cache_.kWord32AtomicPairAnd;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
- return GetCachedOperator<Word32AtomicPairOrOperator>();
+ return &cache_.kWord32AtomicPairOr;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
- return GetCachedOperator<Word32AtomicPairXorOperator>();
+ return &cache_.kWord32AtomicPairXor;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
- return GetCachedOperator<Word32AtomicPairExchangeOperator>();
+ return &cache_.kWord32AtomicPairExchange;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
- return GetCachedOperator<Word32AtomicPairCompareExchangeOperator>();
+ return &cache_.kWord32AtomicPairCompareExchange;
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return GetCachedOperator<TaggedPoisonOnSpeculationOperator>();
+ return &cache_.kTaggedPoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return GetCachedOperator<Word32PoisonOnSpeculationOperator>();
+ return &cache_.kWord32PoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return GetCachedOperator<Word64PoisonOnSpeculationOperator>();
+ return &cache_.kWord64PoisonOnSpeculation;
}
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index eceeb623b0..c798580845 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -18,6 +18,7 @@ namespace internal {
namespace compiler {
// Forward declarations.
+struct MachineOperatorGlobalCache;
class Operator;
@@ -625,6 +626,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Floor();
const Operator* F64x2Trunc();
const Operator* F64x2NearestInt();
+ const Operator* F64x2ConvertLowI32x4S();
+ const Operator* F64x2ConvertLowI32x4U();
+ const Operator* F64x2PromoteLowF32x4();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -655,12 +659,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Floor();
const Operator* F32x4Trunc();
const Operator* F32x4NearestInt();
+ const Operator* F32x4DemoteF64x2Zero();
const Operator* I64x2Splat();
const Operator* I64x2SplatI32Pair();
const Operator* I64x2ExtractLane(int32_t);
const Operator* I64x2ReplaceLane(int32_t);
const Operator* I64x2ReplaceLaneI32Pair(int32_t);
+ const Operator* I64x2Abs();
const Operator* I64x2Neg();
const Operator* I64x2SConvertI32x4Low();
const Operator* I64x2SConvertI32x4High();
@@ -673,6 +679,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I64x2Sub();
const Operator* I64x2Mul();
const Operator* I64x2Eq();
+ const Operator* I64x2Ne();
+ const Operator* I64x2GtS();
+ const Operator* I64x2GeS();
const Operator* I64x2ShrU();
const Operator* I64x2ExtMulLowI32x4S();
const Operator* I64x2ExtMulHighI32x4S();
@@ -718,6 +727,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4SignSelect();
const Operator* I32x4ExtAddPairwiseI16x8S();
const Operator* I32x4ExtAddPairwiseI16x8U();
+ const Operator* I32x4TruncSatF64x2SZero();
+ const Operator* I32x4TruncSatF64x2UZero();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -813,11 +824,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Swizzle();
const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* V32x4AnyTrue();
+ const Operator* V128AnyTrue();
+ const Operator* V64x2AllTrue();
const Operator* V32x4AllTrue();
- const Operator* V16x8AnyTrue();
const Operator* V16x8AllTrue();
- const Operator* V8x16AnyTrue();
const Operator* V8x16AllTrue();
// load [base + index]
@@ -981,6 +991,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
private:
Zone* zone_;
+ MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 858cec5cb3..0208b3ec5f 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -41,7 +41,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kRetain:
case IrOpcode::kStackPointerGreaterThan:
case IrOpcode::kStaticAssert:
- // TODO(tebbi): Store nodes might do a bump-pointer allocation.
+ // TODO(turbofan): Store nodes might do a bump-pointer allocation.
// We should introduce a special bump-pointer store node to
// differentiate that.
case IrOpcode::kStore:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index a342afc94d..a1e254d333 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -309,7 +309,7 @@ struct BinopMatcher : public NodeMatcher {
protected:
void SwapInputs() {
std::swap(left_, right_);
- // TODO(tebbi): This modification should notify the reducers using
+ // TODO(turbofan): This modification should notify the reducers using
// BinopMatcher. Alternatively, all reducers (especially value numbering)
// could ignore the ordering for commutative binops.
node()->ReplaceInput(0, left().node());
diff --git a/deps/v8/src/compiler/node-observer.cc b/deps/v8/src/compiler/node-observer.cc
new file mode 100644
index 0000000000..52953dbabc
--- /dev/null
+++ b/deps/v8/src/compiler/node-observer.cc
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-observer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ObservableNodeState::ObservableNodeState(const Node* node, Zone* zone)
+ : id_(node->id()),
+ op_(node->op()),
+ type_(NodeProperties::GetTypeOrAny(node)) {}
+
+void ObserveNodeManager::StartObserving(Node* node, NodeObserver* observer) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NOT_NULL(observer);
+ DCHECK(observations_.find(node->id()) == observations_.end());
+
+ observer->set_has_observed_changes();
+ NodeObserver::Observation observation = observer->OnNodeCreated(node);
+ if (observation == NodeObserver::Observation::kContinue) {
+ observations_[node->id()] =
+ zone_->New<NodeObservation>(observer, node, zone_);
+ } else {
+ DCHECK_EQ(observation, NodeObserver::Observation::kStop);
+ }
+}
+
+void ObserveNodeManager::OnNodeChanged(const char* reducer_name,
+ const Node* old_node,
+ const Node* new_node) {
+ const auto it = observations_.find(old_node->id());
+ if (it == observations_.end()) return;
+
+ ObservableNodeState new_state{new_node, zone_};
+ NodeObservation* observation = it->second;
+ if (observation->state == new_state) return;
+
+ ObservableNodeState old_state = observation->state;
+ observation->state = new_state;
+
+ NodeObserver::Observation result =
+ observation->observer->OnNodeChanged(reducer_name, new_node, old_state);
+ if (result == NodeObserver::Observation::kStop) {
+ observations_.erase(old_node->id());
+ } else {
+ DCHECK_EQ(result, NodeObserver::Observation::kContinue);
+ if (old_node != new_node) {
+ observations_.erase(old_node->id());
+ observations_[new_node->id()] = observation;
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node-observer.h b/deps/v8/src/compiler/node-observer.h
new file mode 100644
index 0000000000..8978156464
--- /dev/null
+++ b/deps/v8/src/compiler/node-observer.h
@@ -0,0 +1,130 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file declares the implementation of a new intrinsic %ObserveNode(expr),
+// which has noop semantics but triggers the invocation of callbacks on a
+// NodeObserver object. The NodeObserver is set on the OptimizedCompilationInfo
+// and callbacks are called when the node generated for 'expr' is created or
+// changed in any phase, until EffectControlLinearization.
+//
+// The modifications currently observed are changes to the observed Node
+// operator and type and its replacement with another Node.
+//
+// This provides the infrastructure to write unit tests that check for the
+// construction of or the lowering to specific nodes in the TurboFan graphs.
+
+#ifndef V8_COMPILER_NODE_OBSERVER_H_
+#define V8_COMPILER_NODE_OBSERVER_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+class Operator;
+
+class ObservableNodeState {
+ public:
+ ObservableNodeState(const Node* node, Zone* zone);
+
+ uint32_t id() const { return id_; }
+ const Operator* op() const { return op_; }
+ int16_t opcode() const { return op_->opcode(); }
+ Type type() const { return type_; }
+
+ private:
+ uint32_t id_;
+ const Operator* op_;
+ Type type_;
+};
+
+inline bool operator==(const ObservableNodeState& lhs,
+ const ObservableNodeState& rhs) {
+ return lhs.id() == rhs.id() && lhs.op() == rhs.op() &&
+ lhs.type() == rhs.type();
+}
+
+inline bool operator!=(const ObservableNodeState& lhs,
+ const ObservableNodeState& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+class NodeObserver : public ZoneObject {
+ public:
+ enum class Observation {
+ kContinue,
+ kStop,
+ };
+
+ NodeObserver() = default;
+ virtual ~NodeObserver() = 0;
+
+ NodeObserver(const NodeObserver&) = delete;
+ NodeObserver& operator=(const NodeObserver&) = delete;
+
+ virtual Observation OnNodeCreated(const Node* node) {
+ return Observation::kContinue;
+ }
+
+ virtual Observation OnNodeChanged(const char* reducer_name, const Node* node,
+ const ObservableNodeState& old_state) {
+ return Observation::kContinue;
+ }
+
+ void set_has_observed_changes() { has_observed_changes_ = true; }
+ bool has_observed_changes() const { return has_observed_changes_; }
+
+ private:
+ bool has_observed_changes_ = false;
+};
+inline NodeObserver::~NodeObserver() = default;
+
+struct NodeObservation : public ZoneObject {
+ NodeObservation(NodeObserver* node_observer, const Node* node, Zone* zone)
+ : observer(node_observer), state(node, zone) {
+ DCHECK_NOT_NULL(node_observer);
+ }
+
+ NodeObserver* observer;
+ ObservableNodeState state;
+};
+
+class ObserveNodeManager : public ZoneObject {
+ public:
+ explicit ObserveNodeManager(Zone* zone) : zone_(zone), observations_(zone) {}
+
+ void StartObserving(Node* node, NodeObserver* observer);
+ void OnNodeChanged(const char* reducer_name, const Node* old_node,
+ const Node* new_node);
+
+ private:
+ Zone* zone_;
+ ZoneMap<NodeId, NodeObservation*> observations_;
+};
+
+struct ObserveNodeInfo {
+ ObserveNodeInfo() : observe_node_manager(nullptr), node_observer(nullptr) {}
+ ObserveNodeInfo(ObserveNodeManager* manager, NodeObserver* observer)
+ : observe_node_manager(manager), node_observer(observer) {}
+
+ void StartObserving(Node* node) const {
+ if (observe_node_manager) {
+ DCHECK_NOT_NULL(node_observer);
+ observe_node_manager->StartObserving(node, node_observer);
+ }
+ }
+
+ ObserveNodeManager* observe_node_manager;
+ NodeObserver* node_observer;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_OBSERVER_H_
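
The header added above only declares the observer interface. As a quick illustration (not part of this patch), a test-side observer could look like the following minimal sketch; the class name CountingNodeObserver and its counters are hypothetical, it uses only the virtuals declared in node-observer.h, and the wiring of the observer into OptimizedCompilationInfo is deliberately omitted.

// Illustrative sketch only -- not part of this diff.
#include "src/compiler/node-observer.h"

namespace v8 {
namespace internal {
namespace compiler {

class CountingNodeObserver final : public NodeObserver {
 public:
  // Called once when the observed node is created; returning kContinue keeps
  // the observation alive so later changes are reported as well.
  Observation OnNodeCreated(const Node* node) override {
    ++created_;
    return Observation::kContinue;
  }

  // Called whenever a reducer changes the observed node's operator or type,
  // or replaces it with another node.
  Observation OnNodeChanged(const char* reducer_name, const Node* node,
                            const ObservableNodeState& old_state) override {
    ++changed_;
    return Observation::kContinue;
  }

  int created() const { return created_; }
  int changed() const { return changed_; }

 private:
  int created_ = 0;
  int changed_ = 0;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8
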
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index bc25b83d92..3219c216c6 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -568,11 +568,10 @@ Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
}
// static
-Type NodeProperties::GetTypeOrAny(Node* node) {
+Type NodeProperties::GetTypeOrAny(const Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
-
// static
bool NodeProperties::AllValueInputsAreTyped(Node* node) {
int input_count = node->op()->ValueInputCount();
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 059db4f5cb..d08d328bee 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -21,7 +21,7 @@ class Operator;
class CommonOperatorBuilder;
// A facade that simplifies access to the different kinds of inputs to a node.
-class V8_EXPORT_PRIVATE NodeProperties final {
+class V8_EXPORT_PRIVATE NodeProperties {
public:
// ---------------------------------------------------------------------------
// Input layout.
@@ -244,12 +244,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// ---------------------------------------------------------------------------
// Type.
- static bool IsTyped(Node* node) { return !node->type().IsInvalid(); }
+ static bool IsTyped(const Node* node) { return !node->type().IsInvalid(); }
static Type GetType(Node* node) {
DCHECK(IsTyped(node));
return node->type();
}
- static Type GetTypeOrAny(Node* node);
+ static Type GetTypeOrAny(const Node* node);
static void SetType(Node* node, Type type) {
DCHECK(!type.IsInvalid());
node->set_type(type);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 8525fa0b01..912c0966d1 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -499,3 +499,7 @@ bool Node::Uses::empty() const { return begin() == end(); }
} // namespace compiler
} // namespace internal
} // namespace v8
+
+V8_EXPORT_PRIVATE extern void _v8_internal_Node_Print(void* object) {
+ reinterpret_cast<i::compiler::Node*>(object)->Print();
+}
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 823bee4597..117bea7212 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -367,25 +367,6 @@ class Control : public NodeWrapper {
}
};
-class FrameState : public NodeWrapper {
- public:
- explicit constexpr FrameState(Node* node) : NodeWrapper(node) {
- // TODO(jgruber): Disallow kStart (needed for PromiseConstructorBasic unit
- // test, among others).
- SLOW_DCHECK(node->opcode() == IrOpcode::kFrameState ||
- node->opcode() == IrOpcode::kStart);
- }
-
- // Duplicating here from frame-states.h for ease of access and to keep
- // header include-balls small. Equality of the two constants is
- // static-asserted elsewhere.
- static constexpr int kFrameStateOuterStateInput = 5;
-
- FrameState outer_frame_state() const {
- return FrameState{node()->InputAt(kFrameStateOuterStateInput)};
- }
-};
-
// Typedefs to shorten commonly used Node containers.
using NodeDeque = ZoneDeque<Node*>;
using NodeSet = ZoneSet<Node*>;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index cf65864e8d..bd2011ada2 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -196,7 +196,8 @@
V(JSCall) \
V(JSCallForwardVarargs) \
V(JSCallWithArrayLike) \
- V(JSCallWithSpread)
+ V(JSCallWithSpread) \
+ V(JSWasmCall)
#define JS_CONSTRUCT_OP_LIST(V) \
V(JSConstructForwardVarargs) \
@@ -393,7 +394,6 @@
#define SIMPLIFIED_OTHER_OP_LIST(V) \
V(Allocate) \
V(AllocateRaw) \
- V(ArgumentsFrame) \
V(ArgumentsLength) \
V(AssertType) \
V(BooleanNot) \
@@ -783,6 +783,9 @@
V(F64x2Floor) \
V(F64x2Trunc) \
V(F64x2NearestInt) \
+ V(F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -814,11 +817,13 @@
V(F32x4Floor) \
V(F32x4Trunc) \
V(F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero) \
V(I64x2Splat) \
V(I64x2SplatI32Pair) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
V(I64x2ReplaceLaneI32Pair) \
+ V(I64x2Abs) \
V(I64x2Neg) \
V(I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High) \
@@ -831,6 +836,9 @@
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
V(I64x2ShrU) \
V(I64x2ExtMulLowI32x4S) \
V(I64x2ExtMulHighI32x4S) \
@@ -878,6 +886,8 @@
V(I32x4SignSelect) \
V(I32x4ExtAddPairwiseI16x8S) \
V(I32x4ExtAddPairwiseI16x8U) \
+ V(I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -973,11 +983,10 @@
V(S128AndNot) \
V(I8x16Swizzle) \
V(I8x16Shuffle) \
- V(V32x4AnyTrue) \
+ V(V128AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
- V(V16x8AnyTrue) \
V(V16x8AllTrue) \
- V(V8x16AnyTrue) \
V(V8x16AllTrue) \
V(LoadTransform) \
V(PrefetchTemporal) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index a8e29416b5..8c72ae3c72 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -224,6 +224,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCall:
case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
+ case IrOpcode::kJSWasmCall:
// Misc operations
case IrOpcode::kJSAsyncFunctionEnter:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 3239eb0269..e47441208f 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -68,6 +68,8 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Operator(const Operator&) = delete;
Operator& operator=(const Operator&) = delete;
+ virtual ~Operator() = default;
+
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
// the opcode is stored directly in the operator object.
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index d497fc5669..d8ebe23abd 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -16,9 +16,9 @@ namespace compiler {
OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
: parameter_count_(info->bytecode_array()->parameter_count()),
- stack_slot_count_(InterpreterFrameConstants::RegisterStackSlotCount(
+ stack_slot_count_(UnoptimizedFrameConstants::RegisterStackSlotCount(
info->bytecode_array()->register_count()) +
- InterpreterFrameConstants::kExtraSlotCount) {}
+ UnoptimizedFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) {
// The optimized frame will subsume the unoptimized frame. Do so by reserving
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index 75e4833e1a..84e905b812 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -28,8 +28,8 @@ namespace compiler {
// - iteration: amortized O(1) per step
// - Zip: O(n)
// - equality check: O(n)
-// TODO(tebbi): Cache map transitions to avoid re-allocation of the same map.
-// TODO(tebbi): Implement an O(1) equality check based on hash consing or
+// TODO(turbofan): Cache map transitions to avoid re-allocation of the same map.
+// TODO(turbofan): Implement an O(1) equality check based on hash consing or
// something similar.
template <class Key, class Value, class Hasher = base::hash<Key>>
class PersistentMap {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 42560b5451..c4e88db841 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -60,6 +60,7 @@
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
@@ -174,6 +175,10 @@ class PipelineData {
javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
simplified_, machine_);
+ observe_node_manager_ =
+ info->node_observer()
+ ? graph_zone_->New<ObserveNodeManager>(graph_zone_)
+ : nullptr;
dependencies_ =
info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
}
@@ -346,6 +351,10 @@ class PipelineData {
}
void reset_schedule() { schedule_ = nullptr; }
+ ObserveNodeManager* observe_node_manager() const {
+ return observe_node_manager_;
+ }
+
Zone* instruction_zone() const { return instruction_zone_; }
Zone* codegen_zone() const { return codegen_zone_; }
InstructionSequence* sequence() const { return sequence_; }
@@ -567,6 +576,12 @@ class PipelineData {
runtime_call_stats_ = stats;
}
+ // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls.
+ bool has_js_wasm_calls() const { return has_js_wasm_calls_; }
+ void set_has_js_wasm_calls(bool has_js_wasm_calls) {
+ has_js_wasm_calls_ = has_js_wasm_calls;
+ }
+
private:
Isolate* const isolate_;
wasm::WasmEngine* const wasm_engine_ = nullptr;
@@ -600,6 +615,7 @@ class PipelineData {
JSGraph* jsgraph_ = nullptr;
MachineGraph* mcgraph_ = nullptr;
Schedule* schedule_ = nullptr;
+ ObserveNodeManager* observe_node_manager_ = nullptr;
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
@@ -639,6 +655,8 @@ class PipelineData {
RuntimeCallStats* runtime_call_stats_ = nullptr;
const ProfileDataFromFile* profile_data_ = nullptr;
+
+ bool has_js_wasm_calls_ = false;
};
class PipelineImpl final {
@@ -693,6 +711,8 @@ class PipelineImpl final {
Isolate* isolate() const;
CodeGenerator* code_generator() const;
+ ObserveNodeManager* observe_node_manager() const;
+
private:
PipelineData* const data_;
};
@@ -712,7 +732,7 @@ class SourcePositionWrapper final : public Reducer {
Reduction Reduce(Node* node) final {
SourcePosition const pos = table_->GetSourcePosition(node);
SourcePositionTable::Scope position(table_, pos);
- return reducer_->Reduce(node);
+ return reducer_->Reduce(node, nullptr);
}
void Finalize() final { reducer_->Finalize(); }
@@ -734,7 +754,7 @@ class NodeOriginsWrapper final : public Reducer {
Reduction Reduce(Node* node) final {
NodeOriginTable::Scope position(table_, reducer_name(), node);
- return reducer_->Reduce(node);
+ return reducer_->Reduce(node, nullptr);
}
void Finalize() final { reducer_->Finalize(); }
@@ -1029,7 +1049,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
public:
PipelineCompilationJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info,
- Handle<JSFunction> function, BailoutId osr_offset,
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind);
~PipelineCompilationJob() final;
PipelineCompilationJob(const PipelineCompilationJob&) = delete;
@@ -1067,7 +1087,7 @@ bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
PipelineCompilationJob::PipelineCompilationJob(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
- Handle<JSFunction> function, BailoutId osr_offset,
+ Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind)
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
@@ -1183,6 +1203,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
}
+ if (FLAG_turbo_direct_heap_access) {
+ isolate->heap()->PublishPendingAllocations();
+ }
+
return SUCCEEDED;
}
@@ -1422,7 +1446,9 @@ struct GraphBuilderPhase {
closure.raw_feedback_cell(), data->info()->osr_offset(),
data->jsgraph(), frequency, data->source_positions(),
SourcePosition::kNotInlined, data->info()->code_kind(), flags,
- &data->info()->tick_counter());
+ &data->info()->tick_counter(),
+ ObserveNodeInfo{data->observe_node_manager(),
+ data->info()->node_observer()});
}
};
@@ -1432,7 +1458,8 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
- data->broker(), data->jsgraph()->Dead());
+ data->broker(), data->jsgraph()->Dead(),
+ data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
@@ -1443,6 +1470,9 @@ struct InliningPhase {
if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
}
+ if (FLAG_turbo_inline_js_wasm_calls && data->info()->inlining()) {
+ call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
+ }
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
temp_zone, call_reducer_flags,
data->dependencies());
@@ -1463,9 +1493,9 @@ struct InliningPhase {
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(), data->broker(), flags,
data->dependencies(), temp_zone, info->zone());
- JSInliningHeuristic inlining(&graph_reducer,
- temp_zone, data->info(), data->jsgraph(),
- data->broker(), data->source_positions());
+ JSInliningHeuristic inlining(
+ &graph_reducer, temp_zone, data->info(), data->jsgraph(),
+ data->broker(), data->source_positions(), JSInliningHeuristic::kJSOnly);
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
data->broker());
@@ -1483,9 +1513,37 @@ struct InliningPhase {
}
graph_reducer.ReduceGraph();
info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());
+
+    // Skip the "wasm-inlining" phase if there are no Wasm function calls.
+ if (call_reducer.has_wasm_calls()) {
+ data->set_has_js_wasm_calls(true);
+ }
}
};
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+ void Run(PipelineData* data, Zone* temp_zone) {
+ DCHECK(data->has_js_wasm_calls());
+
+ OptimizedCompilationInfo* info = data->info();
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
+ data->broker(), data->jsgraph()->Dead());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
+ data->jsgraph(), data->broker(),
+ data->source_positions(),
+ JSInliningHeuristic::kWasmOnly);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &inlining);
+ graph_reducer.ReduceGraph();
+ }
+};
struct TyperPhase {
DECL_PIPELINE_PHASE_CONSTANTS(Typer)
@@ -1530,9 +1588,9 @@ struct UntyperPhase {
NodeProperties::RemoveType(node);
}
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
graph_reducer.ReduceGraph();
@@ -1551,9 +1609,9 @@ struct CopyMetadataForConcurrentCompilePhase {
DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
graph_reducer.ReduceGraph();
@@ -1597,9 +1655,9 @@ struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
@@ -1648,7 +1706,7 @@ struct EscapeAnalysisPhase {
GraphReducer reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ data->jsgraph()->Dead(), data->observe_node_manager());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1659,7 +1717,8 @@ struct EscapeAnalysisPhase {
UnparkedScopeIfNeeded scope(data->broker());
reducer.ReduceGraph();
- // TODO(tebbi): Turn this into a debug mode check once we have confidence.
+ // TODO(turbofan): Turn this into a debug mode check once we have
+ // confidence.
escape_reducer.VerifyReplacement();
}
};
@@ -1668,9 +1727,9 @@ struct TypeAssertionsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
temp_zone);
AddReducer(data, &graph_reducer, &type_assertions);
@@ -1682,10 +1741,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
- data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage);
+ SimplifiedLowering lowering(
+ data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
+ data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter(), linkage, data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1701,7 +1760,10 @@ struct LoopPeelingPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
LoopTree* loop_tree = LoopFinder::BuildLoopTree(
data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
@@ -1726,9 +1788,9 @@ struct GenericLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
data->broker());
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1744,9 +1806,9 @@ struct EarlyOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
@@ -1789,7 +1851,10 @@ struct EffectControlLinearizationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
// Schedule the graph without node splitting so that we can
// fix the effect and control flow for nodes with low-level side
@@ -1824,7 +1889,8 @@ struct EffectControlLinearizationPhase {
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
&data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ data->jsgraph()->Dead(),
+ data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -1844,7 +1910,10 @@ struct StoreStoreEliminationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
temp_zone);
@@ -1855,9 +1924,9 @@ struct LoadEliminationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone,
BranchElimination::kEARLY);
@@ -1904,7 +1973,10 @@ struct MemoryOptimizationPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- trimmer.TrimGraph(roots.begin(), roots.end());
+ {
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
@@ -1921,9 +1993,9 @@ struct LateOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -1949,9 +2021,9 @@ struct MachineOperatorOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -2021,9 +2093,9 @@ struct CsaEarlyOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -2049,9 +2121,9 @@ struct CsaOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
void Run(PipelineData* data, Zone* temp_zone) {
- GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(), data->broker(),
- data->jsgraph()->Dead());
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -2077,6 +2149,7 @@ struct EarlyGraphTrimmingPhase {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -2091,6 +2164,7 @@ struct LateGraphTrimmingPhase {
if (data->jsgraph()) {
data->jsgraph()->GetCachedNodes(&roots);
}
+ UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -2302,7 +2376,7 @@ struct ResolveControlFlowPhase {
};
struct MidTierRegisterOutputDefinitionPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
+ DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterOutputDefinition)
void Run(PipelineData* data, Zone* temp_zone) {
DefineOutputs(data->mid_tier_register_allocator_data());
@@ -2584,6 +2658,12 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
+ if (data->has_js_wasm_calls()) {
+ DCHECK(FLAG_turbo_inline_js_wasm_calls);
+ Run<WasmInliningPhase>();
+ RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
+
// From now on it is invalid to look at types on the nodes, because the types
// on the nodes might not make sense after representation selection due to the
// way we handle truncations; if we'd want to look at types afterwards we'd
@@ -3116,7 +3196,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
// static
std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
- bool has_script, BailoutId osr_offset, JavaScriptFrame* osr_frame) {
+ bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
return std::make_unique<PipelineCompilationJob>(
@@ -3155,6 +3235,11 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
+ if (FLAG_wasm_loop_unrolling) {
+ pipeline.Run<LoopExitEliminationPhase>();
+ pipeline.RunPrintAndVerify("V8.LoopExitEliminationPhase", true);
+ }
+
data.BeginPhaseKind("V8.WasmOptimization");
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
@@ -3163,9 +3248,9 @@ void Pipeline::GenerateCodeForWasmFunction(
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
RuntimeCallCounterId::kOptimizeWasmFullOptimization);
- GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead());
+ GraphReducer graph_reducer(
+ scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
+ data.mcgraph()->Dead(), data.observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
@@ -3183,9 +3268,9 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
- GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(), data.broker(),
- data.mcgraph()->Dead());
+ GraphReducer graph_reducer(
+ scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
+ data.mcgraph()->Dead(), data.observe_node_manager());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
@@ -3754,6 +3839,10 @@ CodeGenerator* PipelineImpl::code_generator() const {
return data_->code_generator();
}
+ObserveNodeManager* PipelineImpl::observe_node_manager() const {
+ return data_->observe_node_manager();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 509f5febc6..db3aab4623 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -48,7 +48,7 @@ class Pipeline : public AllStatic {
static V8_EXPORT_PRIVATE std::unique_ptr<OptimizedCompilationJob>
NewCompilationJob(Isolate* isolate, Handle<JSFunction> function,
CodeKind code_kind, bool has_script,
- BailoutId osr_offset = BailoutId::None(),
+ BytecodeOffset osr_offset = BytecodeOffset::None(),
JavaScriptFrame* osr_frame = nullptr);
// Run the pipeline for the WebAssembly compilation info.
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index da3785f35e..4cecd338c5 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -246,7 +246,7 @@ class LiteralFeedback
};
class RegExpLiteralFeedback
- : public SingleValueFeedback<JSRegExpRef,
+ : public SingleValueFeedback<RegExpBoilerplateDescriptionRef,
ProcessedFeedback::kRegExpLiteral> {
using SingleValueFeedback::SingleValueFeedback;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 5214f7ad9b..a7eddbe826 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -199,54 +199,49 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
}
if (field_access.machine_type.representation() ==
MachineRepresentation::kFloat64) {
- bool const is_heapnumber = !is_inobject || !FLAG_unbox_double_fields;
- if (is_heapnumber) {
- if (dependencies() == nullptr) {
- FieldAccess const storage_access = {kTaggedBase,
- field_access.offset,
- name.object(),
- MaybeHandle<Map>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier,
- LoadSensitivity::kCritical,
- field_access.const_field_info};
- storage = *effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- *effect, *control);
- // We expect the loaded value to be a heap number here. With
-      // in-place field representation changes it is possible this is
- // no longer a heap number without map transitions. If we haven't taken
- // a dependency on field representation, we should verify the loaded
- // value is a heap number.
- storage = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- storage, *effect, *control);
- Node* map = *effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- storage, *effect, *control);
- Node* is_heap_number =
- graph()->NewNode(simplified()->ReferenceEqual(), map,
- jsgraph()->HeapNumberMapConstant());
- *effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotAHeapNumber),
- is_heap_number, *effect, *control);
- } else {
- FieldAccess const storage_access = {kTaggedBase,
- field_access.offset,
- name.object(),
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier,
- LoadSensitivity::kCritical,
- field_access.const_field_info};
- storage = *effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- *effect, *control);
- }
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
+ if (dependencies() == nullptr) {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
+ // We expect the loaded value to be a heap number here. With
+    // in-place field representation changes it is possible this is
+ // no longer a heap number without map transitions. If we haven't taken
+ // a dependency on field representation, we should verify the loaded
+ // value is a heap number.
+ storage = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ storage, *effect, *control);
+ Node* map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ storage, *effect, *control);
+ Node* is_heap_number =
+ graph()->NewNode(simplified()->ReferenceEqual(), map,
+ jsgraph()->HeapNumberMapConstant());
+ *effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotAHeapNumber),
+ is_heap_number, *effect, *control);
+ } else {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
}
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
}
Node* value = *effect = graph()->NewNode(
simplified()->LoadField(field_access), storage, *effect, *control);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 1bff7c82a7..df12030c31 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -384,6 +384,7 @@ Node* RawMachineAssembler::CreateNodeFromPredecessors(
return sidetable[predecessors.front()->id().ToSize()];
}
std::vector<Node*> inputs;
+ inputs.reserve(predecessors.size());
for (BasicBlock* predecessor : predecessors) {
inputs.push_back(sidetable[predecessor->id().ToSize()]);
}
@@ -410,6 +411,7 @@ void RawMachineAssembler::MakePhiBinary(Node* phi, int split_point,
left_input = NodeProperties::GetValueInput(phi, 0);
} else {
std::vector<Node*> inputs;
+ inputs.reserve(left_input_count);
for (int i = 0; i < left_input_count; ++i) {
inputs.push_back(NodeProperties::GetValueInput(phi, i));
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index eff7a845c6..907464f57e 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -1053,7 +1053,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// A post-processing pass to add effect and control edges so that the graph
// can be optimized and re-scheduled.
- // TODO(tebbi): Move this to a separate class.
+ // TODO(turbofan): Move this to a separate class.
void MakeReschedulable();
Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
const std::vector<Node*>& sidetable,
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 2455ea3115..64b274cdcc 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -1105,7 +1105,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
// BigInts are only represented as tagged pointer and word64.
if (!CanBeTaggedPointer(output_rep) &&
output_rep != MachineRepresentation::kWord64) {
- DCHECK(!output_type.Is(Type::BigInt()));
+ DCHECK(!output_type.Equals(Type::BigInt()));
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
return jsgraph()->graph()->NewNode(
diff --git a/deps/v8/src/compiler/scheduled-machine-lowering.cc b/deps/v8/src/compiler/scheduled-machine-lowering.cc
index 903052be1d..fde836e4e8 100644
--- a/deps/v8/src/compiler/scheduled-machine-lowering.cc
+++ b/deps/v8/src/compiler/scheduled-machine-lowering.cc
@@ -38,7 +38,7 @@ void ScheduledMachineLowering::Run() {
Node* node = *instr;
Reduction reduction;
for (auto reducer : reducers_) {
- reduction = reducer->Reduce(node);
+ reduction = reducer->Reduce(node, nullptr);
if (reduction.Changed()) break;
}
if (reduction.Changed()) {
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 2733aaaa7d..5be9a7d705 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -382,7 +382,7 @@ class SerializerForBackgroundCompilation {
SerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
Hints Run(); // NOTE: Returns empty for an
// already-serialized function.
@@ -405,6 +405,8 @@ class SerializerForBackgroundCompilation {
SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
+ void VisitShortStar(interpreter::Register reg);
+
Hints& register_hints(interpreter::Register reg);
// Return a vector containing the hints for the given register range (in
@@ -555,7 +557,7 @@ class SerializerForBackgroundCompilation {
Zone* zone() { return zone_scope_.zone(); }
Environment* environment() const { return environment_; }
SerializerForBackgroundCompilationFlags flags() const { return flags_; }
- BailoutId osr_offset() const { return osr_offset_; }
+ BytecodeOffset osr_offset() const { return osr_offset_; }
const BytecodeAnalysis& bytecode_analysis() { return *bytecode_analysis_; }
JSHeapBroker* const broker_;
@@ -565,7 +567,7 @@ class SerializerForBackgroundCompilation {
// Instead of storing the virtual_closure here, we could extract it from the
// {closure_hints_} but that would be cumbersome.
VirtualClosure const function_;
- BailoutId const osr_offset_;
+ BytecodeOffset const osr_offset_;
base::Optional<BytecodeAnalysis> bytecode_analysis_;
ZoneUnorderedMap<int, Environment*> jump_target_environments_;
Environment* const environment_;
@@ -579,7 +581,7 @@ class SerializerForBackgroundCompilation {
void RunSerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset) {
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset) {
SerializerForBackgroundCompilation serializer(
zone_stats, broker, dependencies, closure, flags, osr_offset);
serializer.Run();
@@ -1056,7 +1058,7 @@ std::ostream& operator<<(
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset)
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset)
: broker_(broker),
dependencies_(dependencies),
zone_scope_(zone_stats, ZONE_NAME),
@@ -1070,6 +1072,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
arguments_(zone()) {
closure_hints_.AddConstant(closure, zone(), broker_);
JSFunctionRef(broker, closure).Serialize();
+ JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_);
TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
@@ -1086,7 +1089,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
zone_scope_(zone_stats, ZONE_NAME),
flags_(flags),
function_(function.virtual_closure()),
- osr_offset_(BailoutId::None()),
+ osr_offset_(BytecodeOffset::None()),
jump_target_environments_(zone()),
environment_(zone()->New<Environment>(zone(), broker_->isolate(),
function, new_target, arguments,
@@ -1097,6 +1100,7 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
if (function.closure().ToHandle(&closure)) {
closure_hints_.AddConstant(closure, zone(), broker);
JSFunctionRef(broker, closure).Serialize();
+ JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
} else {
closure_hints_.AddVirtualClosure(function.virtual_closure(), zone(),
broker);
@@ -1263,7 +1267,6 @@ Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
void SerializerForBackgroundCompilation::TraverseBytecode() {
bytecode_analysis_.emplace(bytecode_array(), zone(), osr_offset(), false);
- BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
BytecodeArrayIterator iterator(bytecode_array());
HandlerRangeMatcher try_start_matcher(iterator, bytecode_array());
@@ -1309,13 +1312,20 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
}
- switch (iterator.current_bytecode()) {
+ interpreter::Bytecode current_bytecode = iterator.current_bytecode();
+ switch (current_bytecode) {
#define DEFINE_BYTECODE_CASE(name) \
case interpreter::Bytecode::k##name: \
Visit##name(&iterator); \
break;
SUPPORTED_BYTECODE_LIST(DEFINE_BYTECODE_CASE)
#undef DEFINE_BYTECODE_CASE
+
+#define DEFINE_SHORT_STAR_CASE(Name, ...) case interpreter::Bytecode::k##Name:
+ SHORT_STAR_BYTECODE_LIST(DEFINE_SHORT_STAR_CASE)
+#undef DEFINE_SHORT_STAR_CASE
+ VisitShortStar(interpreter::Register::FromShortStar(current_bytecode));
+ break;
}
}
@@ -1521,10 +1531,14 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
void SerializerForBackgroundCompilation::VisitLdaConstant(
BytecodeArrayIterator* iterator) {
- ObjectRef object(
- broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- environment()->accumulator_hints() =
- Hints::SingleConstant(object.object(), zone());
+ Handle<Object> constant =
+ iterator->GetConstantForIndexOperand(0, broker()->isolate());
+ // TODO(v8:7790): FixedArrays still need to be serialized until they are
+ // moved to kNeverSerialized.
+ if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) {
+ ObjectRef(broker(), constant);
+ }
+ environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
}
void SerializerForBackgroundCompilation::VisitPushContext(
@@ -1692,6 +1706,11 @@ void SerializerForBackgroundCompilation::VisitStar(
register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
}
+void SerializerForBackgroundCompilation::VisitShortStar(
+ interpreter::Register reg) {
+ register_hints(reg).Reset(&environment()->accumulator_hints(), zone());
+}
+
void SerializerForBackgroundCompilation::VisitMov(
BytecodeArrayIterator* iterator) {
interpreter::Register src = iterator->GetRegisterOperand(0);
@@ -2136,10 +2155,8 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
callee.AddConstant(target->object(), zone(), broker());
} else {
// Call; target is feedback cell or callee.
- if (target->IsFeedbackCell() &&
- target->AsFeedbackCell().value().IsFeedbackVector()) {
- FeedbackVectorRef vector =
- target->AsFeedbackCell().value().AsFeedbackVector();
+ if (target->IsFeedbackCell() && target->AsFeedbackCell().value()) {
+ FeedbackVectorRef vector = *target->AsFeedbackCell().value();
vector.Serialize();
VirtualClosure virtual_closure(
vector.shared_function_info().object(), vector.object(),
@@ -2255,7 +2272,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
FunctionTemplateInfoRef target_template_info(
broker(),
- handle(target->function_data(kAcquireLoad), broker()->isolate()));
+ broker()->CanonicalPersistentHandle(target->function_data(kAcquireLoad)));
if (!target_template_info.has_call_code()) return;
target_template_info.SerializeCallCode();
@@ -2987,7 +3004,9 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
if (access_mode == AccessMode::kLoad && cell.has_value()) {
- result_hints->AddConstant(cell->value().object(), zone(), broker());
+ result_hints->AddConstant(
+ handle(cell->object()->value(), broker()->isolate()), zone(),
+ broker());
}
}
@@ -3017,7 +3036,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
Handle<SharedFunctionInfo> sfi = function.shared().object();
if (sfi->IsApiFunction()) {
FunctionTemplateInfoRef fti_ref(
- broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ broker(),
+ broker()->CanonicalPersistentHandle(sfi->get_api_func_data()));
if (fti_ref.has_call_code()) {
fti_ref.SerializeCallCode();
ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
@@ -3030,7 +3050,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
// For JSCallReducer::ReduceJSCall.
function.Serialize();
} else {
- FunctionTemplateInfoRef fti(broker(), access_info.constant());
+ FunctionTemplateInfoRef fti(broker(), broker()->CanonicalPersistentHandle(
+ access_info.constant()));
if (fti.has_call_code()) fti.SerializeCallCode();
}
} else if (access_info.IsModuleExport()) {
@@ -3301,14 +3322,22 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
ObjectRef key_ref(broker(), hint);
// TODO(neis): Do this for integer-HeapNumbers too?
if (key_ref.IsSmi() && key_ref.AsSmi() >= 0) {
- base::Optional<ObjectRef> element =
- receiver_ref.GetOwnConstantElement(
- key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
- if (!element.has_value() && receiver_ref.IsJSArray()) {
- // We didn't find a constant element, but if the receiver is a
- // cow-array we can exploit the fact that any future write to the
- // element will replace the whole elements storage.
- receiver_ref.AsJSArray().GetOwnCowElement(
+ base::Optional<ObjectRef> element;
+ if (receiver_ref.IsJSObject()) {
+ element = receiver_ref.AsJSObject().GetOwnConstantElement(
+ key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
+ if (!element.has_value() && receiver_ref.IsJSArray()) {
+ // We didn't find a constant element, but if the receiver is a
+ // cow-array we can exploit the fact that any future write to the
+ // element will replace the whole elements storage.
+ JSArrayRef array_ref = receiver_ref.AsJSArray();
+ array_ref.SerializeElements();
+ array_ref.GetOwnCowElement(
+ array_ref.elements().value(), key_ref.AsSmi(),
+ SerializationPolicy::kSerializeIfNeeded);
+ }
+ } else if (receiver_ref.IsString()) {
+ element = receiver_ref.AsString().GetCharAsStringOrUndefined(
key_ref.AsSmi(), SerializationPolicy::kSerializeIfNeeded);
}
}
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index f2330332d8..f01e73452e 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-class BailoutId;
+class BytecodeOffset;
class Zone;
namespace compiler {
@@ -31,7 +31,7 @@ using SerializerForBackgroundCompilationFlags =
void RunSerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
+ SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index f9bf22f8be..445898d882 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -114,6 +114,8 @@ void SimdScalarLowering::LowerGraph() {
V(I64x2Splat) \
V(I64x2ExtractLane) \
V(I64x2ReplaceLane) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
V(I64x2Neg) \
V(I64x2Shl) \
V(I64x2ShrS) \
@@ -166,11 +168,10 @@ void SimdScalarLowering::LowerGraph() {
V(S128Not) \
V(S128AndNot) \
V(S128Select) \
- V(V32x4AnyTrue) \
+ V(V64x2AllTrue) \
V(V32x4AllTrue) \
- V(V16x8AnyTrue) \
V(V16x8AllTrue) \
- V(V8x16AnyTrue) \
+ V(V128AnyTrue) \
V(V8x16AllTrue) \
V(I32x4BitMask) \
V(I32x4ExtMulLowI16x8S) \
@@ -1188,7 +1189,7 @@ Node* SimdScalarLowering::ConstructPhiForComparison(Diamond d,
int false_value) {
// Close the given Diamond d using a Phi node, taking care of constructing the
// right kind of constants (Int32 or Int64) based on rep_type.
- if (rep_type == SimdType::kFloat64x2) {
+ if (rep_type == SimdType::kFloat64x2 || rep_type == SimdType::kInt64x2) {
MachineRepresentation rep = MachineRepresentation::kWord64;
return d.Phi(rep, mcgraph_->Int64Constant(true_value),
mcgraph_->Int64Constant(false_value));
@@ -1261,15 +1262,33 @@ void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
int num_lanes = NumLanes(rep_type);
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node* zero;
+ Node* tmp_result;
+ MachineRepresentation result_rep = MachineRepresentation::kWord32;
+ const Operator* equals;
+
+ if (SimdType::kInt64x2 == rep_type) {
+ zero = mcgraph_->Int64Constant(0);
+ tmp_result = mcgraph_->Int64Constant(1);
+ result_rep = MachineRepresentation::kWord64;
+ equals = machine()->Word64Equal();
+ } else {
+ zero = mcgraph_->Int32Constant(0);
+ tmp_result = mcgraph_->Int32Constant(1);
+ equals = machine()->Word32Equal();
+ }
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* zero = mcgraph_->Int32Constant(0);
- Node* tmp_result = mcgraph_->Int32Constant(1);
for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rep[i], zero));
- tmp_result = d.Phi(MachineRepresentation::kWord32, zero, tmp_result);
+ Diamond d(graph(), common(), graph()->NewNode(equals, rep[i], zero));
+ tmp_result = d.Phi(result_rep, zero, tmp_result);
+ }
+
+ if (SimdType::kInt64x2 == rep_type) {
+ tmp_result =
+ graph()->NewNode(machine()->TruncateInt64ToInt32(), tmp_result);
}
+
rep_node[0] = tmp_result;
ReplaceNode(node, rep_node, 1);
}
@@ -2102,6 +2121,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
+ COMPARISON_CASE(Int64x2, kI64x2Eq, Word64Equal, false)
COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
@@ -2138,6 +2158,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
}
+ case IrOpcode::kI64x2Ne: {
+ LowerNotEqual(node, SimdType::kInt64x2, machine()->Word64Equal());
+ break;
+ }
case IrOpcode::kI32x4Ne: {
LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
break;
@@ -2220,9 +2244,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 16);
break;
}
- case IrOpcode::kV32x4AnyTrue:
- case IrOpcode::kV16x8AnyTrue:
- case IrOpcode::kV8x16AnyTrue: {
+ case IrOpcode::kV128AnyTrue: {
DCHECK_EQ(1, node->InputCount());
      // AnyTrue always returns an I32x4, and can work with inputs of any shape,
// but we still need GetReplacementsWithType if input is float.
@@ -2242,6 +2264,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 1);
break;
}
+ case IrOpcode::kV64x2AllTrue: {
+ LowerAllTrueOp(node, SimdType::kInt64x2);
+ break;
+ }
case IrOpcode::kV32x4AllTrue: {
LowerAllTrueOp(node, SimdType::kInt32x4);
break;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index a61ff7bf70..49df06a0ec 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -18,8 +18,8 @@
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/operation-typer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
@@ -28,6 +28,7 @@
#include "src/numbers/conversions-inl.h"
#include "src/objects/objects.h"
#include "src/utils/address-map.h"
+#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -39,6 +40,8 @@ namespace compiler {
if (FLAG_trace_representation) PrintF(__VA_ARGS__); \
} while (false)
+const char* kSimplifiedLoweringReducerName = "SimplifiedLowering";
+
// Representation selection and lowering of {Simplified} operators to machine
// operators are interwined. We use a fixpoint calculation to compute both the
// output representation and the best possible lowering for {Simplified} nodes.
@@ -241,6 +244,16 @@ class InputUseInfos {
#endif // DEBUG
class RepresentationSelector {
+  // The purpose of this nested class is to hide the method
+ // v8::internal::compiler::NodeProperties::ChangeOp which should not be
+ // directly used by code in RepresentationSelector and SimplifiedLowering.
+ // RepresentationSelector code should call RepresentationSelector::ChangeOp in
+  // place of NodeProperties::ChangeOp, in order to notify a registered
+  // ObserveNodeManager of the changes and support the %ObserveNode intrinsic.
+ class NodeProperties : public compiler::NodeProperties {
+ static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
+ };
+
public:
// Information for each node tracked during the fixpoint.
class NodeInfo final {
@@ -290,7 +303,8 @@ class RepresentationSelector {
RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- TickCounter* tick_counter, Linkage* linkage)
+ TickCounter* tick_counter, Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
zone_(zone),
might_need_revisit_(zone),
@@ -308,7 +322,8 @@ class RepresentationSelector {
type_cache_(TypeCache::Get()),
op_typer_(broker, graph_zone()),
tick_counter_(tick_counter),
- linkage_(linkage) {
+ linkage_(linkage),
+ observe_node_manager_(observe_node_manager) {
}
void ResetNodeInfoState() {
@@ -763,7 +778,7 @@ class RepresentationSelector {
DCHECK(TypeOf(node).IsNone());
// If the node is unreachable, insert an Unreachable node and mark the
// value dead.
- // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // TODO(jarin,turbofan) Find a way to unify/merge this insertion with
// InsertUnreachableIfNecessary.
Node* unreachable = effect =
graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
@@ -772,7 +787,7 @@ class RepresentationSelector {
node->ReplaceInput(0, unreachable);
node->TrimInputCount(dead_value->ValueInputCount());
ReplaceEffectControlUses(node, effect, control);
- NodeProperties::ChangeOp(node, dead_value);
+ ChangeOp(node, dead_value);
}
void ChangeToPureOp(Node* node, const Operator* new_op) {
@@ -792,7 +807,7 @@ class RepresentationSelector {
} else {
DCHECK_EQ(0, node->op()->ControlInputCount());
}
- NodeProperties::ChangeOp(node, new_op);
+ ChangeOp(node, new_op);
}
void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
@@ -816,7 +831,7 @@ class RepresentationSelector {
DCHECK_EQ(0, node->op()->ControlInputCount());
}
node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
- NodeProperties::ChangeOp(node, new_op);
+ ChangeOp(node, new_op);
}
// Converts input {index} of {node} according to given UseInfo {use},
@@ -1038,8 +1053,7 @@ class RepresentationSelector {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
if (output != p.representation()) {
- NodeProperties::ChangeOp(node,
- lowering->common()->Select(output, p.hint()));
+ ChangeOp(node, lowering->common()->Select(output, p.hint()));
}
}
// Convert inputs to the output representation of this phi, pass the
@@ -1063,7 +1077,7 @@ class RepresentationSelector {
if (lower<T>()) {
// Update the phi operator.
if (output != PhiRepresentationOf(node->op())) {
- NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
+ ChangeOp(node, lowering->common()->Phi(output, values));
}
}
@@ -1216,41 +1230,47 @@ class RepresentationSelector {
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
SparseInputMask mask = SparseInputMaskOf(node->op());
- NodeProperties::ChangeOp(
- node, jsgraph_->common()->TypedStateValues(types, mask));
+ ChangeOp(node, jsgraph_->common()->TypedStateValues(types, mask));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
template <Phase T>
- void VisitFrameState(Node* node) {
+ void VisitFrameState(FrameState node) {
DCHECK_EQ(5, node->op()->ValueInputCount());
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(FrameState::kFrameStateInputCount, node->InputCount());
- ProcessInput<T>(node, 0, UseInfo::AnyTagged()); // Parameters.
- ProcessInput<T>(node, 1, UseInfo::AnyTagged()); // Registers.
+ ProcessInput<T>(node, FrameState::kFrameStateParametersInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateLocalsInput,
+ UseInfo::AnyTagged());
// Accumulator is a special flower - we need to remember its type in
// a singleton typed-state-values node (as if it was a singleton
// state-values node).
- Node* accumulator = node->InputAt(2);
+ Node* accumulator = node.stack();
if (propagate<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
- EnqueueInput<T>(node, 2, UseInfo::AnyTagged());
+ EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
+ UseInfo::AnyTagged());
} else {
- EnqueueInput<T>(node, 2, UseInfo::Any());
+ EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
+ UseInfo::Any());
}
} else if (lower<T>()) {
// TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
// truncated BigInts.
if (TypeOf(accumulator).Is(Type::BigInt())) {
- ConvertInput(node, 2, UseInfo::AnyTagged());
+ ConvertInput(node, FrameState::kFrameStateStackInput,
+ UseInfo::AnyTagged());
}
Zone* zone = jsgraph_->zone();
if (accumulator == jsgraph_->OptimizedOutConstant()) {
- node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
+ node->ReplaceInput(FrameState::kFrameStateStackInput,
+ jsgraph_->SingleDeadTypedStateValues());
} else {
ZoneVector<MachineType>* types =
zone->New<ZoneVector<MachineType>>(1, zone);
@@ -1258,15 +1278,19 @@ class RepresentationSelector {
TypeOf(accumulator));
node->ReplaceInput(
- 2, jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
- types, SparseInputMask::Dense()),
- node->InputAt(2)));
+ FrameState::kFrameStateStackInput,
+ jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
+ types, SparseInputMask::Dense()),
+ node.stack()));
}
}
- ProcessInput<T>(node, 3, UseInfo::AnyTagged()); // Context.
- ProcessInput<T>(node, 4, UseInfo::AnyTagged()); // Closure.
- ProcessInput<T>(node, 5, UseInfo::AnyTagged()); // Outer frame state.
+ ProcessInput<T>(node, FrameState::kFrameStateContextInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateFunctionInput,
+ UseInfo::AnyTagged());
+ ProcessInput<T>(node, FrameState::kFrameStateOuterStateInput,
+ UseInfo::AnyTagged());
return SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1296,8 +1320,8 @@ class RepresentationSelector {
ConvertInput(node, i, UseInfo::AnyTagged());
}
}
- NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState(
- ObjectIdOf(node->op()), types));
+ ChangeOp(node, jsgraph_->common()->TypedObjectState(
+ ObjectIdOf(node->op()), types));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1406,15 +1430,15 @@ class RepresentationSelector {
IsSomePositiveOrderedNumber(input1_type)
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
- NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
+ ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
}
void ChangeToInt32OverflowOp(Node* node) {
- NodeProperties::ChangeOp(node, Int32OverflowOp(node));
+ ChangeOp(node, Int32OverflowOp(node));
}
void ChangeToUint32OverflowOp(Node* node) {
- NodeProperties::ChangeOp(node, Uint32OverflowOp(node));
+ ChangeOp(node, Uint32OverflowOp(node));
}
template <Phase T>
@@ -1674,19 +1698,19 @@ class RepresentationSelector {
// TODO(neis): Move this into TypedOptimization?
new_flags |= CheckBoundsFlag::kAbortOnOutOfBounds;
}
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
} else if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) {
VisitBinop<T>(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
UseInfo::Word(), MachineType::PointerRepresentation());
if (lower<T>()) {
if (jsgraph_->machine()->Is64()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint64Bounds(feedback, new_flags));
} else {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
}
} else {
@@ -1694,8 +1718,8 @@ class RepresentationSelector {
node, UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, feedback),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32Bounds(feedback, new_flags));
+ ChangeOp(node,
+ simplified()->CheckedUint32Bounds(feedback, new_flags));
}
}
} else {
@@ -1708,35 +1732,11 @@ class RepresentationSelector {
UseInfo::CheckedSigned64AsWord64(zero_handling, feedback),
UseInfo::Word64(), MachineRepresentation::kWord64);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint64Bounds(feedback, new_flags));
+ ChangeOp(node, simplified()->CheckedUint64Bounds(feedback, new_flags));
}
}
}
- static MachineType MachineTypeFor(CTypeInfo::Type type) {
- switch (type) {
- case CTypeInfo::Type::kVoid:
- return MachineType::AnyTagged();
- case CTypeInfo::Type::kBool:
- return MachineType::Bool();
- case CTypeInfo::Type::kInt32:
- return MachineType::Int32();
- case CTypeInfo::Type::kUint32:
- return MachineType::Uint32();
- case CTypeInfo::Type::kInt64:
- return MachineType::Int64();
- case CTypeInfo::Type::kUint64:
- return MachineType::Uint64();
- case CTypeInfo::Type::kFloat32:
- return MachineType::Float32();
- case CTypeInfo::Type::kFloat64:
- return MachineType::Float64();
- case CTypeInfo::Type::kV8Value:
- return MachineType::AnyTagged();
- }
- }
-
UseInfo UseInfoForFastApiCallArgument(CTypeInfo::Type type,
FeedbackSource const& feedback) {
switch (type) {
@@ -1801,10 +1801,106 @@ class RepresentationSelector {
ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
ProcessRemainingInputs<T>(node, value_input_count);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ }
+
+ static MachineType MachineTypeForWasmReturnType(wasm::ValueType type) {
+ switch (type.kind()) {
+ case wasm::kI32:
+ return MachineType::Int32();
+ case wasm::kF32:
+ return MachineType::Float32();
+ case wasm::kF64:
+ return MachineType::Float64();
+ case wasm::kI64:
+ // Not used for i64, see VisitJSWasmCall().
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ UseInfo UseInfoForJSWasmCallArgument(Node* input, wasm::ValueType type,
+ FeedbackSource const& feedback) {
+ // If the input type is a Number or Oddball, we can directly convert the
+ // input into the Wasm native type of the argument. If not, we return
+ // UseInfo::AnyTagged to signal that WasmWrapperGraphBuilder will need to
+ // add Nodes to perform the conversion (in WasmWrapperGraphBuilder::FromJS).
+ switch (type.kind()) {
+ case wasm::kI32:
+ return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
+ case wasm::kI64:
+ return UseInfo::AnyTagged();
+ case wasm::kF32:
+ case wasm::kF64:
+ // For Float32, TruncateFloat64ToFloat32 will be inserted later in
+ // WasmWrapperGraphBuilder::BuildJSToWasmWrapper.
+ return UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
+ feedback);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <Phase T>
+ void VisitJSWasmCall(Node* node, SimplifiedLowering* lowering) {
+ DCHECK_EQ(JSWasmCallNode::TargetIndex(), 0);
+ DCHECK_EQ(JSWasmCallNode::ReceiverIndex(), 1);
+ DCHECK_EQ(JSWasmCallNode::FirstArgumentIndex(), 2);
+
+ JSWasmCallNode n(node);
+
+ JSWasmCallParameters const& params = n.Parameters();
+ const wasm::FunctionSig* wasm_signature = params.signature();
+ int wasm_arg_count = static_cast<int>(wasm_signature->parameter_count());
+ DCHECK_EQ(wasm_arg_count, n.ArgumentCount());
+
+ base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
+ wasm_arg_count);
+
+ // Visit JSFunction and Receiver nodes.
+ ProcessInput<T>(node, JSWasmCallNode::TargetIndex(), UseInfo::Any());
+ ProcessInput<T>(node, JSWasmCallNode::ReceiverIndex(), UseInfo::Any());
+
+ // Propagate representation information from TypeInfo.
+ for (int i = 0; i < wasm_arg_count; i++) {
+ TNode<Object> input = n.Argument(i);
+ DCHECK_NOT_NULL(input);
+ arg_use_info[i] = UseInfoForJSWasmCallArgument(
+ input, wasm_signature->GetParam(i), params.feedback());
+ ProcessInput<T>(node, JSWasmCallNode::ArgumentIndex(i), arg_use_info[i]);
+ }
+
+ // Visit value, context and frame state inputs as tagged.
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
+ DCHECK(first_effect_index >
+ JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count);
+ for (int i = JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count;
+ i < first_effect_index; i++) {
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
+ }
+
+ // Effect and Control.
+ ProcessRemainingInputs<T>(node, NodeProperties::FirstEffectIndex(node));
+
+ if (wasm_signature->return_count() == 1) {
+ if (wasm_signature->GetReturn().kind() == wasm::kI64) {
+ // Conversion between negative int64 and BigInt not supported yet.
+ // Do not bypass the type conversion when the result type is i64.
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ } else {
+ MachineType return_type =
+ MachineTypeForWasmReturnType(wasm_signature->GetReturn());
+ SetOutput<T>(
+ node, return_type.representation(),
+ JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn()));
+ }
+ } else {
+ DCHECK_EQ(wasm_signature->return_count(), 0);
+ SetOutput<T>(node, MachineRepresentation::kTagged);
+ }
- MachineType return_type =
- MachineTypeFor(c_signature->ReturnInfo().GetType());
- SetOutput<T>(node, return_type.representation());
+ // The actual lowering of JSWasmCall nodes happens later, in the subsequent
+ // "wasm-inlining" phase.
}
// Dispatching routine for visiting the node {node} with the usage {use}.
@@ -1936,11 +2032,11 @@ class RepresentationSelector {
if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
- NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ ChangeOp(node, lowering->machine()->Word32Equal());
} else if (CanBeTaggedPointer(input_info->representation())) {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ ChangeOp(node, lowering->machine()->WordEqual());
} else {
DCHECK(TypeOf(node->InputAt(0)).IsNone());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
@@ -1968,7 +2064,7 @@ class RepresentationSelector {
// => unsigned Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
if ((lhs_type.Is(Type::Signed32OrMinusZero()) &&
@@ -1979,13 +2075,13 @@ class RepresentationSelector {
// => signed Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
return;
}
// => Float64Cmp
VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberLessThan:
@@ -2000,18 +2096,18 @@ class RepresentationSelector {
// => unsigned Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
} else if (lhs_type.Is(Type::Signed32OrMinusZero()) &&
rhs_type.Is(Type::Signed32OrMinusZero())) {
// => signed Int32Cmp
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
VisitBinop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kBit);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
@@ -2337,7 +2433,7 @@ class RepresentationSelector {
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
VisitWord32TruncatingBinop<T>(node);
- if (lower<T>()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower<T>()) ChangeOp(node, Int32Op(node));
return;
}
case IrOpcode::kSpeculativeNumberBitwiseOr:
@@ -2439,8 +2535,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower<T>()) {
node->RemoveInput(1);
- NodeProperties::ChangeOp(
- node, simplified()->CheckedUint32ToInt32(FeedbackSource()));
+ ChangeOp(node,
+ simplified()->CheckedUint32ToInt32(FeedbackSource()));
}
return;
}
@@ -2482,27 +2578,27 @@ class RepresentationSelector {
} else {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(kIdentifyZeros),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberClz32: {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberImul: {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ if (lower<T>()) ChangeOp(node, Uint32Op(node));
return;
}
case IrOpcode::kNumberFround: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat32);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberMax: {
@@ -2557,7 +2653,7 @@ class RepresentationSelector {
lowering->DoMax(node, lowering->machine()->Float64LessThan(),
MachineRepresentation::kFloat64);
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
}
@@ -2616,7 +2712,7 @@ class RepresentationSelector {
lowering->machine()->Float64LessThanOrEqual(),
MachineRepresentation::kFloat64);
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
}
@@ -2626,7 +2722,7 @@ class RepresentationSelector {
case IrOpcode::kNumberPow: {
VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberCeil:
@@ -2647,7 +2743,7 @@ class RepresentationSelector {
} else if (node->opcode() == IrOpcode::kNumberRound) {
DeferReplacement(node, lowering->Float64Round(node));
} else {
- NodeProperties::ChangeOp(node, Float64Op(node));
+ ChangeOp(node, Float64Op(node));
}
}
return;
@@ -2687,7 +2783,7 @@ class RepresentationSelector {
case IrOpcode::kNumberTanh: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberSign: {
@@ -2712,14 +2808,14 @@ class RepresentationSelector {
} else {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
}
return;
}
case IrOpcode::kNumberSqrt: {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
- if (lower<T>()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower<T>()) ChangeOp(node, Float64Op(node));
return;
}
case IrOpcode::kNumberToBoolean: {
@@ -2792,9 +2888,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower<T>()) {
if (COMPRESS_POINTERS_BOOL) {
- NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
+ ChangeOp(node, lowering->machine()->Word32Equal());
} else {
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ ChangeOp(node, lowering->machine()->WordEqual());
}
}
return;
@@ -2810,8 +2906,7 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberSameValue());
+ ChangeOp(node, lowering->simplified()->NumberSameValue());
}
} else {
VisitBinop<T>(node, UseInfo::AnyTagged(),
@@ -2864,7 +2959,7 @@ class RepresentationSelector {
UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
MachineRepresentation::kTaggedPointer);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
+ ChangeOp(node, lowering->simplified()->BigIntAdd());
}
}
return;
@@ -2882,8 +2977,7 @@ class RepresentationSelector {
UseInfo::CheckedBigIntAsTaggedPointer(FeedbackSource{}),
MachineRepresentation::kTaggedPointer);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->BigIntSubtract());
+ ChangeOp(node, lowering->simplified()->BigIntSubtract());
}
}
return;
@@ -3064,6 +3158,10 @@ class RepresentationSelector {
SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kLoadFramePointer: {
+ SetOutput<T>(node, MachineType::PointerRepresentation());
+ return;
+ }
case IrOpcode::kLoadMessage: {
if (truncation.IsUnused()) return VisitUnused<T>(node);
VisitUnop<T>(node, UseInfo::Word(), MachineRepresentation::kTagged);
@@ -3114,8 +3212,7 @@ class RepresentationSelector {
if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
- NodeProperties::ChangeOp(
- node, jsgraph_->simplified()->StoreField(access));
+ ChangeOp(node, jsgraph_->simplified()->StoreField(access));
}
}
return;
@@ -3157,8 +3254,7 @@ class RepresentationSelector {
if (lower<T>()) {
if (write_barrier_kind < access.write_barrier_kind) {
access.write_barrier_kind = write_barrier_kind;
- NodeProperties::ChangeOp(
- node, jsgraph_->simplified()->StoreElement(access));
+ ChangeOp(node, jsgraph_->simplified()->StoreElement(access));
}
}
return;
@@ -3177,24 +3273,21 @@ class RepresentationSelector {
if (value_type.Is(Type::SignedSmall())) {
ProcessInput<T>(node, 2, UseInfo::TruncatingWord32()); // value
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->StoreSignedSmallElement());
+ ChangeOp(node, simplified()->StoreSignedSmallElement());
}
} else if (value_type.Is(Type::Number())) {
ProcessInput<T>(node, 2, UseInfo::TruncatingFloat64()); // value
if (lower<T>()) {
Handle<Map> double_map = DoubleMapParameterOf(node->op());
- NodeProperties::ChangeOp(
- node,
- simplified()->TransitionAndStoreNumberElement(double_map));
+ ChangeOp(node,
+ simplified()->TransitionAndStoreNumberElement(double_map));
}
} else if (value_type.Is(Type::NonNumber())) {
ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
if (lower<T>()) {
Handle<Map> fast_map = FastMapParameterOf(node->op());
- NodeProperties::ChangeOp(
- node, simplified()->TransitionAndStoreNonNumberElement(
- fast_map, value_type));
+ ChangeOp(node, simplified()->TransitionAndStoreNonNumberElement(
+ fast_map, value_type));
}
} else {
ProcessInput<T>(node, 2, UseInfo::AnyTagged()); // value
@@ -3263,9 +3356,8 @@ class RepresentationSelector {
} else if (input_type.Is(Type::NullOrUndefined())) {
DeferReplacement(node, node->InputAt(1));
} else if (!input_type.Maybe(Type::NullOrUndefined())) {
- NodeProperties::ChangeOp(
- node, lowering->simplified()->ConvertReceiver(
- ConvertReceiverMode::kNotNullOrUndefined));
+ ChangeOp(node, lowering->simplified()->ConvertReceiver(
+ ConvertReceiverMode::kNotNullOrUndefined));
}
}
return;
@@ -3278,7 +3370,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTagged);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->StringToNumber());
+ ChangeOp(node, simplified()->StringToNumber());
}
} else if (truncation.IsUsedAsWord32()) {
if (InputIs(node, Type::NumberOrOddball())) {
@@ -3289,8 +3381,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->PlainPrimitiveToWord32());
+ ChangeOp(node, simplified()->PlainPrimitiveToWord32());
}
}
} else if (truncation.TruncatesOddballAndBigIntToNumber()) {
@@ -3302,8 +3393,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kFloat64);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- simplified()->PlainPrimitiveToFloat64());
+ ChangeOp(node, simplified()->PlainPrimitiveToFloat64());
}
}
} else {
@@ -3371,8 +3461,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberIsFinite());
+ ChangeOp(node, lowering->simplified()->NumberIsFinite());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3400,8 +3489,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(
- node, lowering->simplified()->NumberIsSafeInteger());
+ ChangeOp(node, lowering->simplified()->NumberIsSafeInteger());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3427,8 +3515,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node,
- lowering->simplified()->NumberIsInteger());
+ ChangeOp(node, lowering->simplified()->NumberIsInteger());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3456,7 +3543,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->NumberIsMinusZero());
+ ChangeOp(node, simplified()->NumberIsMinusZero());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3479,7 +3566,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kBit);
if (lower<T>()) {
- NodeProperties::ChangeOp(node, simplified()->NumberIsNaN());
+ ChangeOp(node, simplified()->NumberIsNaN());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
@@ -3520,14 +3607,9 @@ class RepresentationSelector {
VisitObjectIs<T>(node, Type::Undetectable(), lowering);
return;
}
- case IrOpcode::kArgumentsFrame: {
- SetOutput<T>(node, MachineType::PointerRepresentation());
- return;
- }
case IrOpcode::kArgumentsLength:
case IrOpcode::kRestLength: {
- VisitUnop<T>(node, UseInfo::Word(),
- MachineRepresentation::kTaggedSigned);
+ SetOutput<T>(node, MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kNewDoubleElements:
@@ -3537,8 +3619,8 @@ class RepresentationSelector {
return;
}
case IrOpcode::kNewArgumentsElements: {
- VisitBinop<T>(node, UseInfo::Word(), UseInfo::TaggedSigned(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop<T>(node, UseInfo::TaggedSigned(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kCheckFloat64Hole: {
@@ -3645,7 +3727,7 @@ class RepresentationSelector {
VisitInputs<T>(node);
return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kFrameState:
- return VisitFrameState<T>(node);
+ return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
return VisitStateValues<T>(node);
case IrOpcode::kObjectState:
@@ -3693,7 +3775,7 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
MachineType::PointerRepresentation());
if (lower<T>()) {
- NodeProperties::ChangeOp(
+ ChangeOp(
node,
lowering->simplified()->FindOrderedHashMapEntryForInt32Key());
}
@@ -3732,7 +3814,8 @@ class RepresentationSelector {
case IrOpcode::kArgumentsLengthState:
case IrOpcode::kUnreachable:
case IrOpcode::kRuntimeAbort:
-// All JavaScript operators except JSToNumber have uniform handling.
+// All JavaScript operators except JSToNumber, JSToNumberConvertBigInt,
+// JSToNumeric and JSWasmCall have uniform handling.
#define OPCODE_CASE(name, ...) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
JS_OBJECT_OP_LIST(OPCODE_CASE)
@@ -3748,6 +3831,9 @@ class RepresentationSelector {
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
case IrOpcode::kJSParseInt:
+ if (node->opcode() == IrOpcode::kJSWasmCall) {
+ return VisitJSWasmCall<T>(node, lowering);
+ }
VisitInputs<T>(node);
// Assume the output is tagged.
return SetOutput<T>(node, MachineRepresentation::kTagged);
@@ -3787,6 +3873,8 @@ class RepresentationSelector {
replacements_.push_back(replacement);
node->NullAllInputs(); // Node is now dead.
+
+ NotifyNodeReplaced(node, replacement);
}
void Kill(Node* node) {
@@ -3810,6 +3898,20 @@ class RepresentationSelector {
}
private:
+ void ChangeOp(Node* node, const Operator* new_op) {
+ compiler::NodeProperties::ChangeOp(node, new_op);
+
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ node);
+ }
+
+ void NotifyNodeReplaced(Node* node, Node* replacement) {
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ replacement);
+ }
+
JSGraph* jsgraph_;
Zone* zone_; // Temporary zone.
// Map from node to its uses that might need to be revisited.
@@ -3840,6 +3942,7 @@ class RepresentationSelector {
OperationTyper op_typer_; // helper for the feedback typer
TickCounter* const tick_counter_;
Linkage* const linkage_;
+ ObserveNodeManager* const observe_node_manager_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -4006,13 +4109,11 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
- Zone* zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter,
- Linkage* linkage)
+SimplifiedLowering::SimplifiedLowering(
+ JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
+ Linkage* linkage, ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
@@ -4021,13 +4122,14 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
node_origins_(node_origins),
poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
- linkage_(linkage) {}
+ linkage_(linkage),
+ observe_node_manager_(observe_node_manager) {}
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), broker_);
- RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
- source_positions_, node_origins_,
- tick_counter_, linkage_);
+ RepresentationSelector selector(
+ jsgraph(), broker_, zone_, &changer, source_positions_, node_origins_,
+ tick_counter_, linkage_, observe_node_manager_);
selector.Run(this);
}
@@ -4548,7 +4650,7 @@ void SimplifiedLowering::DoMax(Node* node, Operator const* op,
node->ReplaceInput(0, graph()->NewNode(op, lhs, rhs));
DCHECK_EQ(rhs, node->InputAt(1));
node->AppendInput(graph()->zone(), lhs);
- NodeProperties::ChangeOp(node, common()->Select(rep));
+ ChangeOp(node, common()->Select(rep));
}
void SimplifiedLowering::DoMin(Node* node, Operator const* op,
@@ -4559,7 +4661,7 @@ void SimplifiedLowering::DoMin(Node* node, Operator const* op,
node->InsertInput(graph()->zone(), 0, graph()->NewNode(op, lhs, rhs));
DCHECK_EQ(lhs, node->InputAt(1));
DCHECK_EQ(rhs, node->InputAt(2));
- NodeProperties::ChangeOp(node, common()->Select(rep));
+ ChangeOp(node, common()->Select(rep));
}
void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
@@ -4569,7 +4671,7 @@ void SimplifiedLowering::DoIntegral32ToBit(Node* node) {
node->ReplaceInput(0, graph()->NewNode(op, input, zero));
node->AppendInput(graph()->zone(), zero);
- NodeProperties::ChangeOp(node, op);
+ ChangeOp(node, op);
}
void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
@@ -4578,7 +4680,7 @@ void SimplifiedLowering::DoOrderedNumberToBit(Node* node) {
node->ReplaceInput(0, graph()->NewNode(machine()->Float64Equal(), input,
jsgraph()->Float64Constant(0.0)));
node->AppendInput(graph()->zone(), jsgraph()->Int32Constant(0));
- NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ ChangeOp(node, machine()->Word32Equal());
}
void SimplifiedLowering::DoNumberToBit(Node* node) {
@@ -4587,7 +4689,7 @@ void SimplifiedLowering::DoNumberToBit(Node* node) {
node->ReplaceInput(0, jsgraph()->Float64Constant(0.0));
node->AppendInput(graph()->zone(),
graph()->NewNode(machine()->Float64Abs(), input));
- NodeProperties::ChangeOp(node, machine()->Float64LessThan());
+ ChangeOp(node, machine()->Float64LessThan());
}
void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
@@ -4604,8 +4706,7 @@ void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Float64LessThan(), input, max), input,
max));
node->AppendInput(graph()->zone(), min);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kFloat64));
+ ChangeOp(node, common()->Select(MachineRepresentation::kFloat64));
}
void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
@@ -4622,8 +4723,7 @@ void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Float64LessThan(), input, max),
input, max),
min));
- NodeProperties::ChangeOp(node,
- machine()->Float64RoundTiesEven().placeholder());
+ ChangeOp(node, machine()->Float64RoundTiesEven().placeholder());
}
void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
@@ -4639,8 +4739,7 @@ void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
graph()->NewNode(machine()->Int32LessThan(), input, min),
min, input));
node->AppendInput(graph()->zone(), max);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kWord32));
+ ChangeOp(node, common()->Select(MachineRepresentation::kWord32));
}
void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
@@ -4651,8 +4750,7 @@ void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
0, graph()->NewNode(machine()->Uint32LessThanOrEqual(), input, max));
node->AppendInput(graph()->zone(), input);
node->AppendInput(graph()->zone(), max);
- NodeProperties::ChangeOp(node,
- common()->Select(MachineRepresentation::kWord32));
+ ChangeOp(node, common()->Select(MachineRepresentation::kWord32));
}
Node* SimplifiedLowering::ToNumberCode() {
@@ -4721,6 +4819,14 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
return to_numeric_operator_.get();
}
+void SimplifiedLowering::ChangeOp(Node* node, const Operator* new_op) {
+ compiler::NodeProperties::ChangeOp(node, new_op);
+
+ if (V8_UNLIKELY(observe_node_manager_ != nullptr))
+ observe_node_manager_->OnNodeChanged(kSimplifiedLoweringReducerName, node,
+ node);
+}
+
#undef TRACE
} // namespace compiler
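
As context for the constructor change above, a minimal sketch of a call site that passes the new optional ObserveNodeManager argument; only the SimplifiedLowering constructor signature is taken from this patch, and the data->... accessors are assumed placeholders for whatever the surrounding pipeline phase provides.

// Hypothetical call-site sketch; the data->... accessors are assumptions.
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
                            data->source_positions(), data->node_origins(),
                            data->info()->GetPoisoningMitigationLevel(),
                            &data->info()->tick_counter(), linkage,
                            data->observe_node_manager());
lowering.LowerAllNodes();
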
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index f38d3df132..54017b34f7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
@@ -19,6 +20,7 @@ namespace compiler {
// Forward declarations.
class NodeOriginTable;
+class ObserveNodeManager;
class RepresentationChanger;
class RepresentationSelector;
class SourcePositionTable;
@@ -30,7 +32,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter, Linkage* linkage);
+ TickCounter* tick_counter, Linkage* linkage,
+ ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -50,6 +53,17 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
void DoUnsigned32ToUint8Clamped(Node* node);
private:
+  // This nested class hides the method
+  // v8::internal::compiler::NodeProperties::ChangeOp, which must not be
+  // used directly by code in SimplifiedLowering.
+  // SimplifiedLowering code should call SimplifiedLowering::ChangeOp instead,
+  // so that the change is reported to the ObserveNodeManager, which is what
+  // supports the %ObserveNode intrinsic.
+ class NodeProperties : public compiler::NodeProperties {
+ static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
+ };
+ void ChangeOp(Node* node, const Operator* new_op);
+
JSGraph* const jsgraph_;
JSHeapBroker* broker_;
Zone* const zone_;
@@ -74,6 +88,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
TickCounter* const tick_counter_;
Linkage* const linkage_;
+ ObserveNodeManager* const observe_node_manager_;
+
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
Node* Int32Abs(Node* const node);
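
The effect of the shadowing class above can be shown with a short sketch (illustrative only; SomeLowering is a made-up member function): inside SimplifiedLowering, the unqualified name NodeProperties now names the nested class, so the raw ChangeOp cannot be reached by accident.

void SimplifiedLowering::SomeLowering(Node* node, const Operator* op) {
  // Unqualified lookup finds the nested NodeProperties, whose ChangeOp is
  // private and stubbed with UNREACHABLE(), so this would not compile:
  //   NodeProperties::ChangeOp(node, op);
  // The fully qualified form remains available where it is intentional:
  //   compiler::NodeProperties::ChangeOp(node, op);
  // Regular lowering code goes through the notifying wrapper instead:
  ChangeOp(node, op);
}
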
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 582a74db69..09e3a80ec4 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -972,13 +972,6 @@ struct SimplifiedOperatorGlobalCache final {
FindOrderedHashMapEntryForInt32KeyOperator
kFindOrderedHashMapEntryForInt32Key;
- struct ArgumentsFrameOperator final : public Operator {
- ArgumentsFrameOperator()
- : Operator(IrOpcode::kArgumentsFrame, Operator::kPure, "ArgumentsFrame",
- 0, 0, 0, 1, 0, 0) {}
- };
- ArgumentsFrameOperator kArgumentsFrame;
-
template <CheckForMinusZeroMode kMode>
struct ChangeFloat64ToTaggedOperator final
: public Operator1<CheckForMinusZeroMode> {
@@ -1225,7 +1218,6 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
PURE_OP_LIST(GET_FROM_CACHE)
EFFECT_DEPENDENT_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
-GET_FROM_CACHE(ArgumentsFrame)
GET_FROM_CACHE(FindOrderedHashMapEntry)
GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
GET_FROM_CACHE(LoadFieldByIndex)
@@ -1637,14 +1629,12 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
transition); // parameter
}
-const Operator* SimplifiedOperatorBuilder::ArgumentsLength(
- int formal_parameter_count) {
- return zone()->New<Operator1<int>>( // --
- IrOpcode::kArgumentsLength, // opcode
- Operator::kPure, // flags
- "ArgumentsLength", // name
- 1, 0, 0, 1, 0, 0, // counts
- formal_parameter_count); // parameter
+const Operator* SimplifiedOperatorBuilder::ArgumentsLength() {
+ return zone()->New<Operator>( // --
+ IrOpcode::kArgumentsLength, // opcode
+ Operator::kPure, // flags
+ "ArgumentsLength", // name
+ 0, 0, 0, 1, 0, 0); // counts
}
const Operator* SimplifiedOperatorBuilder::RestLength(
@@ -1653,7 +1643,7 @@ const Operator* SimplifiedOperatorBuilder::RestLength(
IrOpcode::kRestLength, // opcode
Operator::kPure, // flags
"RestLength", // name
- 1, 0, 0, 1, 0, 0, // counts
+ 0, 0, 0, 1, 0, 0, // counts
formal_parameter_count); // parameter
}
@@ -1775,7 +1765,7 @@ const Operator* SimplifiedOperatorBuilder::NewArgumentsElements(
IrOpcode::kNewArgumentsElements, // opcode
Operator::kEliminatable, // flags
"NewArgumentsElements", // name
- 2, 1, 0, 1, 1, 0, // counts
+ 1, 1, 0, 1, 1, 0, // counts
NewArgumentsElementsParameters(type,
formal_parameter_count)); // parameter
}
@@ -1950,6 +1940,11 @@ const Operator* SimplifiedOperatorBuilder::FastApiCall(
FastApiCallParameters(signature, feedback, descriptor));
}
+int FastApiCallNode::FastCallExtraInputCount() const {
+ return kFastTargetInputCount + kEffectAndControlInputCount +
+ (Parameters().signature()->HasOptions() ? 1 : 0);
+}
+
int FastApiCallNode::FastCallArgumentCount() const {
FastApiCallParameters p = FastApiCallParametersOf(node()->op());
const CFunctionInfo* signature = p.signature();
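
A rough before/after sketch of what the changed input counts mean for graph construction (illustrative; graph and simplified are assumed local builder pointers, and effect/control inputs are omitted): ArgumentsLength and RestLength no longer consume the removed ArgumentsFrame node, and NewArgumentsElements keeps only the arguments-length value input.

// Before: an ArgumentsFrame node fed both operators.
//   Node* frame  = graph->NewNode(simplified->ArgumentsFrame());
//   Node* length = graph->NewNode(simplified->ArgumentsLength(count), frame);
//   NewArgumentsElements(type, count) took {frame, length} as value inputs.
// After: the frame pointer is no longer threaded through the graph.
Node* length = graph->NewNode(simplified->ArgumentsLength());
// NewArgumentsElements(type, count) now takes just {length}.
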
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 815243c0ae..cd66b89ea4 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -11,6 +11,7 @@
#include "src/codegen/machine-type.h"
#include "src/codegen/tnode.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator.h"
@@ -971,14 +972,13 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberIsSafeInteger();
const Operator* ObjectIsInteger();
- const Operator* ArgumentsFrame();
- const Operator* ArgumentsLength(int formal_parameter_count);
+ const Operator* ArgumentsLength();
const Operator* RestLength(int formal_parameter_count);
const Operator* NewDoubleElements(AllocationType);
const Operator* NewSmiOrObjectElements(AllocationType);
- // new-arguments-elements frame, arguments count
+ // new-arguments-elements arguments-length
const Operator* NewArgumentsElements(CreateArgumentsType type,
int formal_parameter_count);
@@ -1133,19 +1133,19 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
static constexpr int kExtraInputCount =
kFastTargetInputCount + kFastReceiverInputCount;
- static constexpr int kHasErrorInputCount = 1;
static constexpr int kArityInputCount = 1;
static constexpr int kNewTargetInputCount = 1;
static constexpr int kHolderInputCount = 1;
static constexpr int kContextAndFrameStateInputCount = 2;
static constexpr int kEffectAndControlInputCount = 2;
- static constexpr int kFastCallExtraInputCount =
- kFastTargetInputCount + kHasErrorInputCount + kEffectAndControlInputCount;
+ int FastCallExtraInputCount() const;
static constexpr int kSlowCallExtraInputCount =
kSlowTargetInputCount + kArityInputCount + kNewTargetInputCount +
kSlowReceiverInputCount + kHolderInputCount +
kContextAndFrameStateInputCount + kEffectAndControlInputCount;
+ static constexpr int kSlowCallDataArgumentIndex = 3;
+
// This is the arity fed into FastApiCallArguments.
static constexpr int ArityForArgc(int c_arg_count, int js_arg_count) {
return c_arg_count + kFastTargetInputCount + js_arg_count +
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index ff66c3df71..78d57a92b9 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -126,6 +126,17 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
size_t size() const;
iterator begin() const { return iterator(node_); }
+ iterator begin_without_receiver() const {
+ return ++begin(); // Skip the receiver.
+ }
+ iterator begin_without_receiver_and_skip(int n_skips) {
+ iterator it = begin_without_receiver();
+ while (n_skips > 0 && !it.done()) {
+ ++it;
+ --n_skips;
+ }
+ return it;
+ }
iterator end() const { return iterator(); }
private:
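
A hypothetical usage sketch for the new helpers; only begin_without_receiver_and_skip and the existing iterator/TypedNode interface come from this header, while the surrounding names (frame_state, UseValue) are assumed. It walks the parameter state values of a frame state, skipping the receiver and the first two parameters.

StateValuesAccess access(frame_state.parameters());
for (auto it = access.begin_without_receiver_and_skip(2); !it.done(); ++it) {
  StateValuesAccess::TypedNode entry = *it;
  // entry.type is the MachineType recorded for the slot; entry.node may be
  // null for optimized-out slots in a sparse input mask.
  UseValue(entry.node, entry.type);
}
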
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 1e4acdc335..a2103139fa 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -680,10 +680,7 @@ Type Typer::Visitor::TypeIfException(Node* node) { return Type::NonInternal(); }
// Common operators.
Type Typer::Visitor::TypeParameter(Node* node) {
- Node* const start = node->InputAt(0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const parameter_count = start->op()->ValueOutputCount() - 4;
- DCHECK_LE(1, parameter_count);
+ StartNode start{node->InputAt(0)};
int const index = ParameterIndexOf(node->op());
if (index == Linkage::kJSCallClosureParamIndex) {
return Type::Function();
@@ -694,15 +691,15 @@ Type Typer::Visitor::TypeParameter(Node* node) {
// Parameter[this] can be the_hole for derived class constructors.
return Type::Union(Type::Hole(), Type::NonInternal(), typer_->zone());
}
- } else if (index == Linkage::GetJSCallNewTargetParamIndex(parameter_count)) {
+ } else if (index == start.NewTargetParameterIndex()) {
if (typer_->flags() & Typer::kNewTargetIsReceiver) {
return Type::Receiver();
} else {
return Type::Union(Type::Receiver(), Type::Undefined(), typer_->zone());
}
- } else if (index == Linkage::GetJSCallArgCountParamIndex(parameter_count)) {
+ } else if (index == start.ArgCountParameterIndex()) {
return Type::Range(0.0, FixedArray::kMaxLength, typer_->zone());
- } else if (index == Linkage::GetJSCallContextParamIndex(parameter_count)) {
+ } else if (index == start.ContextParameterIndex()) {
return Type::OtherInternal();
}
return Type::NonInternal();
@@ -992,6 +989,15 @@ Type Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type Typer::Visitor::TypeFastApiCall(Node* node) { return Type::Any(); }
+Type Typer::Visitor::TypeJSWasmCall(Node* node) {
+ const JSWasmCallParameters& op_params = JSWasmCallParametersOf(node->op());
+ const wasm::FunctionSig* wasm_signature = op_params.signature();
+ if (wasm_signature->return_count() > 0) {
+ return JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn());
+ }
+ return Type::Any();
+}
+
Type Typer::Visitor::TypeProjection(Node* node) {
Type const type = Operand(node, 0);
if (type.Is(Type::None())) return Type::None();
@@ -2323,10 +2329,6 @@ Type Typer::Visitor::TypeRestLength(Node* node) {
return TypeCache::Get()->kArgumentsLengthType;
}
-Type Typer::Visitor::TypeArgumentsFrame(Node* node) {
- return Type::ExternalPointer();
-}
-
Type Typer::Visitor::TypeNewDoubleElements(Node* node) {
return Type::OtherInternal();
}
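
The branches in TypeParameter above depend on the JS call parameter layout exposed by StartNode; a short annotated sketch follows (positions other than those visible in the hunk reflect the usual convention and should be read as assumptions).

StartNode start{node->InputAt(0)};
int index = ParameterIndexOf(node->op());
// Linkage::kJSCallClosureParamIndex   -> the called JSFunction (closure)
// 0                                   -> the receiver ("this")
// 1 .. formal parameter count         -> the declared parameters
// start.NewTargetParameterIndex()     -> new.target
// start.ArgCountParameterIndex()      -> the actual argument count
// start.ContextParameterIndex()       -> the context
USE(start, index);
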
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 7912b09ac0..2c6f05b44a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -153,6 +153,8 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
return kInternalizedString;
@@ -261,6 +263,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case WASM_MODULE_OBJECT_TYPE:
case WASM_STRUCT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_VALUE_OBJECT_TYPE:
case WEAK_CELL_TYPE:
DCHECK(!map.is_callable());
DCHECK(!map.is_undetectable());
@@ -269,6 +272,13 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
+ case JS_PROMISE_CONSTRUCTOR_TYPE:
+ case JS_REG_EXP_CONSTRUCTOR_TYPE:
+ case JS_ARRAY_CONSTRUCTOR_TYPE:
+#define TYPED_ARRAY_CONSTRUCTORS_SWITCH(Type, type, TYPE, Ctype) \
+ case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
+#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
DCHECK(!map.is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
@@ -302,6 +312,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
+ case REG_EXP_BOILERPLATE_DESCRIPTION_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 0960d34917..f3cd4789e7 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -396,11 +396,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(1, input_count);
// Parameter has an input that produces enough values.
int const index = ParameterIndexOf(node->op());
- Node* const start = NodeProperties::GetValueInput(node, 0);
- CHECK_EQ(IrOpcode::kStart, start->opcode());
+ StartNode start{NodeProperties::GetValueInput(node, 0)};
// Currently, parameter indices start at -1 instead of 0.
CHECK_LE(-1, index);
- CHECK_LT(index + 1, start->op()->ValueOutputCount());
+ CHECK_LE(index, start.LastParameterIndex_MaybeNonStandardLayout());
CheckTypeIs(node, Type::Any());
break;
}
@@ -536,29 +535,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(0, control_count);
CHECK_EQ(0, effect_count);
CHECK_EQ(6, input_count);
- // Check that the parameters and registers are kStateValues or
- // kTypedStateValues.
- for (int i = 0; i < 2; ++i) {
- CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kStateValues ||
- NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kTypedStateValues);
- }
+
+ FrameState state{node};
+ CHECK(state.parameters()->opcode() == IrOpcode::kStateValues ||
+ state.parameters()->opcode() == IrOpcode::kTypedStateValues);
+ CHECK(state.locals()->opcode() == IrOpcode::kStateValues ||
+ state.locals()->opcode() == IrOpcode::kTypedStateValues);
// Checks that the state input is empty for all but kInterpretedFunction
// frames, where it should have size one.
{
- const FrameStateInfo& state_info = FrameStateInfoOf(node->op());
- const FrameStateFunctionInfo* func_info = state_info.function_info();
+ const FrameStateFunctionInfo* func_info =
+ state.frame_state_info().function_info();
CHECK_EQ(func_info->parameter_count(),
- StateValuesAccess(node->InputAt(kFrameStateParametersInput))
- .size());
- CHECK_EQ(
- func_info->local_count(),
- StateValuesAccess(node->InputAt(kFrameStateLocalsInput)).size());
-
- Node* accumulator = node->InputAt(kFrameStateStackInput);
- if (func_info->type() == FrameStateType::kInterpretedFunction) {
+ StateValuesAccess(state.parameters()).size());
+ CHECK_EQ(func_info->local_count(),
+ StateValuesAccess(state.locals()).size());
+
+ Node* accumulator = state.stack();
+ if (func_info->type() == FrameStateType::kUnoptimizedFunction) {
// The accumulator (InputAt(2)) cannot be kStateValues.
// It can be kTypedStateValues (to signal the type) and it can have
// other Node types including that of the optimized_out HeapConstant.
@@ -1232,12 +1227,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kArgumentsLength:
case IrOpcode::kRestLength:
- CheckValueInputIs(node, 0, Type::ExternalPointer());
CheckTypeIs(node, TypeCache::Get()->kArgumentsLengthType);
break;
- case IrOpcode::kArgumentsFrame:
- CheckTypeIs(node, Type::ExternalPointer());
- break;
case IrOpcode::kNewDoubleElements:
case IrOpcode::kNewSmiOrObjectElements:
CheckValueInputIs(node, 0,
@@ -1245,8 +1236,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::OtherInternal());
break;
case IrOpcode::kNewArgumentsElements:
- CheckValueInputIs(node, 0, Type::ExternalPointer());
- CheckValueInputIs(node, 1,
+ CheckValueInputIs(node, 0,
Type::Range(0.0, FixedArray::kMaxLength, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
@@ -1627,6 +1617,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::ExternalPointer()); // callee
CheckValueInputIs(node, 1, Type::Any()); // receiver
break;
+ case IrOpcode::kJSWasmCall:
+ CHECK_GE(value_count, 3);
+ CheckTypeIs(node, Type::Any());
+ CheckValueInputIs(node, 0, Type::Any()); // callee
+ break;
// Machine operators
// -----------------------
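
For orientation when reading the FrameState hunks in this patch, the named input constants correspond to the literal indices they replace; 2 through 5 are visible in the simplified-lowering hunks above, while 0 and 1 (parameters and locals) are assumed from the same convention.

// FrameState value-input layout as used via the FrameState wrapper:
//   kFrameStateParametersInput  == 0   // StateValues / TypedStateValues
//   kFrameStateLocalsInput      == 1   // StateValues / TypedStateValues
//   kFrameStateStackInput       == 2   // the accumulator
//   kFrameStateContextInput     == 3
//   kFrameStateFunctionInput    == 4   // the closure
//   kFrameStateOuterStateInput  == 5   // outer FrameState (or start)
FrameState state{node};
Node* accumulator = state.stack();  // previously node->InputAt(2)
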
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 3b72da176e..f4e99169e4 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -81,17 +81,6 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-// We would like to use gasm_->Call() to implement this macro,
-// but this doesn't work currently when we try to call it from functions
-// which set IfSuccess/IfFailure control paths (e.g. within Throw()).
-// TODO(manoskouk): Maybe clean this up at some point?
-#define CALL_BUILTIN(name, ...) \
- SetEffect(graph()->NewNode( \
- mcgraph()->common()->Call(GetBuiltinCallDescriptor<name##Descriptor>( \
- this, StubCallMode::kCallBuiltinPointer)), \
- GetBuiltinPointerTarget(Builtins::k##name), ##__VA_ARGS__, effect(), \
- control()))
-
#define LOAD_INSTANCE_FIELD(name, type) \
gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
@@ -119,7 +108,7 @@ MachineType assert_size(int expected_size, MachineType type) {
LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
#define STORE_RAW(base, offset, val, rep, barrier) \
- STORE_RAW_NODE_OFFSET(base, gasm_->Int32Constant(offset), val, rep, barrier)
+ STORE_RAW_NODE_OFFSET(base, Int32Constant(offset), val, rep, barrier)
#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
gasm_->Store(StoreRepresentation(rep, barrier), base, node_offset, val)
@@ -162,25 +151,107 @@ bool ContainsInt64(const wasm::FunctionSig* sig) {
return false;
}
-template <typename BuiltinDescriptor>
-CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
- StubCallMode stub_mode) {
- BuiltinDescriptor interface_descriptor;
+constexpr Builtins::Name WasmRuntimeStubIdToBuiltinName(
+ wasm::WasmCode::RuntimeStubId runtime_stub_id) {
+ switch (runtime_stub_id) {
+#define DEF_CASE(name) \
+ case wasm::WasmCode::k##name: \
+ return Builtins::k##name;
+#define DEF_TRAP_CASE(name) DEF_CASE(ThrowWasm##name)
+ WASM_RUNTIME_STUB_LIST(DEF_CASE, DEF_TRAP_CASE)
+#undef DEF_CASE
+#undef DEF_TRAP_CASE
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE();
+#else
+ return Builtins::kAbort;
+#endif
+ }
+}
+
+CallDescriptor* GetBuiltinCallDescriptor(Builtins::Name name, Zone* zone,
+ StubCallMode stub_mode,
+ bool needs_frame_state = false) {
+ CallInterfaceDescriptor interface_descriptor =
+ Builtins::CallInterfaceDescriptorFor(name);
return Linkage::GetStubCallDescriptor(
- builder->mcgraph()->zone(), // zone
+ zone, // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- stub_mode); // stub call mode
+ needs_frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags, // flags
+ Operator::kNoProperties, // properties
+ stub_mode); // stub call mode
}
+
+Node* GetBuiltinPointerTarget(MachineGraph* mcgraph,
+ Builtins::Name builtin_id) {
+ static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
+ return mcgraph->graph()->NewNode(
+ mcgraph->common()->NumberConstant(builtin_id));
+}
+
} // namespace
+JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
+ : result_needs_conversion_(wasm_signature->return_count() == 1 &&
+ wasm_signature->GetReturn().kind() ==
+ wasm::kI64) {
+ arg_needs_conversion_.resize(wasm_signature->parameter_count());
+ for (size_t i = 0; i < wasm_signature->parameter_count(); i++) {
+ wasm::ValueType type = wasm_signature->GetParam(i);
+ arg_needs_conversion_[i] = type.kind() == wasm::kI64;
+ }
+}
+
class WasmGraphAssembler : public GraphAssembler {
public:
WasmGraphAssembler(MachineGraph* mcgraph, Zone* zone)
: GraphAssembler(mcgraph, zone) {}
+ template <typename... Args>
+ Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id, Args*... args) {
+ auto* call_descriptor = GetBuiltinCallDescriptor(
+ WasmRuntimeStubIdToBuiltinName(stub_id), temp_zone(),
+ StubCallMode::kCallWasmRuntimeStub);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ stub_id, RelocInfo::WASM_STUB_CALL);
+ return Call(call_descriptor, call_target, args...);
+ }
+
+ template <typename... Args>
+ Node* CallBuiltin(Builtins::Name name, Args*... args) {
+ // We would like to use gasm_->Call() to implement this method,
+ // but this doesn't work currently when we try to call it from functions
+ // which set IfSuccess/IfFailure control paths (e.g. within Throw()).
+ // TODO(manoskouk): Maybe clean this up at some point and unite with
+ // CallRuntimeStub?
+ auto* call_descriptor = GetBuiltinCallDescriptor(
+ name, temp_zone(), StubCallMode::kCallBuiltinPointer);
+ Node* call_target = GetBuiltinPointerTarget(mcgraph(), name);
+ Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, args..., effect(), control());
+ InitializeEffectControl(call, control());
+ return call;
+ }
+
+ Node* Branch(Node* cond, Node** true_node, Node** false_node,
+ BranchHint hint) {
+ DCHECK_NOT_NULL(cond);
+ Node* branch =
+ graph()->NewNode(mcgraph()->common()->Branch(hint), cond, control());
+ *true_node = graph()->NewNode(mcgraph()->common()->IfTrue(), branch);
+ *false_node = graph()->NewNode(mcgraph()->common()->IfFalse(), branch);
+ return branch;
+ }
+
+ Node* NumberConstant(volatile double value) {
+ return graph()->NewNode(mcgraph()->common()->NumberConstant(value));
+ }
+
// Helper functions for dealing with HeapObjects.
// Rule of thumb: if access to a given field in an object is required in
// at least two places, put a helper function here.
@@ -388,8 +459,7 @@ Node* WasmGraphBuilder::Start(unsigned params) {
}
Node* WasmGraphBuilder::Param(unsigned index) {
- return graph()->NewNode(mcgraph()->common()->Parameter(index),
- graph()->start());
+ return gasm_->Parameter(index);
}
Node* WasmGraphBuilder::Loop(Node* entry) {
@@ -461,6 +531,12 @@ void WasmGraphBuilder::AppendToPhi(Node* phi, Node* from) {
phi, mcgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
}
+template <typename... Nodes>
+Node* WasmGraphBuilder::Merge(Node* fst, Nodes*... args) {
+ return graph()->NewNode(this->mcgraph()->common()->Merge(1 + sizeof...(args)),
+ fst, args...);
+}
+
Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(mcgraph()->common()->Merge(count), count, controls);
}
@@ -481,22 +557,25 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_FULL_POINTER(
- BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(RootIndex::kNullValue));
+ // Technically speaking, this does not generate a valid graph since the effect
+ // of the last Load is not consumed.
+ // TODO(manoskouk): Remove this code once we implement Load elimination
+ // optimization for wasm.
+ if (!ref_null_node_.is_set()) {
+ Node* current_effect = effect();
+ Node* current_control = control();
+ SetEffectControl(mcgraph()->graph()->start());
+ ref_null_node_.set(LOAD_FULL_POINTER(
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kNullValue)));
+ SetEffectControl(current_effect, current_control);
+ }
+ return ref_null_node_.get();
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmRefFunc, RelocInfo::WASM_STUB_CALL);
-
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- mcgraph()->Uint32Constant(function_index), effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRefFunc,
+ gasm_->Uint32Constant(function_index));
}
Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
@@ -534,9 +613,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
Node* limit_address =
LOAD_INSTANCE_FIELD(StackLimitAddress, MachineType::Pointer());
- Node* limit = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
- mcgraph()->IntPtrConstant(0), limit_address, control()));
+ Node* limit = gasm_->Load(MachineType::Pointer(), limit_address, 0);
Node* check = SetEffect(graph()->NewNode(
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
@@ -882,8 +959,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
MachineOperatorBuilder* m = mcgraph()->machine();
switch (opcode) {
case wasm::kExprI32Eqz:
- op = m->Word32Equal();
- return graph()->NewNode(op, input, mcgraph()->Int32Constant(0));
+ return gasm_->Word32Equal(input, Int32Constant(0));
case wasm::kExprF32Abs:
op = m->Float32Abs();
break;
@@ -1084,8 +1160,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
break;
}
case wasm::kExprI64Eqz:
- op = m->Word64Equal();
- return graph()->NewNode(op, input, mcgraph()->Int64Constant(0));
+ return gasm_->Word64Equal(input, Int64Constant(0));
case wasm::kExprF32SConvertI64:
if (m->Is32()) {
return BuildF32SConvertI64(input);
@@ -1137,7 +1212,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
? BuildCcallConvertFloat(input, position, opcode)
: BuildIntConvertFloat(input, position, opcode);
case wasm::kExprRefIsNull:
- return graph()->NewNode(m->WordEqual(), input, RefNull());
+ return gasm_->WordEqual(input, RefNull());
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -1171,29 +1246,14 @@ Node* WasmGraphBuilder::Simd128Constant(const uint8_t value[16]) {
return graph()->NewNode(mcgraph()->machine()->S128Const(value));
}
-namespace {
-Node* Branch(MachineGraph* mcgraph, Node* cond, Node** true_node,
- Node** false_node, Node* control, BranchHint hint) {
- DCHECK_NOT_NULL(cond);
- DCHECK_NOT_NULL(control);
- Node* branch =
- mcgraph->graph()->NewNode(mcgraph->common()->Branch(hint), cond, control);
- *true_node = mcgraph->graph()->NewNode(mcgraph->common()->IfTrue(), branch);
- *false_node = mcgraph->graph()->NewNode(mcgraph->common()->IfFalse(), branch);
- return branch;
-}
-} // namespace
-
Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, control(),
- BranchHint::kNone);
+ return gasm_->Branch(cond, true_node, false_node, BranchHint::kNone);
}
Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, control(),
- BranchHint::kFalse);
+ return gasm_->Branch(cond, true_node, false_node, BranchHint::kFalse);
}
TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
@@ -1247,9 +1307,7 @@ Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
if (val == 0) {
return TrapIfFalse(reason, node, position);
} else {
- return TrapIfTrue(reason,
- graph()->NewNode(mcgraph()->machine()->Word32Equal(),
- node, mcgraph()->Int32Constant(val)),
+ return TrapIfTrue(reason, gasm_->Word32Equal(node, Int32Constant(val)),
position);
}
}
@@ -1266,9 +1324,7 @@ Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
wasm::WasmCodePosition position) {
Int64Matcher m(node);
if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
- return TrapIfTrue(reason,
- graph()->NewNode(mcgraph()->machine()->Word64Equal(), node,
- mcgraph()->Int64Constant(val)),
+ return TrapIfTrue(reason, gasm_->Word64Equal(node, Int64Constant(val)),
position);
}
@@ -1300,7 +1356,7 @@ Node* WasmGraphBuilder::Return(Vector<Node*> vals) {
unsigned count = static_cast<unsigned>(vals.size());
base::SmallVector<Node*, 8> buf(count + 3);
- buf[0] = mcgraph()->Int32Constant(0);
+ buf[0] = Int32Constant(0);
if (count > 0) {
base::Memcpy(buf.data() + 1, vals.begin(), sizeof(void*) * count);
}
@@ -1327,11 +1383,9 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
Int32Matcher match(node);
if (match.HasResolvedValue()) {
int32_t masked = (match.ResolvedValue() & kMask32);
- if (match.ResolvedValue() != masked)
- node = mcgraph()->Int32Constant(masked);
+ if (match.ResolvedValue() != masked) node = Int32Constant(masked);
} else {
- node = graph()->NewNode(mcgraph()->machine()->Word32And(), node,
- mcgraph()->Int32Constant(kMask32));
+ node = gasm_->Word32And(node, Int32Constant(kMask32));
}
}
return node;
@@ -1344,11 +1398,9 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
Int64Matcher match(node);
if (match.HasResolvedValue()) {
int64_t masked = (match.ResolvedValue() & kMask64);
- if (match.ResolvedValue() != masked)
- node = mcgraph()->Int64Constant(masked);
+ if (match.ResolvedValue() != masked) node = Int64Constant(masked);
} else {
- node = graph()->NewNode(mcgraph()->machine()->Word64And(), node,
- mcgraph()->Int64Constant(kMask64));
+ node = gasm_->Word64And(node, Int64Constant(kMask64));
}
}
return node;
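// Standalone sketch (not V8 code; names invented for illustration) of the
// masking that MaskShiftCount32/MaskShiftCount64 emit: wasm defines shift
// counts modulo the operand width, so constant counts are folded and dynamic
// counts are masked with width - 1 before reaching the machine shift.
#include <cstdint>
uint32_t WasmShl32(uint32_t value, uint32_t count) { return value << (count & 31); }
uint64_t WasmShl64(uint64_t value, uint64_t count) { return value << (count & 63); }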
@@ -1381,21 +1433,21 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
bool isFloat = false;
switch (wasmtype.kind()) {
- case wasm::ValueType::kF64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ case wasm::kF64:
+ value = gasm_->BitcastFloat64ToInt64(node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::ValueType::kI64:
- result = mcgraph()->Int64Constant(0);
+ case wasm::kI64:
+ result = Int64Constant(0);
break;
- case wasm::ValueType::kF32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ case wasm::kF32:
+ value = gasm_->BitcastFloat32ToInt32(node);
isFloat = true;
V8_FALLTHROUGH;
- case wasm::ValueType::kI32:
- result = mcgraph()->Int32Constant(0);
+ case wasm::kI32:
+ result = Int32Constant(0);
break;
- case wasm::ValueType::kS128:
+ case wasm::kS128:
DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
break;
default:
@@ -1409,17 +1461,15 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (wasmtype == wasm::kWasmI64 && mem_rep < MachineRepresentation::kWord64) {
// In case we store the lower part of a WasmI64 expression, we can truncate
// the upper 32 bits.
- value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ value = gasm_->TruncateInt64ToInt32(value);
valueSizeInBytes = wasm::kWasmI32.element_size_bytes();
valueSizeInBits = 8 * valueSizeInBytes;
if (mem_rep == MachineRepresentation::kWord16) {
- value =
- graph()->NewNode(m->Word32Shl(), value, mcgraph()->Int32Constant(16));
+ value = gasm_->Word32Shl(value, Int32Constant(16));
}
} else if (wasmtype == wasm::kWasmI32 &&
mem_rep == MachineRepresentation::kWord16) {
- value =
- graph()->NewNode(m->Word32Shl(), value, mcgraph()->Int32Constant(16));
+ value = gasm_->Word32Shl(value, Int32Constant(16));
}
int i;
@@ -1428,10 +1478,10 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (ReverseBytesSupported(m, valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes(), value);
+ result = gasm_->Word32ReverseBytes(value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes(), value);
+ result = gasm_->Word64ReverseBytes(value);
break;
case 16:
result = graph()->NewNode(m->Simd128ReverseBytes(), value);
@@ -1452,44 +1502,36 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
- shiftLower = graph()->NewNode(m->Word64Shl(), value,
- mcgraph()->Int64Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word64Shr(), value,
- mcgraph()->Int64Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word64And(), shiftLower,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word64And(), shiftHigher,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
- result = graph()->NewNode(m->Word64Or(), result, lowerByte);
- result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ shiftLower = gasm_->Word64Shl(value, Int64Constant(shiftCount));
+ shiftHigher = gasm_->Word64Shr(value, Int64Constant(shiftCount));
+ lowerByte = gasm_->Word64And(
+ shiftLower, Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word64And(
+ shiftHigher, Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = gasm_->Word64Or(result, lowerByte);
+ result = gasm_->Word64Or(result, higherByte);
} else {
- shiftLower = graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word32Shr(), value,
- mcgraph()->Int32Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word32And(), shiftLower,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word32And(), shiftHigher,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
- result = graph()->NewNode(m->Word32Or(), result, lowerByte);
- result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ shiftLower = gasm_->Word32Shl(value, Int32Constant(shiftCount));
+ shiftHigher = gasm_->Word32Shr(value, Int32Constant(shiftCount));
+ lowerByte = gasm_->Word32And(
+ shiftLower, Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word32And(
+ shiftHigher, Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = gasm_->Word32Or(result, lowerByte);
+ result = gasm_->Word32Or(result, higherByte);
}
}
}
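// Standalone sketch (not V8 code) of the shift-and-mask byte swap the loop
// above builds node-by-node when no ReverseBytes machine operator is
// available: each iteration moves one byte from the low half into its
// mirrored position and one byte back the other way.
#include <cstdint>
uint32_t ReverseBytes32(uint32_t value) {
  uint32_t result = 0;
  for (uint32_t i = 0, shift = 24; i < 16; i += 8, shift -= 16) {
    result |= (value << shift) & (0xFFu << (24 - i));  // low byte moves up
    result |= (value >> shift) & (0xFFu << i);         // high byte moves down
  }
  return result;
}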
if (isFloat) {
switch (wasmtype.kind()) {
- case wasm::ValueType::kF64:
- result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ case wasm::kF64:
+ result = gasm_->BitcastInt64ToFloat64(result);
break;
- case wasm::ValueType::kF32:
- result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ case wasm::kF32:
+ result = gasm_->BitcastInt32ToFloat32(result);
break;
default:
UNREACHABLE();
@@ -1512,19 +1554,19 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ value = gasm_->BitcastFloat64ToInt64(node);
isFloat = true;
V8_FALLTHROUGH;
case MachineRepresentation::kWord64:
- result = mcgraph()->Int64Constant(0);
+ result = Int64Constant(0);
break;
case MachineRepresentation::kFloat32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ value = gasm_->BitcastFloat32ToInt32(node);
isFloat = true;
V8_FALLTHROUGH;
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord16:
- result = mcgraph()->Int32Constant(0);
+ result = Int32Constant(0);
break;
case MachineRepresentation::kWord8:
// No need to change endianness for byte size, return original node
@@ -1543,16 +1585,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
if (ReverseBytesSupported(m, valueSizeInBytes < 4 ? 4 : valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 2:
- result =
- graph()->NewNode(m->Word32ReverseBytes(),
- graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(16)));
+ result = gasm_->Word32ReverseBytes(
+ gasm_->Word32Shl(value, Int32Constant(16)));
break;
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes(), value);
+ result = gasm_->Word32ReverseBytes(value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes(), value);
+ result = gasm_->Word64ReverseBytes(value);
break;
case 16:
result = graph()->NewNode(m->Simd128ReverseBytes(), value);
@@ -1572,33 +1612,25 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
DCHECK_EQ(0, (shiftCount + 8) % 16);
if (valueSizeInBits > 32) {
- shiftLower = graph()->NewNode(m->Word64Shl(), value,
- mcgraph()->Int64Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word64Shr(), value,
- mcgraph()->Int64Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word64And(), shiftLower,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word64And(), shiftHigher,
- mcgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
- result = graph()->NewNode(m->Word64Or(), result, lowerByte);
- result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ shiftLower = gasm_->Word64Shl(value, Int64Constant(shiftCount));
+ shiftHigher = gasm_->Word64Shr(value, Int64Constant(shiftCount));
+ lowerByte = gasm_->Word64And(
+ shiftLower, Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word64And(
+ shiftHigher, Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = gasm_->Word64Or(result, lowerByte);
+ result = gasm_->Word64Or(result, higherByte);
} else {
- shiftLower = graph()->NewNode(m->Word32Shl(), value,
- mcgraph()->Int32Constant(shiftCount));
- shiftHigher = graph()->NewNode(m->Word32Shr(), value,
- mcgraph()->Int32Constant(shiftCount));
- lowerByte = graph()->NewNode(
- m->Word32And(), shiftLower,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
- << (valueSizeInBits - 8 - i)));
- higherByte = graph()->NewNode(
- m->Word32And(), shiftHigher,
- mcgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
- result = graph()->NewNode(m->Word32Or(), result, lowerByte);
- result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ shiftLower = gasm_->Word32Shl(value, Int32Constant(shiftCount));
+ shiftHigher = gasm_->Word32Shr(value, Int32Constant(shiftCount));
+ lowerByte = gasm_->Word32And(
+ shiftLower, Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = gasm_->Word32And(
+ shiftHigher, Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = gasm_->Word32Or(result, lowerByte);
+ result = gasm_->Word32Or(result, higherByte);
}
}
}
@@ -1606,10 +1638,10 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
if (isFloat) {
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
- result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ result = gasm_->BitcastInt64ToFloat64(result);
break;
case MachineRepresentation::kFloat32:
- result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ result = gasm_->BitcastInt32ToFloat32(result);
break;
default:
UNREACHABLE();
@@ -1626,19 +1658,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
// result = (x << machine_width - type_width) >> (machine_width -
// type_width)
if (wasmtype == wasm::kWasmI64) {
- shiftBitCount = mcgraph()->Int32Constant(64 - valueSizeInBits);
- result = graph()->NewNode(
- m->Word64Sar(),
- graph()->NewNode(m->Word64Shl(),
- graph()->NewNode(m->ChangeInt32ToInt64(), result),
- shiftBitCount),
+ shiftBitCount = Int32Constant(64 - valueSizeInBits);
+ result = gasm_->Word64Sar(
+ gasm_->Word64Shl(gasm_->ChangeInt32ToInt64(result), shiftBitCount),
shiftBitCount);
} else if (wasmtype == wasm::kWasmI32) {
- shiftBitCount = mcgraph()->Int32Constant(32 - valueSizeInBits);
- result = graph()->NewNode(
- m->Word32Sar(),
- graph()->NewNode(m->Word32Shl(), result, shiftBitCount),
- shiftBitCount);
+ shiftBitCount = Int32Constant(32 - valueSizeInBits);
+ result = gasm_->Word32Sar(gasm_->Word32Shl(result, shiftBitCount),
+ shiftBitCount);
}
}
}
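// Standalone sketch (not V8 code) of the shl/sar sign extension built above:
// a value loaded with fewer significant bits than the target width is shifted
// to the top of the word and arithmetically shifted back down, which relies
// on arithmetic right shift (two's-complement targets).
#include <cstdint>
int32_t SignExtendTo32(uint32_t value, int value_size_in_bits) {
  int shift = 32 - value_size_in_bits;
  return static_cast<int32_t>(value << shift) >> shift;
}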
@@ -1651,20 +1678,20 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
wasm::kExprF32ReinterpretI32,
Binop(wasm::kExprI32Ior,
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
- mcgraph()->Int32Constant(0x7FFFFFFF)),
+ Int32Constant(0x7FFFFFFF)),
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
- mcgraph()->Int32Constant(0x80000000))));
+ Int32Constant(0x80000000))));
return result;
}
Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
if (mcgraph()->machine()->Is64()) {
- return gasm_->BitcastInt64ToFloat64(gasm_->Word64Or(
- gasm_->Word64And(gasm_->BitcastFloat64ToInt64(left),
- gasm_->Int64Constant(0x7FFFFFFFFFFFFFFF)),
- gasm_->Word64And(gasm_->BitcastFloat64ToInt64(right),
- gasm_->Int64Constant(0x8000000000000000))));
+ return gasm_->BitcastInt64ToFloat64(
+ gasm_->Word64Or(gasm_->Word64And(gasm_->BitcastFloat64ToInt64(left),
+ Int64Constant(0x7FFFFFFFFFFFFFFF)),
+ gasm_->Word64And(gasm_->BitcastFloat64ToInt64(right),
+ Int64Constant(0x8000000000000000))));
}
DCHECK(mcgraph()->machine()->Is32());
@@ -1673,8 +1700,8 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
Node* high_word_right = gasm_->Float64ExtractHighWord32(right);
Node* new_high_word = gasm_->Word32Or(
- gasm_->Word32And(high_word_left, gasm_->Int32Constant(0x7FFFFFFF)),
- gasm_->Word32And(high_word_right, gasm_->Int32Constant(0x80000000)));
+ gasm_->Word32And(high_word_left, Int32Constant(0x7FFFFFFF)),
+ gasm_->Word32And(high_word_right, Int32Constant(0x80000000)));
return gasm_->Float64InsertHighWord32(left, new_high_word);
}
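// Standalone sketch (not V8 code) of the bit-level copysign that
// BuildF32CopySign/BuildF64CopySign assemble: keep the magnitude bits of the
// left operand and splice in the sign bit of the right operand.
#include <cstdint>
#include <cstring>
float F32CopySign(float magnitude, float sign) {
  uint32_t m, s;
  std::memcpy(&m, &magnitude, sizeof(m));
  std::memcpy(&s, &sign, sizeof(s));
  uint32_t bits = (m & 0x7FFFFFFFu) | (s & 0x80000000u);
  float result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}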
@@ -1966,29 +1993,23 @@ Node* WasmGraphBuilder::BuildIntConvertFloat(Node* input,
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(gasm_->ChangeFloat32ToFloat64(input));
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF64(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(input);
}
Node* WasmGraphBuilder::BuildI32AsmjsUConvertF32(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(gasm_->ChangeFloat32ToFloat64(input));
}
Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js must use the wacky JS semantics.
- return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+ return gasm_->TruncateFloat64ToWord32(input);
}
Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
@@ -1998,7 +2019,7 @@ Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
return BuildCCall(&sig, function, stack_slot_param);
}
@@ -2118,12 +2139,10 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(type, stack_slot, 0);
}
Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
@@ -2158,17 +2177,14 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
ElementSizeInBytes(result_type.representation()));
Node* stack_slot =
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(parameter_representation, kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input, effect(), control()));
+ auto store_rep =
+ StoreRepresentation(parameter_representation, kNoWriteBarrier);
+ gasm_->Store(store_rep, stack_slot, 0, input);
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
BuildCCall(&sig, function, stack_slot);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(result_type, stack_slot, 0);
}
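// Standalone analogue (not V8 code; helper name invented) of the stack-slot
// round trip built above: the operand is spilled to a slot, an out-of-line C
// function reads it and writes the converted result back into the same
// memory, and the generated code reloads the result from the slot.
#include <cstdint>
#include <cstring>
static void ConvertInt64ToFloat64(void* slot) {  // stand-in for the C call
  int64_t in;
  std::memcpy(&in, slot, sizeof(in));
  double out = static_cast<double>(in);
  std::memcpy(slot, &out, sizeof(out));
}
double Int64ToFloat64ViaSlot(int64_t input) {
  alignas(8) unsigned char slot[8];
  std::memcpy(slot, &input, sizeof(input));
  ConvertInt64ToFloat64(slot);
  double result;
  std::memcpy(&result, slot, sizeof(result));
  return result;
}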
namespace {
@@ -2205,20 +2221,16 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
ElementSizeInBytes(float_ty.representation()));
Node* stack_slot =
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
- SetEffect(graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
- effect(), control()));
+ auto store_rep =
+ StoreRepresentation(float_ty.representation(), kNoWriteBarrier);
+ gasm_->Store(store_rep, stack_slot, 0, input);
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function =
- graph()->NewNode(mcgraph()->common()->ExternalConstant(call_ref));
+ Node* function = gasm_->ExternalConstant(call_ref);
Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty),
- stack_slot, Int32Constant(0), effect(),
- control()));
+ return gasm_->Load(int_ty, stack_slot, 0);
}
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
@@ -2231,9 +2243,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false);
Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
- Node* load =
- SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty), stack_slot,
- Int32Constant(0), effect(), control()));
+ Node* load = gasm_->Load(int_ty, stack_slot, 0);
Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2241,22 +2251,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* WasmGraphBuilder::MemoryGrow(Node* input) {
needs_stack_check_ = true;
-
- WasmMemoryGrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), // zone
- interface_descriptor, // descriptor
- interface_descriptor.GetStackParameterCount(), // stack parameter count
- CallDescriptor::kNoFlags, // flags
- Operator::kNoProperties, // properties
- StubCallMode::kCallWasmRuntimeStub); // stub call mode
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmMemoryGrow, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- input, effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmMemoryGrow, input);
}
Node* WasmGraphBuilder::Throw(uint32_t exception_index,
@@ -2265,44 +2260,36 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
wasm::WasmCodePosition position) {
needs_stack_check_ = true;
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Node* create_parameters[] = {
- LoadExceptionTagFromTable(exception_index),
- BuildChangeUint31ToSmi(mcgraph()->Uint32Constant(encoded_size))};
- Node* except_obj =
- BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
- arraysize(create_parameters));
- SetSourcePosition(except_obj, position);
- Node* values_array = CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
- LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(
- RootIndex::kwasm_exception_values_symbol)),
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+
+ Node* values_array =
+ gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAllocateFixedArray,
+ gasm_->IntPtrConstant(encoded_size));
+ SetSourcePosition(values_array, position);
+
uint32_t index = 0;
const wasm::WasmExceptionSig* sig = exception->sig;
MachineOperatorBuilder* m = mcgraph()->machine();
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value = values[i];
switch (sig->GetParam(i).kind()) {
- case wasm::ValueType::kF32:
- value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
+ case wasm::kF32:
+ value = gasm_->BitcastFloat32ToInt32(value);
V8_FALLTHROUGH;
- case wasm::ValueType::kI32:
+ case wasm::kI32:
BuildEncodeException32BitValue(values_array, &index, value);
break;
- case wasm::ValueType::kF64:
- value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
+ case wasm::kF64:
+ value = gasm_->BitcastFloat64ToInt64(value);
V8_FALLTHROUGH;
- case wasm::ValueType::kI64: {
- Node* upper32 = graph()->NewNode(
- m->TruncateInt64ToInt32(),
+ case wasm::kI64: {
+ Node* upper32 = gasm_->TruncateInt64ToInt32(
Binop(wasm::kExprI64ShrU, value, Int64Constant(32)));
BuildEncodeException32BitValue(values_array, &index, upper32);
- Node* lower32 = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ Node* lower32 = gasm_->TruncateInt64ToInt32(value);
BuildEncodeException32BitValue(values_array, &index, lower32);
break;
}
- case wasm::ValueType::kS128:
+ case wasm::kS128:
BuildEncodeException32BitValue(
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(0), value));
@@ -2316,59 +2303,53 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, index);
- WasmThrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
- Node* call = SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- except_obj, effect(), control()));
- SetSourcePosition(call, position);
- return call;
+
+ Node* exception_tag = LoadExceptionTagFromTable(exception_index);
+
+ Node* throw_call = gasm_->CallRuntimeStub(wasm::WasmCode::kWasmThrow,
+ exception_tag, values_array);
+ SetSourcePosition(throw_call, position);
+ return throw_call;
}
void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
uint32_t* index,
Node* value) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- Node* upper_halfword_as_smi = BuildChangeUint31ToSmi(
- graph()->NewNode(machine->Word32Shr(), value, Int32Constant(16)));
+ Node* upper_halfword_as_smi =
+ BuildChangeUint31ToSmi(gasm_->Word32Shr(value, Int32Constant(16)));
STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, upper_halfword_as_smi);
++(*index);
- Node* lower_halfword_as_smi = BuildChangeUint31ToSmi(
- graph()->NewNode(machine->Word32And(), value, Int32Constant(0xFFFFu)));
+ Node* lower_halfword_as_smi =
+ BuildChangeUint31ToSmi(gasm_->Word32And(value, Int32Constant(0xFFFFu)));
STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, lower_halfword_as_smi);
++(*index);
}
Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
uint32_t* index) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
Node* upper =
BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
- upper = graph()->NewNode(machine->Word32Shl(), upper, Int32Constant(16));
+ upper = gasm_->Word32Shl(upper, Int32Constant(16));
Node* lower =
BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
(*index)++;
- Node* value = graph()->NewNode(machine->Word32Or(), upper, lower);
+ Node* value = gasm_->Word32Or(upper, lower);
return value;
}
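// Standalone sketch (not V8 code) of the encoding used for exception values:
// each 32-bit value is split into two 16-bit halves so that every element of
// the values array fits into a Smi; 64-bit values are stored as the upper
// then the lower 32 bits, i.e. four halves.
#include <cstdint>
#include <vector>
void EncodeI32(std::vector<uint16_t>* out, uint32_t value) {
  out->push_back(static_cast<uint16_t>(value >> 16));
  out->push_back(static_cast<uint16_t>(value & 0xFFFF));
}
uint32_t DecodeI32(const std::vector<uint16_t>& in, size_t* index) {
  uint32_t upper = in[(*index)++];
  uint32_t lower = in[(*index)++];
  return (upper << 16) | lower;
}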
@@ -2387,20 +2368,12 @@ Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
// TODO(v8:8091): Currently the message of the original exception is not being
// preserved when rethrown to the console. The pending message will need to be
// saved when caught and restored here while being rethrown.
- WasmThrowDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
- Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
- return gasm_->Call(call_descriptor, call_target, except_obj);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmRethrow, except_obj);
}
Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
Node* expected_tag) {
- MachineOperatorBuilder* machine = mcgraph()->machine();
- return graph()->NewNode(machine->WordEqual(), caught_tag, expected_tag);
+ return gasm_->WordEqual(caught_tag, expected_tag);
}
Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
@@ -2411,8 +2384,8 @@ Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
}
Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
- return CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
+ return gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, except_obj,
LOAD_FULL_POINTER(
BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(RootIndex::kwasm_exception_tag_symbol)),
@@ -2422,8 +2395,8 @@ Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
const wasm::WasmException* exception,
Vector<Node*> values) {
- Node* values_array = CALL_BUILTIN(
- WasmGetOwnProperty, except_obj,
+ Node* values_array = gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, except_obj,
LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(
RootIndex::kwasm_exception_values_symbol)),
@@ -2434,23 +2407,23 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
for (size_t i = 0; i < sig->parameter_count(); ++i) {
Node* value;
switch (sig->GetParam(i).kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
value = BuildDecodeException32BitValue(values_array, &index);
break;
- case wasm::ValueType::kI64:
+ case wasm::kI64:
value = BuildDecodeException64BitValue(values_array, &index);
break;
- case wasm::ValueType::kF32: {
+ case wasm::kF32: {
value = Unop(wasm::kExprF32ReinterpretI32,
BuildDecodeException32BitValue(values_array, &index));
break;
}
- case wasm::ValueType::kF64: {
+ case wasm::kF64: {
value = Unop(wasm::kExprF64ReinterpretI64,
BuildDecodeException64BitValue(values_array, &index));
break;
}
- case wasm::ValueType::kS128:
+ case wasm::kS128:
value = graph()->NewNode(
mcgraph()->machine()->I32x4Splat(),
BuildDecodeException32BitValue(values_array, &index));
@@ -2464,16 +2437,17 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
mcgraph()->machine()->I32x4ReplaceLane(3), value,
BuildDecodeException32BitValue(values_array, &index));
break;
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
values[i] = value;
@@ -2484,23 +2458,20 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
ZeroCheck32(wasm::kTrapDivByZero, right, position);
Node* before = control();
Node* denom_is_m1;
Node* denom_is_not_m1;
- BranchExpectFalse(
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
+ BranchExpectFalse(gasm_->Word32Equal(right, Int32Constant(-1)), &denom_is_m1,
+ &denom_is_not_m1);
SetControl(denom_is_m1);
TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
if (control() != denom_is_m1) {
- SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- control()));
+ SetControl(Merge(denom_is_not_m1, control()));
} else {
SetControl(before);
}
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
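// Scalar model (not V8 code) of the checks BuildI32DivS emits: wasm i32.div_s
// traps on a zero divisor and on the single unrepresentable quotient
// INT32_MIN / -1; every other case is plain hardware division.
#include <cstdint>
#include <limits>
#include <stdexcept>
int32_t WasmI32DivS(int32_t lhs, int32_t rhs) {
  if (rhs == 0) throw std::runtime_error("trap: integer divide by zero");
  if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1)
    throw std::runtime_error("trap: integer overflow");
  return lhs / rhs;
}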
Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
@@ -2509,28 +2480,24 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
ZeroCheck32(wasm::kTrapRemByZero, right, position);
- Diamond d(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
+ Diamond d(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(-1)), BranchHint::kFalse);
d.Chain(control());
- return d.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return d.Phi(MachineRepresentation::kWord32, Int32Constant(0),
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
}
Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
- return graph()->NewNode(m->Uint32Div(), left, right,
- ZeroCheck32(wasm::kTrapDivByZero, right, position));
+ ZeroCheck32(wasm::kTrapDivByZero, right, position);
+ return gasm_->Uint32Div(left, right);
}
Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = mcgraph()->machine();
- return graph()->NewNode(m->Uint32Mod(), left, right,
- ZeroCheck32(wasm::kTrapRemByZero, right, position));
+ ZeroCheck32(wasm::kTrapRemByZero, right, position);
+ return gasm_->Uint32Mod(left, right);
}
Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
@@ -2539,54 +2506,49 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
Int32Matcher mr(right);
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0) {
- return mcgraph()->Int32Constant(0);
+ return Int32Constant(0);
} else if (mr.ResolvedValue() == -1) {
// The result is the negation of the left input.
- return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
+ return gasm_->Int32Sub(Int32Constant(0), left);
}
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
// asm.js semantics return 0 on divide or mod by zero.
if (m->Int32DivIsSafe()) {
// The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Int32Div(), left, right, control());
+ return gasm_->Int32Div(left, right);
}
// Check denominator for zero.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
// Check the denominator for -1 (to avoid the minint / -1 case).
- Diamond n(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
+ Diamond n(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(-1)), BranchHint::kFalse);
n.Chain(z.if_false);
Node* div = graph()->NewNode(m->Int32Div(), left, right, n.if_false);
- Node* neg =
- graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
+ Node* neg = gasm_->Int32Sub(Int32Constant(0), left);
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0),
n.Phi(MachineRepresentation::kWord32, neg, div));
}
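// Scalar model (not V8 code) of the asm.js (x / y) | 0 semantics implemented
// above: division by zero yields 0, and a -1 divisor is lowered to 0 - lhs so
// that INT32_MIN / -1 wraps instead of trapping or faulting.
#include <cstdint>
int32_t AsmjsI32DivS(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return 0;
  if (rhs == -1) return static_cast<int32_t>(0u - static_cast<uint32_t>(lhs));
  return lhs / rhs;
}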
Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
CommonOperatorBuilder* c = mcgraph()->common();
MachineOperatorBuilder* m = mcgraph()->machine();
- Node* const zero = mcgraph()->Int32Constant(0);
+ Node* const zero = Int32Constant(0);
Int32Matcher mr(right);
if (mr.HasResolvedValue()) {
if (mr.ResolvedValue() == 0 || mr.ResolvedValue() == -1) {
return zero;
}
- return graph()->NewNode(m->Int32Mod(), left, right, control());
+ return gasm_->Int32Mod(left, right);
}
// General case for signed integer modulus, with optimization for (unknown)
@@ -2609,12 +2571,12 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
//
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
- Node* const minus_one = mcgraph()->Int32Constant(-1);
+ Node* const minus_one = Int32Constant(-1);
const Operator* const merge_op = c->Merge(2);
const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
- Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+ Node* check0 = gasm_->Int32LessThan(zero, right);
Node* branch0 =
graph()->NewNode(c->Branch(BranchHint::kTrue), check0, control());
@@ -2679,35 +2641,29 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
// asm.js semantics return 0 on divide or mod by zero.
if (m->Uint32DivIsSafe()) {
// The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Uint32Div(), left, right, control());
+ return gasm_->Uint32Div(left, right);
}
// Explicit check for x % 0.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0),
graph()->NewNode(mcgraph()->machine()->Uint32Div(), left, right,
z.if_false));
}
Node* WasmGraphBuilder::BuildI32AsmjsRemU(Node* left, Node* right) {
- MachineOperatorBuilder* m = mcgraph()->machine();
// asm.js semantics return 0 on divide or mod by zero.
// Explicit check for x % 0.
- Diamond z(
- graph(), mcgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ Diamond z(graph(), mcgraph()->common(),
+ gasm_->Word32Equal(right, Int32Constant(0)), BranchHint::kFalse);
z.Chain(control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Uint32Mod(), left, right,
z.if_false);
- return z.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
- rem);
+ return z.Phi(MachineRepresentation::kWord32, Int32Constant(0), rem);
}
Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
@@ -2720,20 +2676,17 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
Node* before = control();
Node* denom_is_m1;
Node* denom_is_not_m1;
- BranchExpectFalse(graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
- mcgraph()->Int64Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
+ BranchExpectFalse(gasm_->Word64Equal(right, Int64Constant(-1)), &denom_is_m1,
+ &denom_is_not_m1);
SetControl(denom_is_m1);
TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
std::numeric_limits<int64_t>::min(), position);
if (control() != denom_is_m1) {
- SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- control()));
+ SetControl(Merge(denom_is_not_m1, control()));
} else {
SetControl(before);
}
- return graph()->NewNode(mcgraph()->machine()->Int64Div(), left, right,
- control());
+ return gasm_->Int64Div(left, right);
}
Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
@@ -2744,16 +2697,14 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
}
ZeroCheck64(wasm::kTrapRemByZero, right, position);
Diamond d(mcgraph()->graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
- mcgraph()->Int64Constant(-1)));
+ gasm_->Word64Equal(right, Int64Constant(-1)));
d.Chain(control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Int64Mod(), left, right,
d.if_false);
- return d.Phi(MachineRepresentation::kWord64, mcgraph()->Int64Constant(0),
- rem);
+ return d.Phi(MachineRepresentation::kWord64, Int64Constant(0), rem);
}
Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
@@ -2762,8 +2713,8 @@ Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
return BuildDiv64Call(left, right, ExternalReference::wasm_uint64_div(),
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- return graph()->NewNode(mcgraph()->machine()->Uint64Div(), left, right,
- ZeroCheck64(wasm::kTrapDivByZero, right, position));
+ ZeroCheck64(wasm::kTrapDivByZero, right, position);
+ return gasm_->Uint64Div(left, right);
}
Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
wasm::WasmCodePosition position) {
@@ -2771,13 +2722,8 @@ Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
return BuildDiv64Call(left, right, ExternalReference::wasm_uint64_mod(),
MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- return graph()->NewNode(mcgraph()->machine()->Uint64Mod(), left, right,
- ZeroCheck64(wasm::kTrapRemByZero, right, position));
-}
-
-Node* WasmGraphBuilder::GetBuiltinPointerTarget(int builtin_id) {
- static_assert(std::is_same<Smi, BuiltinPtr>(), "BuiltinPtr must be Smi");
- return graph()->NewNode(mcgraph()->common()->NumberConstant(builtin_id));
+ ZeroCheck64(wasm::kTrapRemByZero, right, position);
+ return gasm_->Uint64Mod(left, right);
}
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
@@ -2792,14 +2738,12 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
Node* call = BuildCCall(&sig, function, stack_slot);
ZeroCheck32(trap_zero, call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
- return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
- stack_slot, mcgraph()->Int32Constant(0),
- effect(), control()));
+ return gasm_->Load(result_type, stack_slot, 0);
}
template <typename... Args>
@@ -2807,27 +2751,28 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Args... args) {
DCHECK_LE(sig->return_count(), 1);
DCHECK_EQ(sizeof...(args), sig->parameter_count());
- Node* const call_args[] = {function, args..., effect(), control()};
+ Node* call_args[] = {function, args..., effect(), control()};
auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(mcgraph()->zone(), sig);
- const Operator* op = mcgraph()->common()->Call(call_descriptor);
- return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
+ return gasm_->Call(call_descriptor, arraysize(call_args), call_args);
}
Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, const Operator* op) {
+ Node* instance_node, const Operator* op,
+ Node* frame_state) {
if (instance_node == nullptr) {
DCHECK_NOT_NULL(instance_node_);
instance_node = instance_node_.get();
}
needs_stack_check_ = true;
const size_t params = sig->parameter_count();
+ const size_t has_frame_state = frame_state != nullptr ? 1 : 0;
const size_t extra = 3; // instance_node, effect, and control.
- const size_t count = 1 + params + extra;
+ const size_t count = 1 + params + extra + has_frame_state;
// Reallocate the buffer to make space for extra inputs.
base::SmallVector<Node*, 16 + extra> inputs(count);
@@ -2839,8 +2784,9 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
if (params > 0) base::Memcpy(&inputs[2], &args[1], params * sizeof(Node*));
// Add effect and control inputs.
- inputs[params + 2] = effect();
- inputs[params + 3] = control();
+ if (has_frame_state != 0) inputs[params + 2] = frame_state;
+ inputs[params + has_frame_state + 2] = effect();
+ inputs[params + has_frame_state + 3] = control();
Node* call = graph()->NewNode(op, static_cast<int>(count), inputs.begin());
// Return calls have no effect output. Other calls are the new effect node.
@@ -2855,11 +2801,15 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
Vector<Node*> args, Vector<Node*> rets,
wasm::WasmCodePosition position,
Node* instance_node,
- UseRetpoline use_retpoline) {
+ UseRetpoline use_retpoline,
+ Node* frame_state) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
+ kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = BuildCallNode(sig, args, position, instance_node, op);
+ Node* call =
+ BuildCallNode(sig, args, position, instance_node, op, frame_state);
+ SetControl(call);
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
@@ -2907,10 +2857,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
// Load the target from the imported_targets array at a known offset.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * kSystemPointerSize), effect(),
- control()));
+ Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
+ func_index * kSystemPointerSize);
args[0] = target_node;
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -2943,9 +2891,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- func_index_times_pointersize, effect(), control()));
+ Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
+ func_index_times_pointersize);
args[0] = target_node;
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -3042,32 +2989,27 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
const wasm::FunctionSig* sig = env_->module->signature(sig_index);
- MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
- Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
+ Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
// Mask the key to prevent SSCA.
if (untrusted_code_mitigations_) {
// mask = ((key - size) & ~key) >> 31
- Node* neg_key =
- graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
- Node* masked_diff = graph()->NewNode(
- machine->Word32And(),
- graph()->NewNode(machine->Int32Sub(), key, ift_size), neg_key);
- Node* mask =
- graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
- key = graph()->NewNode(machine->Word32And(), key, mask);
- }
-
- Node* int32_scaled_key = Uint32ToUintptr(
- graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
-
- Node* loaded_sig = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- int32_scaled_key, effect(), control()));
+ Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
+ Node* masked_diff =
+ gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
+ Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
+ key = gasm_->Word32And(key, mask);
+ }
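// Standalone sketch (not V8 code) of the branch-free clamp built just above,
// assuming key and size are both below 2^31: an in-bounds key keeps all mask
// bits set, an out-of-bounds key collapses to 0, so a mispredicted bounds
// check cannot be used to read outside the table.
#include <cstdint>
uint32_t ClampIndex(uint32_t key, uint32_t size) {
  int32_t diff = static_cast<int32_t>(key - size);  // negative iff key < size
  int32_t mask = (diff & static_cast<int32_t>(~key)) >> 31;  // arithmetic shift
  return key & static_cast<uint32_t>(mask);
}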
+
+ Node* int32_scaled_key =
+ Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
+
+ Node* loaded_sig =
+ gasm_->Load(MachineType::Int32(), ift_sig_ids, int32_scaled_key);
// Check that the dynamic type of the function is a subtype of its static
// (table) type. Currently, the only subtyping between function types is
// $t <: funcref for all $t: function_type.
@@ -3076,15 +3018,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
env_->module->tables[table_index].type == wasm::kWasmFuncRef;
if (needs_typechecking) {
int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
- Node* sig_match = graph()->NewNode(machine->Word32Equal(), loaded_sig,
- Int32Constant(expected_sig_id));
+ Node* sig_match =
+ gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
} else {
// We still have to check that the entry is initialized.
// TODO(9495): Skip this check for non-nullable tables when they are
// allowed.
- Node* function_is_null =
- graph()->NewNode(machine->Word32Equal(), loaded_sig, Int32Constant(-1));
+ Node* function_is_null = gasm_->Word32Equal(loaded_sig, Int32Constant(-1));
TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
}
@@ -3096,9 +3037,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* intptr_scaled_key =
gasm_->IntMul(key_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
- Node* target = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Pointer()), ift_targets,
- intptr_scaled_key, effect(), control()));
+ Node* target =
+ gasm_->Load(MachineType::Pointer(), ift_targets, intptr_scaled_key);
args[0] = target;
const UseRetpoline use_retpoline =
@@ -3222,9 +3162,8 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// TODO(manoskouk): Find an elegant way to avoid allocating this pair for
// every call.
- Node* function_instance_node = CALL_BUILTIN(
- WasmAllocatePair, instance_node_.get(), callable,
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ Node* function_instance_node = gasm_->CallBuiltin(
+ Builtins::kWasmAllocatePair, instance_node_.get(), callable);
gasm_->Goto(&end_label, call_target, function_instance_node);
}
@@ -3302,10 +3241,10 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
Int32Matcher m(right);
if (m.HasResolvedValue()) {
return Binop(wasm::kExprI32Ror, left,
- mcgraph()->Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
+ Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
- Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
+ Binop(wasm::kExprI32Sub, Int32Constant(32), right));
}
}
@@ -3313,10 +3252,9 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have a Rol opcode.
// TODO(weiliang): support Word64Rol opcode in TurboFan.
Int64Matcher m(right);
- Node* inv_right =
- m.HasResolvedValue()
- ? mcgraph()->Int64Constant(64 - (m.ResolvedValue() & 0x3F))
- : Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right);
+ Node* inv_right = m.HasResolvedValue()
+ ? Int64Constant(64 - (m.ResolvedValue() & 0x3F))
+ : Binop(wasm::kExprI64Sub, Int64Constant(64), right);
return Binop(wasm::kExprI64Ror, left, inv_right);
}
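// Standalone sketch (not V8 code) of the rewrite above: since TurboFan lacks
// a rotate-left opcode, rotate-left is expressed through rotate-right as
// rol(x, n) == ror(x, (width - n) & (width - 1)).
#include <cstdint>
uint32_t Ror32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}
uint32_t Rol32(uint32_t x, uint32_t n) { return Ror32(x, (32 - n) & 31); }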
@@ -3334,6 +3272,11 @@ Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
: value;
}
+Node* WasmGraphBuilder::BuildChangeIntPtrToInt64(Node* value) {
+ return mcgraph()->machine()->Is32() ? gasm_->ChangeInt32ToInt64(value)
+ : value;
+}
+
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
return COMPRESS_POINTERS_BOOL
@@ -3345,9 +3288,8 @@ Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
return COMPRESS_POINTERS_BOOL
? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
- : graph()->NewNode(mcgraph()->machine()->WordShl(),
- Uint32ToUintptr(value),
- BuildSmiShiftBitsConstant());
+ : gasm_->WordShl(Uint32ToUintptr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
@@ -3355,7 +3297,7 @@ Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
- return gasm_->Int32Constant(kSmiShiftSize + kSmiTagSize);
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
}
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
@@ -3370,18 +3312,16 @@ Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
value = BuildChangeSmiToInt32(value);
return BuildChangeInt32ToIntPtr(value);
}
- return graph()->NewNode(mcgraph()->machine()->WordSar(), value,
- BuildSmiShiftBitsConstant());
+ return gasm_->WordSar(value, BuildSmiShiftBitsConstant());
}
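// Illustrative sketch (not V8 code) of the Smi arithmetic above, assuming a
// 64-bit build without pointer compression where kSmiTagSize == 1 and
// kSmiShiftSize == 31 (both are build-dependent constants): tagging shifts
// the 32-bit value into the upper word, untagging shifts it back down with an
// arithmetic shift.
#include <cstdint>
constexpr int kSmiShift = 31 + 1;  // kSmiShiftSize + kSmiTagSize (assumed)
intptr_t TagSmi(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiShift;
}
int32_t UntagSmi(intptr_t smi) { return static_cast<int32_t>(smi >> kSmiShift); }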
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
Node* max = mcgraph()->Uint32Constant(maxval);
- Node* check = graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(),
- value, max);
+ Node* check = gasm_->Uint32LessThanOrEqual(value, max);
Node* valsmi = BuildChangeUint31ToSmi(value);
- Node* maxsmi = graph()->NewNode(mcgraph()->common()->NumberConstant(maxval));
+ Node* maxsmi = gasm_->NumberConstant(maxval);
Diamond d(graph(), mcgraph()->common(), check, BranchHint::kTrue);
d.Chain(control());
return d.Phi(MachineRepresentation::kTagged, valsmi, maxsmi);
@@ -3529,12 +3469,10 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
Node** offset_node) {
DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- *base_node = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::UintPtr()),
- GetImportedMutableGlobals(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)), effect(),
- control()));
- *offset_node = mcgraph()->Int32Constant(0);
+ *base_node =
+ gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Int32Constant(global.index * sizeof(Address)));
+ *offset_node = Int32Constant(0);
} else {
if (globals_start_ == nullptr) {
// Load globals_start from the instance object at runtime.
@@ -3549,17 +3487,16 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
globals_start_ = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
instance_node_.get(),
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
+ Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
graph()->start(), graph()->start());
}
*base_node = globals_start_.get();
- *offset_node = mcgraph()->Int32Constant(global.offset);
+ *offset_node = Int32Constant(global.offset);
if (mem_type == MachineType::Simd128() && global.offset != 0) {
// TODO(titzer,bbudge): code generation for SIMD memory offsets is broken.
- *base_node = graph()->NewNode(mcgraph()->machine()->IntAdd(), *base_node,
- *offset_node);
- *offset_node = mcgraph()->Int32Constant(0);
+ *base_node = gasm_->IntAdd(*base_node, *offset_node);
+ *offset_node = Int32Constant(0);
}
}
}
@@ -3574,20 +3511,16 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
// For the offset we need the index of the global in the buffer, and then
// calculate the actual offset from the index. Load the index from the
// ImportedMutableGlobals array of the instance.
- Node* index = SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::UintPtr()),
- GetImportedMutableGlobals(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)),
- effect(), control()));
+ Node* index = gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(),
+ Int32Constant(global.index * sizeof(Address)));
// From the index, calculate the actual offset in the FixedArray. This
// is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
// wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
Node* index_times_tagged_size =
- graph()->NewNode(mcgraph()->machine()->IntMul(), Uint32ToUintptr(index),
- mcgraph()->Int32Constant(kTaggedSize));
- *offset = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), index_times_tagged_size,
+ gasm_->IntMul(Uint32ToUintptr(index), Int32Constant(kTaggedSize));
+ *offset = gasm_->IntAdd(
+ index_times_tagged_size,
mcgraph()->IntPtrConstant(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
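// The comment above spells out the element-offset computation; as a
// standalone formula (the header and tagged sizes are build-dependent, so
// they are parameters in this sketch rather than V8 constants):
#include <cstddef>
size_t FixedArrayElementOffset(size_t index, size_t header_size,
                               size_t tagged_size) {
  return header_size + index * tagged_size;
}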
@@ -3607,9 +3540,9 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
Node* result =
- graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
- mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
- result = BuildTruncateIntPtrToInt32(result);
+ gasm_->WordShr(mem_size, Int32Constant(wasm::kWasmPageSizeLog2));
+ result = env_->module->is_memory64 ? BuildChangeIntPtrToInt64(result)
+ : BuildTruncateIntPtrToInt32(result);
return result;
}
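// Standalone sketch (not V8 code) of the computation above: current_memory
// reports the size in wasm pages of 64 KiB, so the byte size is shifted right
// by kWasmPageSizeLog2 == 16.
#include <cstddef>
#include <cstdint>
uint64_t MemoryPagesFromBytes(size_t mem_size_in_bytes) {
  return static_cast<uint64_t>(mem_size_in_bytes) >> 16;
}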
@@ -3644,15 +3577,12 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
}
inputs[count++] =
mcgraph()->ExternalConstant(ExternalReference::Create(f)); // ref
- inputs[count++] = mcgraph()->Int32Constant(fun->nargs); // arity
+ inputs[count++] = Int32Constant(fun->nargs); // arity
inputs[count++] = js_context; // js_context
inputs[count++] = effect();
inputs[count++] = control();
- Node* call = mcgraph()->graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), count, inputs);
- SetEffect(call);
- return call;
+ return gasm_->Call(call_descriptor, count, inputs);
}
Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
@@ -3683,8 +3613,7 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- Node* result = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(mem_type), base, offset, effect(), control()));
+ Node* result = gasm_->Load(mem_type, base, offset);
#if defined(V8_TARGET_BIG_ENDIAN)
result = BuildChangeEndiannessLoad(result, mem_type, global.type);
#endif
@@ -3714,40 +3643,25 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
Node* base = nullptr;
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
- const Operator* op = mcgraph()->machine()->Store(
- StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
+ auto store_rep =
+ StoreRepresentation(mem_type.representation(), kNoWriteBarrier);
#if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type);
#endif
- return SetEffect(
- graph()->NewNode(op, base, offset, val, effect(), control()));
+
+ return gasm_->Store(store_rep, base, offset, val);
}
Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableGetDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmTableGet, RelocInfo::WASM_STUB_CALL);
-
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- mcgraph()->IntPtrConstant(table_index), index, effect(), control()));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableGet,
+ gasm_->IntPtrConstant(table_index), index);
}
Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableSetDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- // A direct call to a wasm runtime stub defined in this module.
- // Just encode the stub index. This will be patched at relocation.
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(call_descriptor, call_target,
- gasm_->IntPtrConstant(table_index), index, val);
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableSet,
+ gasm_->IntPtrConstant(table_index), index, val);
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
@@ -3782,7 +3696,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
Node* cond =
gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
TrapIfFalse(wasm::kTrapUnalignedAccess,
- gasm_->Word32Equal(cond, gasm_->Int32Constant(0)), position);
+ gasm_->Word32Equal(cond, Int32Constant(0)), position);
return index;
}
@@ -3816,8 +3730,8 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// In memory64 mode on 32-bit systems, the upper 32 bits must be zero for the
// bounds check to succeed.
if (kSystemPointerSize == kInt32Size && env_->module->is_memory64) {
- Node* high_word = gasm_->TruncateInt64ToInt32(
- gasm_->Word64Shr(index, gasm_->Int32Constant(32)));
+ Node* high_word =
+ gasm_->TruncateInt64ToInt32(gasm_->Word64Shr(index, Int32Constant(32)));
TrapIfTrue(wasm::kTrapMemOutOfBounds, high_word, position);
// Only use the low word for the following bounds check.
index = gasm_->TruncateInt64ToInt32(index);
@@ -3918,7 +3832,7 @@ Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
info = gasm_->StackSlot(size, size);
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(0), vals[0]);
+ Int32Constant(0), vals[0]);
}
Node* call = BuildCallToRuntime(Runtime::kWasmTraceExit, &info, 1);
@@ -3937,17 +3851,15 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
Node* effective_offset = gasm_->IntAdd(gasm_->UintPtrConstant(offset), index);
auto store = [&](int field_offset, MachineRepresentation rep, Node* data) {
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(field_offset), data);
+ Int32Constant(field_offset), data);
};
// Store effective_offset, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, offset),
MachineType::PointerRepresentation(), effective_offset);
store(offsetof(wasm::MemoryTracingInfo, is_store),
- MachineRepresentation::kWord8,
- mcgraph()->Int32Constant(is_store ? 1 : 0));
+ MachineRepresentation::kWord8, Int32Constant(is_store ? 1 : 0));
store(offsetof(wasm::MemoryTracingInfo, mem_rep),
- MachineRepresentation::kWord8,
- mcgraph()->Int32Constant(static_cast<int>(rep)));
+ MachineRepresentation::kWord8, Int32Constant(static_cast<int>(rep)));
Node* args[] = {info};
Node* call =
@@ -4391,17 +4303,15 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
index = Uint32ToUintptr(index);
- Diamond bounds_check(
- graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->UintLessThan(), index, mem_size),
- BranchHint::kTrue);
+ Diamond bounds_check(graph(), mcgraph()->common(),
+ gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(mcgraph()->machine()->WordAnd(), index, mem_mask);
+ index = gasm_->WordAnd(index, mem_mask);
}
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
@@ -4419,7 +4329,7 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
uintptr_t value = matcher.ResolvedValue();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
}
- return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);
+ return gasm_->ChangeUint32ToUint64(node);
}
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
@@ -4434,18 +4344,16 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
// Note that we check against the memory size ignoring the size of the
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
- Diamond bounds_check(
- graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
- BranchHint::kTrue);
+ Diamond bounds_check(graph(), mcgraph()->common(),
+ gasm_->Uint32LessThan(index, mem_size),
+ BranchHint::kTrue);
bounds_check.Chain(control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index =
- graph()->NewNode(mcgraph()->machine()->Word32And(), index, mem_mask);
+ index = gasm_->Word32And(index, mem_mask);
}
index = Uint32ToUintptr(index);
@@ -4552,13 +4460,13 @@ CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
i32_atomic_wait_descriptor_ =
- GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
+ GetBuiltinCallDescriptor(Builtins::kWasmI32AtomicWait64, zone_,
+ StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i32_atomic_wait_descriptor_,
- GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub));
+ GetBuiltinCallDescriptor(Builtins::kWasmI32AtomicWait32, zone_,
+ StubCallMode::kCallWasmRuntimeStub));
return i32_atomic_wait_descriptor_;
}
@@ -4567,13 +4475,13 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
i64_atomic_wait_descriptor_ =
- GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
+ GetBuiltinCallDescriptor(Builtins::kWasmI64AtomicWait64, zone_,
+ StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i64_atomic_wait_descriptor_,
- GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
- this, StubCallMode::kCallWasmRuntimeStub));
+ GetBuiltinCallDescriptor(Builtins::kWasmI64AtomicWait32, zone_,
+ StubCallMode::kCallWasmRuntimeStub));
return i64_atomic_wait_descriptor_;
}
@@ -4690,6 +4598,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return BuildF64x2NearestInt(inputs[0]);
return graph()->NewNode(mcgraph()->machine()->F64x2NearestInt(),
inputs[0]);
+ case wasm::kExprF64x2ConvertLowI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ConvertLowI32x4S(),
+ inputs[0]);
+ case wasm::kExprF64x2ConvertLowI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ConvertLowI32x4U(),
+ inputs[0]);
+ case wasm::kExprF64x2PromoteLowF32x4:
+ return graph()->NewNode(mcgraph()->machine()->F64x2PromoteLowF32x4(),
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4784,8 +4701,13 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return BuildF32x4NearestInt(inputs[0]);
return graph()->NewNode(mcgraph()->machine()->F32x4NearestInt(),
inputs[0]);
+ case wasm::kExprF32x4DemoteF64x2Zero:
+ return graph()->NewNode(mcgraph()->machine()->F32x4DemoteF64x2Zero(),
+ inputs[0]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
+ case wasm::kExprI64x2Abs:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Abs(), inputs[0]);
case wasm::kExprI64x2Neg:
return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
case wasm::kExprI64x2SConvertI32x4Low:
@@ -4820,6 +4742,21 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI64x2Eq:
return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
inputs[1]);
+ case wasm::kExprI64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI64x2ShrU:
return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0],
inputs[1]);
@@ -4951,6 +4888,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4ExtAddPairwiseI16x8U:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8U(),
inputs[0]);
+ case wasm::kExprI32x4TruncSatF64x2SZero:
+ return graph()->NewNode(mcgraph()->machine()->I32x4TruncSatF64x2SZero(),
+ inputs[0]);
+ case wasm::kExprI32x4TruncSatF64x2UZero:
+ return graph()->NewNode(mcgraph()->machine()->I32x4TruncSatF64x2UZero(),
+ inputs[0]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -5190,16 +5133,14 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprV32x4AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
+ case wasm::kExprV64x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
case wasm::kExprV32x4AllTrue:
return graph()->NewNode(mcgraph()->machine()->V32x4AllTrue(), inputs[0]);
- case wasm::kExprV16x8AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V16x8AnyTrue(), inputs[0]);
case wasm::kExprV16x8AllTrue:
return graph()->NewNode(mcgraph()->machine()->V16x8AllTrue(), inputs[0]);
- case wasm::kExprV8x16AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V8x16AnyTrue(), inputs[0]);
+ case wasm::kExprV128AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V128AnyTrue(), inputs[0]);
case wasm::kExprV8x16AllTrue:
return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
case wasm::kExprI8x16Swizzle:
@@ -5426,15 +5367,9 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
switch (opcode) {
- case wasm::kExprAtomicNotify: {
- auto* call_descriptor =
- GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
- Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmAtomicNotify, RelocInfo::WASM_STUB_CALL);
- return gasm_->Call(call_descriptor, call_target, effective_offset,
- inputs[1]);
- }
+ case wasm::kExprAtomicNotify:
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmAtomicNotify,
+ effective_offset, inputs[1]);
case wasm::kExprI32AtomicWait: {
auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
@@ -5479,8 +5414,8 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
// validation.
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_init()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_init());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5503,12 +5438,11 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier));
- return SetEffect(
- graph()->NewNode(store_op, seg_size_array,
- mcgraph()->IntPtrConstant(data_segment_index << 2),
- mcgraph()->Int32Constant(0), effect(), control()));
+ auto store_rep =
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier);
+ return gasm_->Store(store_rep, seg_size_array,
+ mcgraph()->IntPtrConstant(data_segment_index << 2),
+ Int32Constant(0));
}
Node* WasmGraphBuilder::StoreArgsInStackSlot(
@@ -5526,7 +5460,7 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
MachineRepresentation type = arg.first;
Node* value = arg.second;
gasm_->Store(StoreRepresentation(type, kNoWriteBarrier), stack_slot,
- mcgraph()->Int32Constant(offset), value);
+ Int32Constant(offset), value);
offset += ElementSizeInBytes(type);
}
return stack_slot;
@@ -5534,8 +5468,8 @@ Node* WasmGraphBuilder::StoreArgsInStackSlot(
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_copy()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_copy());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5551,8 +5485,8 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
- Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
- ExternalReference::wasm_memory_fill()));
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_memory_fill());
Node* stack_slot = StoreArgsInStackSlot(
{{MachineType::PointerRepresentation(), instance_node_.get()},
@@ -5570,18 +5504,9 @@ Node* WasmGraphBuilder::TableInit(uint32_t table_index,
uint32_t elem_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableInitDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
-
- intptr_t target = wasm::WasmCode::kWasmTableInit;
- Node* call_target =
- mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(
- call_descriptor, call_target, dst, src, size,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- graph()->NewNode(
- mcgraph()->common()->NumberConstant(elem_segment_index)));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableInit, dst, src, size,
+ gasm_->NumberConstant(table_index),
+ gasm_->NumberConstant(elem_segment_index));
}
Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
@@ -5592,35 +5517,25 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
Node* dropped_elem_segments =
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer());
- const Operator* store_op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
- return SetEffect(
- graph()->NewNode(store_op, dropped_elem_segments,
- mcgraph()->IntPtrConstant(elem_segment_index),
- mcgraph()->Int32Constant(1), effect(), control()));
+ auto store_rep =
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier);
+ return gasm_->Store(store_rep, dropped_elem_segments, elem_segment_index,
+ Int32Constant(1));
}
Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
- auto call_descriptor = GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(
- this, StubCallMode::kCallWasmRuntimeStub);
-
- intptr_t target = wasm::WasmCode::kWasmTableCopy;
- Node* call_target =
- mcgraph()->RelocatableIntPtrConstant(target, RelocInfo::WASM_STUB_CALL);
-
- return gasm_->Call(
- call_descriptor, call_target, dst, src, size,
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)));
+ return gasm_->CallRuntimeStub(wasm::WasmCode::kWasmTableCopy, dst, src, size,
+ gasm_->NumberConstant(table_dst_index),
+ gasm_->NumberConstant(table_src_index));
}
Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
Node* delta) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), value,
+ gasm_->NumberConstant(table_index), value,
BuildConvertUint32ToSmiWithSaturation(delta, FLAG_wasm_max_table_size)};
Node* result =
BuildCallToRuntime(Runtime::kWasmTableGrow, args, arraysize(args));
@@ -5643,7 +5558,7 @@ Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
Node* value, Node* count) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
+ gasm_->NumberConstant(table_index),
BuildConvertUint32ToSmiWithSaturation(start, FLAG_wasm_max_table_size),
value,
BuildConvertUint32ToSmiWithSaturation(count, FLAG_wasm_max_table_size)};
@@ -5654,7 +5569,7 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
const wasm::StructType* type,
Node* rtt, Vector<Node*> fields) {
- Node* s = CALL_BUILTIN(WasmAllocateStructWithRtt, rtt);
+ Node* s = gasm_->CallBuiltin(Builtins::kWasmAllocateStructWithRtt, rtt);
for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]);
}
@@ -5671,14 +5586,14 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
position);
wasm::ValueType element_type = type->element_type();
- Node* a = CALL_BUILTIN(WasmAllocateArrayWithRtt, rtt, length,
- graph()->NewNode(mcgraph()->common()->Int32Constant(
- element_type.element_size_bytes())));
+ Node* a =
+ gasm_->CallBuiltin(Builtins::kWasmAllocateArrayWithRtt, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
- Node* start_offset = gasm_->Int32Constant(
- wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
- Node* element_size = gasm_->Int32Constant(element_type.element_size_bytes());
+ Node* start_offset =
+ Int32Constant(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize));
+ Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
// Loops need the graph's end to have been set up.
@@ -5698,42 +5613,15 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
-Node* WasmGraphBuilder::RttCanon(wasm::HeapType type) {
- RootIndex index;
- switch (type.representation()) {
- case wasm::HeapType::kEq:
- index = RootIndex::kWasmRttEqrefMap;
- break;
- case wasm::HeapType::kExtern:
- index = RootIndex::kWasmRttExternrefMap;
- break;
- case wasm::HeapType::kFunc:
- index = RootIndex::kWasmRttFuncrefMap;
- break;
- case wasm::HeapType::kI31:
- index = RootIndex::kWasmRttI31refMap;
- break;
- case wasm::HeapType::kAny:
- index = RootIndex::kWasmRttAnyrefMap;
- break;
- case wasm::HeapType::kBottom:
- UNREACHABLE();
- default: {
- // User-defined type.
- Node* maps_list =
- LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
- return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type.ref_index());
- }
- }
- return LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
- IsolateData::root_slot_offset(index));
+Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
+ Node* maps_list =
+ LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
+ return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type_index);
}
-Node* WasmGraphBuilder::RttSub(wasm::HeapType type, Node* parent_rtt) {
- return CALL_BUILTIN(WasmAllocateRtt,
- graph()->NewNode(mcgraph()->common()->Int32Constant(
- type.representation())),
- parent_rtt);
+Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt) {
+ return gasm_->CallBuiltin(Builtins::kWasmAllocateRtt,
+ Int32Constant(type_index), parent_rtt);
}
void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
@@ -5748,169 +5636,145 @@ void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
#endif
}
-Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
- ObjectReferenceKnowledge config) {
- auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- return gasm_->IsI31(object);
- }
- gasm_->GotoIf(gasm_->IsI31(object), &done, gasm_->Int32Constant(0));
- } else {
- AssertFalse(mcgraph(), gasm_.get(), gasm_->IsI31(object));
- }
+WasmGraphBuilder::Callbacks WasmGraphBuilder::TestCallbacks(
+ GraphAssemblerLabel<1>* label) {
+ return {// succeed_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint, Int32Constant(1));
+ },
+ // fail_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint, Int32Constant(0));
+ },
+ // fail_if_not
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIfNot(condition, label, hint, Int32Constant(0));
+ }};
+}
+
+WasmGraphBuilder::Callbacks WasmGraphBuilder::CastCallbacks(
+ GraphAssemblerLabel<0>* label, wasm::WasmCodePosition position) {
+ return {// succeed_if
+ [=](Node* condition, BranchHint hint) -> void {
+ gasm_->GotoIf(condition, label, hint);
+ },
+ // fail_if
+ [=](Node* condition, BranchHint hint) -> void {
+ TrapIfTrue(wasm::kTrapIllegalCast, condition, position);
+ },
+ // fail_if_not
+ [=](Node* condition, BranchHint hint) -> void {
+ TrapIfFalse(wasm::kTrapIllegalCast, condition, position);
+ }};
+}
+
+WasmGraphBuilder::Callbacks WasmGraphBuilder::BranchCallbacks(
+ SmallNodeVector& no_match_controls, SmallNodeVector& no_match_effects,
+ SmallNodeVector& match_controls, SmallNodeVector& match_effects) {
+ return {
+ // succeed_if
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ },
+ // fail_if
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ no_match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ no_match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ },
+ // fail_if_not
+ [&](Node* condition, BranchHint hint) -> void {
+ Node* branch = graph()->NewNode(mcgraph()->common()->Branch(hint),
+ condition, control());
+ no_match_controls.emplace_back(
+ graph()->NewNode(mcgraph()->common()->IfFalse(), branch));
+ no_match_effects.emplace_back(effect());
+ SetControl(graph()->NewNode(mcgraph()->common()->IfTrue(), branch));
+ }};
+}
+
+void WasmGraphBuilder::TypeCheck(
+ Node* object, Node* rtt, WasmGraphBuilder::ObjectReferenceKnowledge config,
+ bool null_succeeds, Callbacks callbacks) {
if (config.object_can_be_null) {
- gasm_->GotoIf(gasm_->WordEqual(object, RefNull()), &done,
- gasm_->Int32Constant(0));
+ (null_succeeds ? callbacks.succeed_if : callbacks.fail_if)(
+ gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
Node* map = gasm_->LoadMap(object);
- gasm_->GotoIf(gasm_->TaggedEqual(map, rtt), &done, gasm_->Int32Constant(1));
- if (!config.object_must_be_data_ref) {
- gasm_->GotoIfNot(gasm_->IsDataRefMap(map), &done, gasm_->Int32Constant(0));
+ if (config.reference_kind == kFunction) {
+ // Currently, the only way for a function to match an rtt is if its map
+ // is equal to that rtt.
+ callbacks.fail_if_not(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
+ return;
}
+
+ DCHECK(config.reference_kind == kArrayOrStruct);
+
+ callbacks.succeed_if(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
+
Node* type_info = gasm_->LoadWasmTypeInfo(map);
Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
+ Node* supertypes_length =
BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- gasm_->GotoIfNot(
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- &done, gasm_->Int32Constant(0));
+ Node* rtt_depth =
+ config.rtt_depth >= 0
+ ? Int32Constant(config.rtt_depth)
+ : BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(
+ gasm_->LoadSupertypes(gasm_->LoadWasmTypeInfo(rtt))));
+ callbacks.fail_if_not(gasm_->Uint32LessThan(rtt_depth, supertypes_length),
+ BranchHint::kTrue);
Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- gasm_->Goto(&done, gasm_->TaggedEqual(maybe_match, rtt));
- gasm_->Bind(&done);
+ supertypes, rtt_depth, MachineType::TaggedPointer());
- return done.PhiAt(0);
+ callbacks.fail_if_not(gasm_->TaggedEqual(maybe_match, rtt),
+ BranchHint::kTrue);
}
-Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
- ObjectReferenceKnowledge config,
- wasm::WasmCodePosition position) {
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
- return object;
- } else {
- TrapIfTrue(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
- }
- } else {
- AssertFalse(mcgraph(), gasm_.get(), gasm_->IsI31(object));
- }
- if (config.object_can_be_null) {
- TrapIfTrue(wasm::kTrapIllegalCast, gasm_->WordEqual(object, RefNull()),
- position);
+void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
+ Callbacks callbacks) {
+ if (object_can_be_null) {
+ callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
+ callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
Node* map = gasm_->LoadMap(object);
- auto done = gasm_->MakeLabel();
- gasm_->GotoIf(gasm_->TaggedEqual(map, rtt), &done);
- if (!config.object_must_be_data_ref) {
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsDataRefMap(map), position);
- }
- Node* type_info = gasm_->LoadWasmTypeInfo(map);
- Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
- BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- TrapIfFalse(
- wasm::kTrapIllegalCast,
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- position);
- Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- TrapIfFalse(wasm::kTrapIllegalCast, gasm_->TaggedEqual(maybe_match, rtt),
- position);
- gasm_->Goto(&done);
- gasm_->Bind(&done);
- return object;
+ callbacks.fail_if_not(gasm_->IsDataRefMap(map), BranchHint::kTrue);
}
-Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
- ObjectReferenceKnowledge config,
- Node** match_control, Node** match_effect,
- Node** no_match_control,
- Node** no_match_effect) {
- // We have up to 5 control nodes to merge; the EffectPhi needs an additional
- // input.
- base::SmallVector<Node*, 5> no_match_controls;
- base::SmallVector<Node*, 6> no_match_effects;
- // We always have 2 match_controls; use the same mechanism for uniformity.
- base::SmallVector<Node*, 2> match_controls;
- base::SmallVector<Node*, 3> match_effects;
-
- Node* is_i31 = gasm_->IsI31(object);
- if (config.object_can_be_i31) {
- if (config.rtt_is_i31) {
- BranchExpectFalse(is_i31, match_control, no_match_control);
- return nullptr;
- } else {
- Node* i31_branch = graph()->NewNode(
- mcgraph()->common()->Branch(BranchHint::kFalse), is_i31, control());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), i31_branch));
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), i31_branch));
- no_match_effects.emplace_back(effect());
- }
- } else {
- AssertFalse(mcgraph(), gasm_.get(), is_i31);
+void WasmGraphBuilder::FuncCheck(Node* object, bool object_can_be_null,
+ Callbacks callbacks) {
+ if (object_can_be_null) {
+ callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
}
+ callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
+ callbacks.fail_if_not(gasm_->HasInstanceType(object, JS_FUNCTION_TYPE),
+ BranchHint::kTrue);
+}
- if (config.object_can_be_null) {
- Node* null_branch =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kFalse),
- gasm_->WordEqual(object, RefNull()), control());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), null_branch));
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), null_branch));
- no_match_effects.emplace_back(effect());
- }
+Node* WasmGraphBuilder::BrOnCastAbs(
+ Node** match_control, Node** match_effect, Node** no_match_control,
+ Node** no_match_effect, std::function<void(Callbacks)> type_checker) {
+ SmallNodeVector no_match_controls, no_match_effects, match_controls,
+ match_effects;
- // At this point, {object} is neither null nor an i31ref/Smi.
- Node* map = gasm_->LoadMap(object);
- Node* exact_match =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->TaggedEqual(map, rtt), control());
- match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), exact_match));
- match_effects.emplace_back(effect());
- SetControl(graph()->NewNode(mcgraph()->common()->IfFalse(), exact_match));
- if (!config.object_must_be_data_ref) {
- Node* is_data_ref =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->IsDataRefMap(map), control());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), is_data_ref));
- no_match_effects.emplace_back(effect());
- SetControl(graph()->NewNode(mcgraph()->common()->IfTrue(), is_data_ref));
- }
- Node* type_info = gasm_->LoadWasmTypeInfo(map);
- Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* length =
- BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
- Node* length_sufficient = graph()->NewNode(
- mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->Uint32LessThan(gasm_->Int32Constant(config.rtt_depth), length),
- control());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), length_sufficient));
- no_match_effects.emplace_back(effect());
- SetControl(
- graph()->NewNode(mcgraph()->common()->IfTrue(), length_sufficient));
- Node* maybe_match = gasm_->LoadFixedArrayElement(
- supertypes, config.rtt_depth, MachineType::TaggedPointer());
- Node* supertype_match =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- gasm_->TaggedEqual(maybe_match, rtt), control());
- match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfTrue(), supertype_match));
+ type_checker(BranchCallbacks(no_match_controls, no_match_effects,
+ match_controls, match_effects));
+
+ match_controls.emplace_back(control());
match_effects.emplace_back(effect());
- no_match_controls.emplace_back(
- graph()->NewNode(mcgraph()->common()->IfFalse(), supertype_match));
- no_match_effects.emplace_back(effect());
// Wire up the control/effect nodes.
unsigned count = static_cast<unsigned>(match_controls.size());
- DCHECK_EQ(2, count);
+ DCHECK_EQ(match_controls.size(), match_effects.size());
*match_control = Merge(count, match_controls.data());
// EffectPhis need their control dependency as an additional input.
match_effects.emplace_back(*match_control);
@@ -5928,6 +5792,118 @@ Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
return nullptr;
}
+Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ TypeCheck(object, rtt, config, false, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ TypeCheck(object, rtt, config, true, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return TypeCheck(object, rtt, config, false, callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsData(Node* object, bool object_can_be_null) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ DataCheck(object, object_can_be_null, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefAsData(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ DataCheck(object, object_can_be_null, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnData(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return DataCheck(object, config.object_can_be_null,
+ callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsFunc(Node* object, bool object_can_be_null) {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kWord32);
+ FuncCheck(object, object_can_be_null, TestCallbacks(&done));
+ gasm_->Goto(&done, Int32Constant(1));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* WasmGraphBuilder::RefAsFunc(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position) {
+ auto done = gasm_->MakeLabel();
+ FuncCheck(object, object_can_be_null, CastCallbacks(&done, position));
+ gasm_->Goto(&done);
+ gasm_->Bind(&done);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnFunc(Node* object, Node* /*rtt*/,
+ ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ return BrOnCastAbs(match_control, match_effect, no_match_control,
+ no_match_effect, [=](Callbacks callbacks) -> void {
+ return FuncCheck(object, config.object_can_be_null,
+ callbacks);
+ });
+}
+
+Node* WasmGraphBuilder::RefIsI31(Node* object) { return gasm_->IsI31(object); }
+
+Node* WasmGraphBuilder::RefAsI31(Node* object,
+ wasm::WasmCodePosition position) {
+ TrapIfFalse(wasm::kTrapIllegalCast, gasm_->IsI31(object), position);
+ return object;
+}
+
+Node* WasmGraphBuilder::BrOnI31(Node* object, Node* /* rtt */,
+ ObjectReferenceKnowledge /* config */,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control,
+ Node** no_match_effect) {
+ gasm_->Branch(gasm_->IsI31(object), match_control, no_match_control,
+ BranchHint::kTrue);
+
+ SetControl(*no_match_control);
+ *match_effect = effect();
+ *no_match_effect = effect();
+
+ // Unused return value, needed for typing of BUILD in graph-builder-interface.
+ return nullptr;
+}
+
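The new RefTest/RefCast/BrOnCast bodies above all delegate to a single TypeCheck routine and differ only in the Callbacks they pass (TestCallbacks, CastCallbacks, BranchCallbacks). A minimal standalone sketch of that shape follows; it is plain C++ with hypothetical names, reduced to two callbacks, and is not part of this patch or of V8.

// Standalone sketch of the callback-parameterized check pattern. Unlike the
// graph builder, this sketch does not branch; it just latches the first
// decision a callback makes.
#include <functional>
#include <iostream>
#include <stdexcept>

struct Callbacks {
  std::function<void(bool)> succeed_if;  // take the "match" exit if true
  std::function<void(bool)> fail_if;     // take the "no match" exit if true
};

// The shared check sequence, written once and specialized by its callers.
void RunChecks(int value, const Callbacks& cb) {
  cb.fail_if(value < 0);       // stands in for the null check
  cb.succeed_if(value == 42);  // stands in for the exact-map match
  cb.fail_if(value % 2 != 0);  // stands in for the supertype-depth check
}

// "Test" flavor: the checks produce a boolean result.
bool TestFlavor(int value) {
  bool decided = false, result = true;  // falling through means success
  RunChecks(value, {
      [&](bool c) { if (c && !decided) { decided = true; result = true; } },
      [&](bool c) { if (c && !decided) { decided = true; result = false; } }});
  return result;
}

// "Cast" flavor: the same checks, but failure throws instead of returning.
int CastFlavor(int value) {
  RunChecks(value, {
      [](bool) { /* early success needs no action in this sketch */ },
      [](bool c) { if (c) throw std::runtime_error("illegal cast"); }});
  return value;
}

int main() {
  std::cout << TestFlavor(42) << " " << TestFlavor(-1) << "\n";  // 1 0
  std::cout << CastFlavor(42) << "\n";                           // 42
}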
Node* WasmGraphBuilder::StructGet(Node* struct_object,
const wasm::StructType* struct_type,
uint32_t field_index, CheckForNull null_check,
@@ -6080,23 +6056,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
i64_to_bigint_descriptor_ =
- GetBuiltinCallDescriptor<I64ToBigIntDescriptor>(this, stub_mode_);
+ GetBuiltinCallDescriptor(Builtins::kI64ToBigInt, zone_, stub_mode_);
AddInt64LoweringReplacement(
i64_to_bigint_descriptor_,
- GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this, stub_mode_));
+ GetBuiltinCallDescriptor(Builtins::kI32PairToBigInt, zone_,
+ stub_mode_));
return i64_to_bigint_descriptor_;
}
- CallDescriptor* GetBigIntToI64CallDescriptor() {
+ CallDescriptor* GetBigIntToI64CallDescriptor(bool needs_frame_state) {
if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
- bigint_to_i64_descriptor_ =
- GetBuiltinCallDescriptor<BigIntToI64Descriptor>(this, stub_mode_);
+ bigint_to_i64_descriptor_ = GetBuiltinCallDescriptor(
+ Builtins::kBigIntToI64, zone_, stub_mode_, needs_frame_state);
AddInt64LoweringReplacement(
bigint_to_i64_descriptor_,
- GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this, stub_mode_));
+ GetBuiltinCallDescriptor(Builtins::kBigIntToI32Pair, zone_,
+ stub_mode_));
return bigint_to_i64_descriptor_;
}
@@ -6105,7 +6083,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(wasm_stub,
RelocInfo::WASM_STUB_CALL)
- : GetBuiltinPointerTarget(builtin_id);
+ : GetBuiltinPointerTarget(mcgraph(), builtin_id);
}
Node* BuildLoadUndefinedValueFromInstance() {
@@ -6114,7 +6092,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
undefined_value_node_ = gasm_->Load(
MachineType::Pointer(), isolate_root,
- mcgraph()->Int32Constant(
+ Int32Constant(
IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
}
return undefined_value_node_.get();
@@ -6159,7 +6137,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return done.PhiAt(0);
}
- Node* BuildChangeTaggedToInt32(Node* value, Node* context) {
+ Node* BuildChangeTaggedToInt32(Node* value, Node* context,
+ Node* frame_state) {
// We expect most integers at runtime to be Smis, so it is important for
// wrapper performance that Smi conversion be inlined.
auto builtin = gasm_->MakeDeferredLabel();
@@ -6180,11 +6159,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (!tagged_non_smi_to_int32_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), WasmTaggedNonSmiToInt32Descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags,
+ Operator::kNoProperties, stub_mode_);
tagged_non_smi_to_int32_operator_.set(common->Call(call_descriptor));
}
- Node* call = gasm_->Call(tagged_non_smi_to_int32_operator_.get(), target,
- value, context);
+ Node* call = frame_state
+ ? gasm_->Call(tagged_non_smi_to_int32_operator_.get(),
+ target, value, context, frame_state)
+ : gasm_->Call(tagged_non_smi_to_int32_operator_.get(),
+ target, value, context);
SetSourcePosition(call, 1);
gasm_->Goto(&done, call);
gasm_->Bind(&done);
@@ -6217,18 +6201,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return gasm_->Call(float64_to_number_operator_.get(), target, value);
}
- Node* BuildChangeTaggedToFloat64(Node* value, Node* context) {
+ Node* BuildChangeTaggedToFloat64(Node* value, Node* context,
+ Node* frame_state) {
CommonOperatorBuilder* common = mcgraph()->common();
Node* target = GetTargetForBuiltinCall(wasm::WasmCode::kWasmTaggedToFloat64,
Builtins::kWasmTaggedToFloat64);
+ bool needs_frame_state = frame_state != nullptr;
if (!tagged_to_float64_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), WasmTaggedToFloat64Descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties, stub_mode_);
+ frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags,
+ Operator::kNoProperties, stub_mode_);
tagged_to_float64_operator_.set(common->Call(call_descriptor));
}
- Node* call =
- gasm_->Call(tagged_to_float64_operator_.get(), target, value, context);
+ Node* call = needs_frame_state
+ ? gasm_->Call(tagged_to_float64_operator_.get(), target,
+ value, context, frame_state)
+ : gasm_->Call(tagged_to_float64_operator_.get(), target,
+ value, context);
SetSourcePosition(call, 1);
return call;
}
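BuildChangeTaggedToInt32 and BuildChangeTaggedToFloat64 now take an optional frame_state: when it is present, the stub call descriptor is built with kNeedsFrameState and the call gets one extra input. A minimal standalone sketch of that shape is below; the names are hypothetical and it is not V8 code.

// Standalone sketch: threading an optional frame-state node through a
// conversion call. When present, the call takes one extra input and its
// descriptor is marked as needing a frame state.
#include <cstddef>
#include <iostream>
#include <vector>

struct Node { const char* tag; };

struct CallShape {
  bool needs_frame_state;
  std::size_t input_count;
};

CallShape BuildConversionCall(Node* value, Node* context, Node* frame_state) {
  std::vector<Node*> inputs = {value, context};
  if (frame_state != nullptr) inputs.push_back(frame_state);  // extra input
  return {frame_state != nullptr, inputs.size()};
}

int main() {
  Node value{"value"}, context{"context"}, fs{"frame_state"};
  CallShape without = BuildConversionCall(&value, &context, nullptr);
  CallShape with = BuildConversionCall(&value, &context, &fs);
  std::cout << without.input_count << " inputs, frame state: "
            << without.needs_frame_state << "\n";  // prints "2 inputs, frame state: 0"
  std::cout << with.input_count << " inputs, frame state: "
            << with.needs_frame_state << "\n";     // prints "3 inputs, frame state: 1"
}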
@@ -6246,26 +6237,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* ToJS(Node* node, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
return BuildChangeInt32ToNumber(node);
- case wasm::ValueType::kS128:
+ case wasm::kS128:
UNREACHABLE();
- case wasm::ValueType::kI64: {
+ case wasm::kI64: {
return BuildChangeInt64ToBigInt(node);
}
- case wasm::ValueType::kF32:
+ case wasm::kF32:
return BuildChangeFloat32ToNumber(node);
- case wasm::ValueType::kF64:
+ case wasm::kF64:
return BuildChangeFloat64ToNumber(node);
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef: {
+ case wasm::kRef:
+ case wasm::kOptRef: {
uint32_t representation = type.heap_representation();
if (representation == wasm::HeapType::kExtern ||
- representation == wasm::HeapType::kExn ||
representation == wasm::HeapType::kFunc) {
return node;
}
- if (representation == wasm::HeapType::kEq) {
+ if (representation == wasm::HeapType::kData) {
// TODO(7748): Update this when JS interop is settled.
return BuildAllocateObjectWrapper(node);
}
@@ -6288,13 +6278,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// TODO(7748): Figure out a JS interop story for arrays and structs.
UNREACHABLE();
}
- case wasm::ValueType::kRtt:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
// TODO(7748): Figure out what to do for RTTs.
UNIMPLEMENTED();
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kStmt:
- case wasm::ValueType::kBottom:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kStmt:
+ case wasm::kBottom:
UNREACHABLE();
}
}
@@ -6303,16 +6294,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// through JavaScript, where they show up as opaque boxes. This will disappear
// once we have a proper WasmGC <-> JS interaction story.
Node* BuildAllocateObjectWrapper(Node* input) {
- return CALL_BUILTIN(
- WasmAllocateObjectWrapper, input,
+ return gasm_->CallBuiltin(
+ Builtins::kWasmAllocateObjectWrapper, input,
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
}
enum UnpackFailureBehavior : bool { kReturnInput, kReturnNull };
Node* BuildUnpackObjectWrapper(Node* input, UnpackFailureBehavior failure) {
- Node* obj = CALL_BUILTIN(
- WasmGetOwnProperty, input,
+ Node* obj = gasm_->CallBuiltin(
+ Builtins::kWasmGetOwnProperty, input,
LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(
RootIndex::kwasm_wrapped_object_symbol)),
@@ -6330,9 +6321,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildChangeInt64ToBigInt(Node* input) {
- const Operator* call =
- mcgraph()->common()->Call(GetI64ToBigIntCallDescriptor());
-
Node* target;
if (mcgraph()->machine()->Is64()) {
target = GetTargetForBuiltinCall(wasm::WasmCode::kI64ToBigInt,
@@ -6345,15 +6333,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
target = GetTargetForBuiltinCall(wasm::WasmCode::kI32PairToBigInt,
Builtins::kI32PairToBigInt);
}
-
- return SetEffectControl(
- graph()->NewNode(call, target, input, effect(), control()));
+ return gasm_->Call(GetI64ToBigIntCallDescriptor(), target, input);
}
- Node* BuildChangeBigIntToInt64(Node* input, Node* context) {
- const Operator* call =
- mcgraph()->common()->Call(GetBigIntToI64CallDescriptor());
-
+ Node* BuildChangeBigIntToInt64(Node* input, Node* context,
+ Node* frame_state) {
Node* target;
if (mcgraph()->machine()->Is64()) {
target = GetTargetForBuiltinCall(wasm::WasmCode::kBigIntToI64,
@@ -6367,8 +6351,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Builtins::kBigIntToI32Pair);
}
- return SetEffectControl(
- graph()->NewNode(call, target, input, context, effect(), control()));
+ return frame_state ? gasm_->Call(GetBigIntToI64CallDescriptor(true), target,
+ input, context, frame_state)
+ : gasm_->Call(GetBigIntToI64CallDescriptor(false),
+ target, input, context);
}
void BuildCheckValidRefValue(Node* input, Node* js_context,
@@ -6394,13 +6380,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
type_check.merge);
}
- Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
+ Node* FromJS(Node* input, Node* js_context, wasm::ValueType type,
+ Node* frame_state = nullptr) {
switch (type.kind()) {
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef: {
+ case wasm::kRef:
+ case wasm::kOptRef: {
switch (type.heap_representation()) {
case wasm::HeapType::kExtern:
- case wasm::HeapType::kExn:
return input;
case wasm::HeapType::kAny:
// If this is a wrapper for arrays/structs, unpack it.
@@ -6409,10 +6395,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::HeapType::kFunc:
BuildCheckValidRefValue(input, js_context, type);
return input;
- case wasm::HeapType::kEq:
+ case wasm::HeapType::kData:
// TODO(7748): Update this when JS interop has settled.
BuildCheckValidRefValue(input, js_context, type);
return BuildUnpackObjectWrapper(input, kReturnNull);
+ case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
@@ -6427,40 +6414,38 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
UNREACHABLE();
}
}
- case wasm::ValueType::kF32:
- return graph()->NewNode(
- mcgraph()->machine()->TruncateFloat64ToFloat32(),
- BuildChangeTaggedToFloat64(input, js_context));
+ case wasm::kF32:
+ return gasm_->TruncateFloat64ToFloat32(
+ BuildChangeTaggedToFloat64(input, js_context, frame_state));
- case wasm::ValueType::kF64:
- return BuildChangeTaggedToFloat64(input, js_context);
+ case wasm::kF64:
+ return BuildChangeTaggedToFloat64(input, js_context, frame_state);
- case wasm::ValueType::kI32:
- return BuildChangeTaggedToInt32(input, js_context);
+ case wasm::kI32:
+ return BuildChangeTaggedToInt32(input, js_context, frame_state);
- case wasm::ValueType::kI64:
+ case wasm::kI64:
// i64 values can only come from BigInt.
- return BuildChangeBigIntToInt64(input, js_context);
-
- case wasm::ValueType::kRtt: // TODO(7748): Implement.
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ return BuildChangeBigIntToInt64(input, js_context, frame_state);
+
+ case wasm::kRtt: // TODO(7748): Implement.
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
Node* SmiToFloat32(Node* input) {
- return graph()->NewNode(mcgraph()->machine()->RoundInt32ToFloat32(),
- BuildChangeSmiToInt32(input));
+ return gasm_->RoundInt32ToFloat32(BuildChangeSmiToInt32(input));
}
Node* SmiToFloat64(Node* input) {
- return graph()->NewNode(mcgraph()->machine()->ChangeInt32ToFloat64(),
- BuildChangeSmiToInt32(input));
+ return gasm_->ChangeInt32ToFloat64(BuildChangeSmiToInt32(input));
}
Node* HeapNumberToFloat64(Node* input) {
@@ -6470,22 +6455,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJSFast(Node* input, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
+ case wasm::kI32:
return BuildChangeSmiToInt32(input);
- case wasm::ValueType::kF32: {
+ case wasm::kF32: {
auto done = gasm_->MakeLabel(MachineRepresentation::kFloat32);
auto heap_number = gasm_->MakeLabel();
gasm_->GotoIfNot(IsSmi(input), &heap_number);
gasm_->Goto(&done, SmiToFloat32(input));
gasm_->Bind(&heap_number);
Node* value =
- graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToFloat32(),
- HeapNumberToFloat64(input));
+ gasm_->TruncateFloat64ToFloat32(HeapNumberToFloat64(input));
gasm_->Goto(&done, value);
gasm_->Bind(&done);
return done.PhiAt(0);
}
- case wasm::ValueType::kF64: {
+ case wasm::kF64: {
auto done = gasm_->MakeLabel(MachineRepresentation::kFloat64);
auto heap_number = gasm_->MakeLabel();
gasm_->GotoIfNot(IsSmi(input), &heap_number);
@@ -6495,66 +6479,96 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Bind(&done);
return done.PhiAt(0);
}
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
- void BuildModifyThreadInWasmFlag(bool new_value) {
- if (!trap_handler::IsTrapHandlerEnabled()) return;
- Node* isolate_root = BuildLoadIsolateRoot();
-
- Node* thread_in_wasm_flag_address =
- gasm_->Load(MachineType::Pointer(), isolate_root,
- Isolate::thread_in_wasm_flag_address_offset());
-
+ void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
+ bool new_value) {
if (FLAG_debug_code) {
- Node* flag_value = SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::Pointer()),
- thread_in_wasm_flag_address,
- mcgraph()->Int32Constant(0), effect(), control()));
+ Node* flag_value =
+ gasm_->Load(MachineType::Pointer(), thread_in_wasm_flag_address, 0);
Node* check =
- graph()->NewNode(mcgraph()->machine()->Word32Equal(), flag_value,
- mcgraph()->Int32Constant(new_value ? 0 : 1));
+ gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
Diamond flag_check(graph(), mcgraph()->common(), check,
BranchHint::kTrue);
flag_check.Chain(control());
SetControl(flag_check.if_false);
- Node* message_id = graph()->NewNode(
- mcgraph()->common()->NumberConstant(static_cast<int32_t>(
- new_value ? AbortReason::kUnexpectedThreadInWasmSet
- : AbortReason::kUnexpectedThreadInWasmUnset)));
+ Node* message_id = gasm_->NumberConstant(static_cast<int32_t>(
+ new_value ? AbortReason::kUnexpectedThreadInWasmSet
+ : AbortReason::kUnexpectedThreadInWasmUnset));
Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kAbort, NoContextConstant(),
- &message_id, 1);
-
+ Node* call = BuildCallToRuntimeWithContext(
+ Runtime::kAbort, NoContextConstant(), &message_id, 1);
+ flag_check.merge->ReplaceInput(1, call);
SetEffectControl(flag_check.EffectPhi(old_effect, effect()),
flag_check.merge);
}
- SetEffect(graph()->NewNode(
- mcgraph()->machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, kNoWriteBarrier)),
- thread_in_wasm_flag_address, mcgraph()->Int32Constant(0),
- mcgraph()->Int32Constant(new_value ? 1 : 0), effect(), control()));
+ gasm_->Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ thread_in_wasm_flag_address, 0, Int32Constant(new_value ? 1 : 0));
}
+ void BuildModifyThreadInWasmFlag(bool new_value) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* isolate_root = BuildLoadIsolateRoot();
+
+ Node* thread_in_wasm_flag_address =
+ gasm_->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
+
+ BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
+ }
+
+ class ModifyThreadInWasmFlagScope {
+ public:
+ ModifyThreadInWasmFlagScope(
+ WasmWrapperGraphBuilder* wasm_wrapper_graph_builder,
+ WasmGraphAssembler* gasm)
+ : wasm_wrapper_graph_builder_(wasm_wrapper_graph_builder) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot();
+
+ thread_in_wasm_flag_address_ =
+ gasm->Load(MachineType::Pointer(), isolate_root,
+ Isolate::thread_in_wasm_flag_address_offset());
+
+ wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
+ thread_in_wasm_flag_address_, true);
+ }
+
+ ~ModifyThreadInWasmFlagScope() {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+
+ wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
+ thread_in_wasm_flag_address_, false);
+ }
+
+ private:
+ WasmWrapperGraphBuilder* wasm_wrapper_graph_builder_;
+ Node* thread_in_wasm_flag_address_;
+ };
+
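The ModifyThreadInWasmFlagScope above replaces the explicit set/clear pair around the call with an RAII scope, so the flag is cleared on every path that leaves the block. A minimal standalone sketch of the pattern follows; the names are hypothetical and it is not V8 code.

// Standalone sketch of the RAII set/clear pattern: the constructor sets a
// flag and the destructor clears it, so the clear side runs no matter how
// the enclosing scope is exited.
#include <cassert>
#include <iostream>

struct Flag {
  bool value = false;
};

class ScopedFlag {
 public:
  explicit ScopedFlag(Flag* flag) : flag_(flag) { flag_->value = true; }
  ~ScopedFlag() { flag_->value = false; }
  // Non-copyable: exactly one owner of the set/clear pair.
  ScopedFlag(const ScopedFlag&) = delete;
  ScopedFlag& operator=(const ScopedFlag&) = delete;

 private:
  Flag* flag_;
};

int DoCall(Flag* in_wasm) {
  ScopedFlag scope(in_wasm);  // flag set here
  assert(in_wasm->value);
  return 42;                  // flag cleared when 'scope' is destroyed
}

int main() {
  Flag in_wasm;
  std::cout << DoCall(&in_wasm) << "\n";                     // 42
  std::cout << (in_wasm.value ? "set" : "cleared") << "\n";  // cleared
}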
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context) {
Node* length = BuildChangeUint31ToSmi(
mcgraph()->Uint32Constant(static_cast<uint32_t>(sig->return_count())));
- return CALL_BUILTIN(IterableToFixedArrayForWasm, iterable, length, context);
+ return gasm_->CallBuiltin(Builtins::kIterableToFixedArrayForWasm, iterable,
+ length, context);
}
// Generate a call to the AllocateJSArray builtin.
@@ -6563,43 +6577,47 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// we make sure this is true based on statically known limits.
STATIC_ASSERT(wasm::kV8MaxWasmFunctionMultiReturns <=
JSArray::kInitialMaxFastElementArray);
- return SetControl(CALL_BUILTIN(WasmAllocateJSArray, array_length, context));
+ return SetControl(gasm_->CallBuiltin(Builtins::kWasmAllocateJSArray,
+ array_length, context));
}
Node* BuildCallAndReturn(bool is_import, Node* js_context,
Node* function_data,
- base::SmallVector<Node*, 16> args) {
- // Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true);
-
+ base::SmallVector<Node*, 16> args,
+ const JSWasmCallData* js_wasm_call_data,
+ Node* frame_state) {
const int rets_count = static_cast<int>(sig_->return_count());
base::SmallVector<Node*, 1> rets(rets_count);
- if (is_import) {
- // Call to an imported function.
- // Load function index from {WasmExportedFunctionData}.
- Node* function_index = BuildChangeSmiToInt32(
- gasm_->LoadExportedFunctionIndexAsSmi(function_data));
- BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
- wasm::kNoCodePosition, function_index, kCallContinues);
- } else {
- // Call to a wasm function defined in this module.
- // The call target is the jump table slot for that function.
- Node* jump_table_start =
- LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
- Node* jump_table_offset =
- BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
- Node* jump_table_slot = graph()->NewNode(
- mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
- args[0] = jump_table_slot;
-
- BuildWasmCall(sig_, VectorOf(args), VectorOf(rets), wasm::kNoCodePosition,
- nullptr, kNoRetpoline);
+ // Set the ThreadInWasm flag before we do the actual call.
+ {
+ ModifyThreadInWasmFlagScope modify_thread_in_wasm_flag_builder(
+ this, gasm_.get());
+
+ if (is_import) {
+ // Call to an imported function.
+ // Load function index from {WasmExportedFunctionData}.
+ Node* function_index = BuildChangeSmiToInt32(
+ gasm_->LoadExportedFunctionIndexAsSmi(function_data));
+ BuildImportCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, function_index, kCallContinues);
+ } else {
+ // Call to a wasm function defined in this module.
+ // The call target is the jump table slot for that function.
+ Node* jump_table_start =
+ LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
+ Node* jump_table_offset =
+ BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
+ Node* jump_table_slot =
+ gasm_->IntAdd(jump_table_start, jump_table_offset);
+ args[0] = jump_table_slot;
+
+ BuildWasmCall(sig_, VectorOf(args), VectorOf(rets),
+ wasm::kNoCodePosition, nullptr, kNoRetpoline,
+ frame_state);
+ }
}
- // Clear the ThreadInWasm flag.
- BuildModifyThreadInWasmFlag(false);
-
Node* jsval;
if (sig_->return_count() == 0) {
// We do not use {BuildLoadUndefinedValueFromInstance} here because it
@@ -6608,14 +6626,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
jsval = gasm_->Load(
MachineType::Pointer(), isolate_root,
- mcgraph()->Int32Constant(
- IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
+ IsolateData::root_slot_offset(RootIndex::kUndefinedValue));
} else if (sig_->return_count() == 1) {
- jsval = ToJS(rets[0], sig_->GetReturn());
+ jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion()
+ ? rets[0]
+ : ToJS(rets[0], sig_->GetReturn());
} else {
int32_t return_count = static_cast<int32_t>(sig_->return_count());
- Node* size =
- graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* size = gasm_->NumberConstant(return_count);
jsval = BuildCallAllocateJSArray(size, js_context);
@@ -6634,19 +6652,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < wasm_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
switch (type.kind()) {
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
return false;
- case wasm::ValueType::kI32:
- case wasm::ValueType::kF32:
- case wasm::ValueType::kF64:
+ case wasm::kI32:
+ case wasm::kF32:
+ case wasm::kF64:
break;
}
}
@@ -6656,20 +6675,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* IsSmi(Node* input) {
return gasm_->Word32Equal(
gasm_->Word32And(BuildTruncateIntPtrToInt32(input),
- gasm_->Int32Constant(kSmiTagMask)),
- gasm_->Int32Constant(kSmiTag));
+ Int32Constant(kSmiTagMask)),
+ Int32Constant(kSmiTag));
}
void CanTransformFast(
Node* input, wasm::ValueType type,
v8::internal::compiler::GraphAssemblerLabel<0>* slow_path) {
switch (type.kind()) {
- case wasm::ValueType::kI32: {
+ case wasm::kI32: {
gasm_->GotoIfNot(IsSmi(input), slow_path);
return;
}
- case wasm::ValueType::kF32:
- case wasm::ValueType::kF64: {
+ case wasm::kF32:
+ case wasm::kF64: {
auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done);
Node* map =
@@ -6684,25 +6703,28 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Bind(&done);
return;
}
- case wasm::ValueType::kRef:
- case wasm::ValueType::kOptRef:
- case wasm::ValueType::kI64:
- case wasm::ValueType::kRtt:
- case wasm::ValueType::kS128:
- case wasm::ValueType::kI8:
- case wasm::ValueType::kI16:
- case wasm::ValueType::kBottom:
- case wasm::ValueType::kStmt:
+ case wasm::kRef:
+ case wasm::kOptRef:
+ case wasm::kI64:
+ case wasm::kRtt:
+ case wasm::kRttWithDepth:
+ case wasm::kS128:
+ case wasm::kI8:
+ case wasm::kI16:
+ case wasm::kBottom:
+ case wasm::kStmt:
UNREACHABLE();
break;
}
}
- void BuildJSToWasmWrapper(bool is_import) {
- const int wasm_count = static_cast<int>(sig_->parameter_count());
+ void BuildJSToWasmWrapper(bool is_import,
+ const JSWasmCallData* js_wasm_call_data = nullptr,
+ Node* frame_state = nullptr) {
+ const int wasm_param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
- SetEffectControl(Start(wasm_count + 5));
+ SetEffectControl(Start(wasm_param_count + 5));
// Create the js_closure and js_context parameters.
Node* js_closure =
@@ -6711,7 +6733,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->start());
Node* js_context = graph()->NewNode(
mcgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
+ Linkage::GetJSCallContextParamIndex(wasm_param_count + 1),
+ "%context"),
graph()->start());
// Create the instance_node node to pass as parameter. It is loaded from
@@ -6731,17 +6754,18 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return;
}
- const int args_count = wasm_count + 1; // +1 for wasm_code.
+ const int args_count = wasm_param_count + 1; // +1 for wasm_code.
// Check whether the signature of the function allows for a fast
// transformation (if any params exist that need transformation).
// Create a fast transformation path, only if it does.
- bool include_fast_path = wasm_count && QualifiesForFastTransform(sig_);
+ bool include_fast_path = !js_wasm_call_data && wasm_param_count > 0 &&
+ QualifiesForFastTransform(sig_);
// Prepare Param() nodes. Param() nodes can only be created once,
// so we need to use the same nodes along all possible transformation paths.
base::SmallVector<Node*, 16> params(args_count);
- for (int i = 0; i < wasm_count; ++i) params[i + 1] = Param(i + 1);
+ for (int i = 0; i < wasm_param_count; ++i) params[i + 1] = Param(i + 1);
auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
if (include_fast_path) {
@@ -6750,30 +6774,46 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// using the fast transformation. When a param that cannot be transformed
// fast is encountered, skip checking the rest and fall back to the slow
// path.
- for (int i = 0; i < wasm_count; ++i) {
+ for (int i = 0; i < wasm_param_count; ++i) {
CanTransformFast(params[i + 1], sig_->GetParam(i), &slow_path);
}
// Convert JS parameters to wasm numbers using the fast transformation
// and build the call.
base::SmallVector<Node*, 16> args(args_count);
- for (int i = 0; i < wasm_count; ++i) {
+ for (int i = 0; i < wasm_param_count; ++i) {
Node* wasm_param = FromJSFast(params[i + 1], sig_->GetParam(i));
args[i + 1] = wasm_param;
}
- Node* jsval =
- BuildCallAndReturn(is_import, js_context, function_data, args);
+ Node* jsval = BuildCallAndReturn(is_import, js_context, function_data,
+ args, js_wasm_call_data, frame_state);
gasm_->Goto(&done, jsval);
gasm_->Bind(&slow_path);
}
// Convert JS parameters to wasm numbers using the default transformation
// and build the call.
base::SmallVector<Node*, 16> args(args_count);
- for (int i = 0; i < wasm_count; ++i) {
- Node* wasm_param = FromJS(params[i + 1], js_context, sig_->GetParam(i));
- args[i + 1] = wasm_param;
+ for (int i = 0; i < wasm_param_count; ++i) {
+ bool do_conversion =
+ !js_wasm_call_data || js_wasm_call_data->arg_needs_conversion(i);
+ if (do_conversion) {
+ args[i + 1] =
+ FromJS(params[i + 1], js_context, sig_->GetParam(i), frame_state);
+ } else {
+ Node* wasm_param = params[i + 1];
+
+          // For Float32 parameters, simplified-lowering uses
+          // UseInfo::CheckedNumberOrOddballAsFloat64, so the value arrives
+          // here as Float64 and has to be truncated back to Float32 before
+          // it is passed to the wasm call.
+ if (sig_->GetParam(i).kind() == wasm::kF32) {
+ wasm_param = gasm_->TruncateFloat64ToFloat32(wasm_param);
+ }
+
+ args[i + 1] = wasm_param;
+ }
}
- Node* jsval =
- BuildCallAndReturn(is_import, js_context, function_data, args);
+ Node* jsval = BuildCallAndReturn(is_import, js_context, function_data, args,
+ js_wasm_call_data, frame_state);
// If both the default and a fast transformation paths are present,
// get the return value based on the path used.
if (include_fast_path) {
@@ -6795,8 +6835,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
Node* strict_check =
Binop(wasm::kExprI32And, flags,
- mcgraph()->Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
- SharedFunctionInfo::IsStrictBit::kMask));
+ Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
// Load global receiver if sloppy else use undefined.
Diamond strict_d(graph(), mcgraph()->common(), strict_check,
@@ -6864,17 +6904,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
-#ifdef V8_NO_ARGUMENTS_ADAPTOR
// =======================================================================
// === JS Functions with mismatching arity ===============================
// =======================================================================
@@ -6894,7 +6932,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node;
}
args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6905,64 +6943,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = Linkage::GetJSCallDescriptor(
graph()->zone(), false, pushed_count + 1, CallDescriptor::kNoFlags);
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
- break;
- }
-#else
- // =======================================================================
- // === JS Functions with mismatching arity ===============================
- // =======================================================================
- case WasmImportCallKind::kJSFunctionArityMismatch: {
- base::SmallVector<Node*, 16> args(wasm_count + 9);
- int pos = 0;
- Node* function_context =
- gasm_->LoadContextFromJSFunction(callable_node);
- args[pos++] =
- GetBuiltinPointerTarget(Builtins::kArgumentsAdaptorTrampoline);
- args[pos++] = callable_node; // target callable
- args[pos++] = undefined_node; // new target
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
-
- // Load shared function info, and then the formal parameter count.
- Node* shared_function_info =
- gasm_->LoadSharedFunctionInfo(callable_node);
- Node* formal_param_count =
- gasm_->Load(MachineType::Uint16(), shared_function_info,
- wasm::ObjectAccess::
- FormalParameterCountOffsetInSharedFunctionInfo());
- args[pos++] = formal_param_count;
-
- // Determine receiver at runtime.
- args[pos++] =
- BuildReceiverNode(callable_node, native_context, undefined_node);
-
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallBuiltinPointer);
-
- // Convert wasm numbers to JS values.
- pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = function_context;
- args[pos++] = effect();
- args[pos++] = control();
-
- DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
-#endif
// =======================================================================
// === General case of unknown callable ==================================
// =======================================================================
case WasmImportCallKind::kUseCallBuiltin: {
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
+ args[pos++] =
+ GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -6983,8 +6976,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
+ call = gasm_->Call(call_descriptor, pos, args.begin());
break;
}
default:
@@ -6992,13 +6984,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
DCHECK_NOT_NULL(call);
- SetEffect(call);
SetSourcePosition(call, 0);
// Convert the return value(s) back.
if (sig_->return_count() <= 1) {
Node* val = sig_->return_count() == 0
- ? mcgraph()->Int32Constant(0)
+ ? Int32Constant(0)
: FromJS(call, native_context, sig_->GetReturn());
BuildModifyThreadInWasmFlag(true);
Return(val);
@@ -7064,8 +7055,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// TODO(jkummerow): Load the address from the {host_data}, and cache
// wrappers per signature.
const ExternalReference ref = ExternalReference::Create(address);
- Node* function =
- graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+ Node* function = gasm_->ExternalConstant(ref);
// Parameters: Address host_data_foreign, Address arguments.
MachineType host_sig_types[] = {
@@ -7078,24 +7068,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* exception_branch = graph()->NewNode(
mcgraph()->common()->Branch(BranchHint::kTrue),
- graph()->NewNode(mcgraph()->machine()->WordEqual(), return_value,
- mcgraph()->IntPtrConstant(0)),
+ gasm_->WordEqual(return_value, mcgraph()->IntPtrConstant(0)),
control());
SetControl(
graph()->NewNode(mcgraph()->common()->IfFalse(), exception_branch));
- WasmThrowDescriptor interface_descriptor;
+ WasmRethrowDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
- Node* throw_effect =
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, return_value, effect(), control());
- TerminateThrow(throw_effect, control());
+ gasm_->Call(call_descriptor, call_target, return_value);
+ TerminateThrow(effect(), control());
- SetControl(
+ SetEffectControl(
+ return_value,
graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch));
DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
size_t return_count = sig_->return_count();
@@ -7151,9 +7139,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Call the underlying closure.
base::SmallVector<Node*, 16> args(wasm_count + 7);
int pos = 0;
- args[pos++] = GetBuiltinPointerTarget(Builtins::kCall_ReceiverIsAny);
+ args[pos++] =
+ GetBuiltinPointerTarget(mcgraph(), Builtins::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = Int32Constant(wasm_count); // argument count
args[pos++] = BuildLoadUndefinedValueFromInstance(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7173,8 +7162,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = control();
DCHECK_EQ(pos, args.size());
- Node* call = SetEffect(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
+ Node* call = gasm_->Call(call_descriptor, pos, args.begin());
// Convert return JS values to wasm numbers and back to JS values.
Node* jsval;
@@ -7186,8 +7174,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* fixed_array =
BuildMultiReturnFixedArrayFromIterable(sig_, call, context);
int32_t return_count = static_cast<int32_t>(sig_->return_count());
- Node* size =
- graph()->NewNode(mcgraph()->common()->NumberConstant(return_count));
+ Node* size = gasm_->NumberConstant(return_count);
jsval = BuildCallAllocateJSArray(size, context);
Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval);
for (unsigned i = 0; i < sig_->return_count(); ++i) {
@@ -7237,8 +7224,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_);
DCHECK_EQ(pos, args.size());
- Node* call = SetEffect(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), pos, args.begin()));
+ Node* call = gasm_->Call(call_descriptor, pos, args.begin());
Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
Node* if_exception =
@@ -7300,6 +7286,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} // namespace
+void BuildInlinedJSToWasmWrapper(
+ Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
+ const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
+ StubCallMode stub_mode, wasm::WasmFeatures features,
+ const JSWasmCallData* js_wasm_call_data, Node* frame_state) {
+ WasmWrapperGraphBuilder builder(zone, mcgraph, signature, module, spt,
+ stub_mode, features);
+ builder.BuildJSToWasmWrapper(false, js_wasm_call_data, frame_state);
+}
+
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, wasm::WasmEngine* wasm_engine,
const wasm::FunctionSig* sig, const wasm::WasmModule* module,
@@ -7447,10 +7443,10 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
}
// If function isn't compiled, compile it now.
- IsCompiledScope is_compiled_scope(
- shared->is_compiled_scope(callable->GetIsolate()));
+ Isolate* isolate = callable->GetIsolate();
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope(isolate));
if (!is_compiled_scope.is_compiled()) {
- Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
+ Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
@@ -8018,7 +8014,8 @@ class LinkageLocationAllocator {
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind) {
+ WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
+ bool need_frame_state) {
   // The extra here is to accommodate the instance object as first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8095,7 +8092,9 @@ CallDescriptor* GetWasmCallDescriptor(
}
CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
+ use_retpoline ? CallDescriptor::kRetpoline
+ : need_frame_state ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
@@ -8223,7 +8222,6 @@ AssemblerOptions WasmStubAssemblerOptions() {
}
#undef FATAL_UNSUPPORTED_OPCODE
-#undef CALL_BUILTIN
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD
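
The per-argument handling added to the JS-to-Wasm slow path above can be summarized independently of the TurboFan node building. The following is a minimal C++ sketch only, using hypothetical stand-in types (ArgConversionInfo, WasmKind, ArgAction) rather than the real JSWasmCallData or graph assembler:

// Sketch only: models the decision made for each parameter in the new slow
// path (js_wasm_call_data, FromJS, TruncateFloat64ToFloat32), with
// hypothetical stand-ins for the real V8 types.
#include <cstddef>
#include <vector>

enum class WasmKind { kI32, kF32, kF64 };

struct ArgConversionInfo {            // stand-in for JSWasmCallData
  std::vector<bool> arg_needs_conversion;
};

enum class ArgAction {
  kGenericFromJS,              // full JS->wasm conversion (regular wrapper path)
  kPassThrough,                // value already has the wasm representation
  kTruncateFloat64ToFloat32    // f32 params arrive widened to f64
};

ArgAction ClassifyArg(const ArgConversionInfo* info, size_t index,
                      WasmKind param_kind) {
  // Without call data (the stand-alone JS-to-Wasm wrapper), every argument
  // goes through the generic FromJS conversion.
  if (info == nullptr || info->arg_needs_conversion[index]) {
    return ArgAction::kGenericFromJS;
  }
  // Inlined JS-to-Wasm calls: the value was already checked during
  // simplified lowering, but Float32 parameters were widened to Float64
  // and must be narrowed again before the wasm call.
  return param_kind == WasmKind::kF32 ? ArgAction::kTruncateFloat64ToFloat32
                                      : ArgAction::kPassThrough;
}
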
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 00ec7a9f8b..e6614f1c67 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -10,6 +10,7 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/base/small-vector.h"
#include "src/runtime/runtime.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
@@ -36,6 +37,9 @@ class WasmDecorator;
class WasmGraphAssembler;
enum class TrapId : uint32_t;
struct Int64LoweringSpecialCase;
+template <size_t VarCount>
+class GraphAssemblerLabel;
+enum class BranchHint : uint8_t;
} // namespace compiler
namespace wasm {
@@ -148,6 +152,21 @@ enum CWasmEntryParameters {
V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(
Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
+class JSWasmCallData {
+ public:
+ explicit JSWasmCallData(const wasm::FunctionSig* wasm_signature);
+
+ bool arg_needs_conversion(size_t index) const {
+ DCHECK_LT(index, arg_needs_conversion_.size());
+ return arg_needs_conversion_[index];
+ }
+ bool result_needs_conversion() const { return result_needs_conversion_; }
+
+ private:
+ bool result_needs_conversion_;
+ std::vector<bool> arg_needs_conversion_;
+};
+
// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
// and manipulated in wasm-compiler.{h,cc} instead of inside the Wasm decoder.
@@ -162,12 +181,14 @@ struct WasmInstanceCacheNodes {
// the wasm decoder from the internal details of TurboFan.
class WasmGraphBuilder {
public:
+ enum ReferenceKind : bool { // --
+ kArrayOrStruct = true,
+ kFunction = false
+ };
struct ObjectReferenceKnowledge {
bool object_can_be_null;
- bool object_must_be_data_ref;
- bool object_can_be_i31;
- bool rtt_is_i31;
- uint8_t rtt_depth;
+ ReferenceKind reference_kind;
+ int8_t rtt_depth;
};
enum EnforceBoundsCheck : bool { // --
kNeedsBoundsCheck = true,
@@ -201,6 +222,8 @@ class WasmGraphBuilder {
Node* LoopExitValue(Node* value, MachineRepresentation representation);
Node* TerminateThrow(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
+ template <typename... Nodes>
+ Node* Merge(Node* fst, Nodes*... args);
Node* Phi(wasm::ValueType type, unsigned count, Node** vals_and_control);
Node* CreateOrMergeIntoPhi(MachineRepresentation rep, Node* merge,
Node* tnode, Node* fnode);
@@ -434,14 +457,32 @@ class WasmGraphBuilder {
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
- Node* RttCanon(wasm::HeapType type);
- Node* RttSub(wasm::HeapType type, Node* parent_rtt);
+ Node* RttCanon(uint32_t type_index);
+ Node* RttSub(uint32_t type_index, Node* parent_rtt);
+
Node* RefTest(Node* object, Node* rtt, ObjectReferenceKnowledge config);
Node* RefCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
wasm::WasmCodePosition position);
Node* BrOnCast(Node* object, Node* rtt, ObjectReferenceKnowledge config,
Node** match_control, Node** match_effect,
Node** no_match_control, Node** no_match_effect);
+ Node* RefIsData(Node* object, bool object_can_be_null);
+ Node* RefAsData(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position);
+ Node* BrOnData(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
+ Node* RefIsFunc(Node* object, bool object_can_be_null);
+ Node* RefAsFunc(Node* object, bool object_can_be_null,
+ wasm::WasmCodePosition position);
+ Node* BrOnFunc(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
+ Node* RefIsI31(Node* object);
+ Node* RefAsI31(Node* object, wasm::WasmCodePosition position);
+ Node* BrOnI31(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect);
bool has_simd() const { return has_simd_; }
@@ -490,7 +531,7 @@ class WasmGraphBuilder {
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildCallNode(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position, Node* instance_node,
- const Operator* op);
+ const Operator* op, Node* frame_state = nullptr);
// Helper function for {BuildIndirectCall}.
void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
Node** ift_sig_ids, Node** ift_targets,
@@ -501,7 +542,8 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, Vector<Node*> args,
Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node, UseRetpoline use_retpoline,
+ Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig, Vector<Node*> args,
wasm::WasmCodePosition position,
Node* instance_node, UseRetpoline use_retpoline);
@@ -514,7 +556,6 @@ class WasmGraphBuilder {
Node* BuildCallRef(uint32_t sig_index, Vector<Node*> args, Vector<Node*> rets,
CheckForNull null_check, IsReturnCall continuation,
wasm::WasmCodePosition position);
- Node* GetBuiltinPointerTarget(int builtin_id);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -573,6 +614,7 @@ class WasmGraphBuilder {
Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
+ Node* BuildChangeIntPtrToInt64(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
@@ -582,6 +624,36 @@ class WasmGraphBuilder {
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
+ using BranchBuilder = std::function<void(Node*, BranchHint)>;
+ struct Callbacks {
+ BranchBuilder succeed_if;
+ BranchBuilder fail_if;
+ BranchBuilder fail_if_not;
+ };
+
+ // This type is used to collect control/effect nodes we need to merge at the
+ // end of BrOn* functions. Nodes are collected in {TypeCheck} etc. by calling
+ // the passed callbacks succeed_if, fail_if and fail_if_not. We have up to 5
+ // control nodes to merge; the EffectPhi needs an additional input.
+ using SmallNodeVector = base::SmallVector<Node*, 6>;
+
+ Callbacks TestCallbacks(GraphAssemblerLabel<1>* label);
+ Callbacks CastCallbacks(GraphAssemblerLabel<0>* label,
+ wasm::WasmCodePosition position);
+ Callbacks BranchCallbacks(SmallNodeVector& no_match_controls,
+ SmallNodeVector& no_match_effects,
+ SmallNodeVector& match_controls,
+ SmallNodeVector& match_effects);
+
+ void TypeCheck(Node* object, Node* rtt, ObjectReferenceKnowledge config,
+ bool null_succeeds, Callbacks callbacks);
+ void DataCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
+ void FuncCheck(Node* object, bool object_can_be_null, Callbacks callbacks);
+
+ Node* BrOnCastAbs(Node** match_control, Node** match_effect,
+ Node** no_match_control, Node** no_match_effect,
+ std::function<void(Callbacks)> type_checker);
+
// Asm.js specific functionality.
Node* BuildI32AsmjsSConvertF32(Node* input);
Node* BuildI32AsmjsSConvertF64(Node* input);
@@ -643,6 +715,7 @@ class WasmGraphBuilder {
WasmInstanceCacheNodes* instance_cache_ = nullptr;
SetOncePointer<Node> instance_node_;
+ SetOncePointer<Node> ref_null_node_;
SetOncePointer<Node> globals_start_;
SetOncePointer<Node> imported_mutable_globals_;
SetOncePointer<Node> stack_check_code_node_;
@@ -666,11 +739,17 @@ class WasmGraphBuilder {
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
+V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
+ Zone* zone, MachineGraph* mcgraph, const wasm::FunctionSig* signature,
+ const wasm::WasmModule* module, compiler::SourcePositionTable* spt,
+ StubCallMode stub_mode, wasm::WasmFeatures features,
+ const JSWasmCallData* js_wasm_call_data, Node* frame_state);
+
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
WasmGraphBuilder::UseRetpoline use_retpoline =
WasmGraphBuilder::kNoRetpoline,
- WasmCallKind kind = kWasmFunction);
+ WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, const CallDescriptor* call_descriptor);
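
The need_frame_state parameter added to GetWasmCallDescriptor only influences which descriptor flags are chosen, and retpoline still takes precedence. A minimal sketch of that selection, using a stand-in enum instead of the real CallDescriptor::Flags:

// Sketch only: stand-in for the flag selection in GetWasmCallDescriptor.
enum class DescriptorFlagsSketch { kNoFlags, kRetpoline, kNeedsFrameState };

DescriptorFlagsSketch SelectFlags(bool use_retpoline, bool need_frame_state) {
  if (use_retpoline) return DescriptorFlagsSketch::kRetpoline;
  // A frame state is only requested when the caller attaches one, i.e. for
  // the inlined JS-to-Wasm call case that passes frame_state through
  // BuildWasmCall.
  return need_frame_state ? DescriptorFlagsSketch::kNeedsFrameState
                          : DescriptorFlagsSketch::kNoFlags;
}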