Diffstat (limited to 'deps/v8/src/maglev')
-rw-r--r--  deps/v8/src/maglev/maglev-assembler-inl.h           |   41
-rw-r--r--  deps/v8/src/maglev/maglev-assembler.h               |   40
-rw-r--r--  deps/v8/src/maglev/maglev-basic-block.h             |    9
-rw-r--r--  deps/v8/src/maglev/maglev-code-gen-state.h          |   14
-rw-r--r--  deps/v8/src/maglev/maglev-code-generator.cc         |  580
-rw-r--r--  deps/v8/src/maglev/maglev-code-generator.h          |    3
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.cc       |   13
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-info.h        |   14
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.cc       |    2
-rw-r--r--  deps/v8/src/maglev/maglev-compilation-unit.h        |    1
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.cc               |  111
-rw-r--r--  deps/v8/src/maglev/maglev-compiler.h                |    2
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.cc  |   16
-rw-r--r--  deps/v8/src/maglev/maglev-concurrent-dispatcher.h   |    2
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.cc          | 1264
-rw-r--r--  deps/v8/src/maglev/maglev-graph-builder.h           |  285
-rw-r--r--  deps/v8/src/maglev/maglev-graph-printer.cc          |   12
-rw-r--r--  deps/v8/src/maglev/maglev-graph-verifier.h          |   17
-rw-r--r--  deps/v8/src/maglev/maglev-interpreter-frame-state.h |  219
-rw-r--r--  deps/v8/src/maglev/maglev-ir-inl.h                  |   24
-rw-r--r--  deps/v8/src/maglev/maglev-ir.cc                     |  563
-rw-r--r--  deps/v8/src/maglev/maglev-ir.h                      |  311
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.cc               |  305
-rw-r--r--  deps/v8/src/maglev/maglev-regalloc.h                |    6
-rw-r--r--  deps/v8/src/maglev/maglev.cc                        |    4
-rw-r--r--  deps/v8/src/maglev/maglev.h                         |    2
26 files changed, 2252 insertions, 1608 deletions
diff --git a/deps/v8/src/maglev/maglev-assembler-inl.h b/deps/v8/src/maglev/maglev-assembler-inl.h
index f9fefb53f9..309e74a502 100644
--- a/deps/v8/src/maglev/maglev-assembler-inl.h
+++ b/deps/v8/src/maglev/maglev-assembler-inl.h
@@ -5,6 +5,10 @@
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
#include "src/codegen/macro-assembler-inl.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
@@ -131,6 +135,9 @@ struct CopyForDeferredHelper<MaglevCompilationInfo*>
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
+template <>
+struct CopyForDeferredHelper<DoubleRegister>
+ : public CopyForDeferredByValue<DoubleRegister> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
@@ -187,10 +194,10 @@ struct FunctionArgumentsTupleHelper<R (&)(A...)> {
};
template <typename T>
-struct StripFirstTwoTupleArgs;
+struct StripFirstTupleArg;
-template <typename T1, typename T2, typename... T>
-struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
+template <typename T1, typename... T>
+struct StripFirstTupleArg<std::tuple<T1, T...>> {
using Stripped = std::tuple<T...>;
};
@@ -199,9 +206,8 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
- using Tuple = typename StripFirstTwoTupleArgs<
+ using Tuple = typename StripFirstTupleArg<
typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;
- static constexpr size_t kSize = FunctionArgumentsTupleHelper<Function>::kSize;
template <typename... InArgs>
explicit DeferredCodeInfoImpl(MaglevCompilationInfo* compilation_info,
@@ -213,18 +219,12 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo {
DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;
- void Generate(MaglevAssembler* masm, Label* return_label) override {
- DoCall(masm, return_label, std::make_index_sequence<kSize - 2>{});
+ void Generate(MaglevAssembler* masm) override {
+ std::apply(function,
+ std::tuple_cat(std::make_tuple(masm), std::move(args)));
}
private:
- template <size_t... I>
- auto DoCall(MaglevAssembler* masm, Label* return_label,
- std::index_sequence<I...>) {
- // TODO(leszeks): This could be replaced with std::apply in C++17.
- return function(masm, return_label, std::get<I>(args)...);
- }
-
FunctionPointer function;
Tuple args;
};
@@ -234,6 +234,16 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo {
template <typename Function, typename... Args>
inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode(
Function&& deferred_code_gen, Args&&... args) {
+ using FunctionPointer =
+ typename detail::FunctionArgumentsTupleHelper<Function>::FunctionPointer;
+ static_assert(
+ std::is_invocable_v<FunctionPointer, MaglevAssembler*,
+ decltype(detail::CopyForDeferred(
+ std::declval<MaglevCompilationInfo*>(),
+ std::declval<Args>()))...>,
+ "Parameters of deferred_code_gen function should match arguments into "
+ "PushDeferredCode");
+
using DeferredCodeInfoT = detail::DeferredCodeInfoImpl<Function>;
DeferredCodeInfoT* deferred_code =
compilation_info()->zone()->New<DeferredCodeInfoT>(
@@ -252,11 +262,10 @@ inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
Args&&... args) {
DeferredCodeInfo* deferred_code = PushDeferredCode<Function, Args...>(
std::forward<Function>(deferred_code_gen), std::forward<Args>(args)...);
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
RecordComment("-- Jump to deferred code");
}
j(cond, &deferred_code->deferred_code_label);
- bind(&deferred_code->return_label);
}
// ---
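
The static_assert and std::apply changes above boil down to: the deferred arguments are copied into a tuple up front, and the assembler pointer is prepended only when the deferred block is finally generated. A minimal standalone sketch of that dispatch shape (hypothetical Assembler/DeferredCall/EmitSlotCheck names, not V8 code):

#include <tuple>

struct Assembler {};  // stand-in for the real assembler type

template <typename Function, typename... Args>
struct DeferredCall {
  Function function;
  std::tuple<Args...> args;

  void Generate(Assembler* masm) {
    // Same shape as DeferredCodeInfoImpl::Generate above: prepend the
    // assembler pointer and expand the stored argument tuple.
    std::apply(function, std::tuple_cat(std::make_tuple(masm), args));
  }
};

void EmitSlotCheck(Assembler*, int /*slot*/) { /* emit code for the slot */ }

int main() {
  DeferredCall<void (*)(Assembler*, int), int> deferred{&EmitSlotCheck, {42}};
  Assembler masm;
  deferred.Generate(&masm);
}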
diff --git a/deps/v8/src/maglev/maglev-assembler.h b/deps/v8/src/maglev/maglev-assembler.h
index ec85919ef0..0bfac0bbdf 100644
--- a/deps/v8/src/maglev/maglev-assembler.h
+++ b/deps/v8/src/maglev/maglev-assembler.h
@@ -12,25 +12,6 @@ namespace v8 {
namespace internal {
namespace maglev {
-// Label allowed to be passed to deferred code.
-class ZoneLabelRef {
- public:
- explicit ZoneLabelRef(Zone* zone) : label_(zone->New<Label>()) {}
-
- static ZoneLabelRef UnsafeFromLabelPointer(Label* label) {
- // This is an unsafe operation, {label} must be zone allocated.
- return ZoneLabelRef(label);
- }
-
- Label* operator*() { return label_; }
-
- private:
- Label* label_;
-
- // Unsafe constructor. {label} must be zone allocated.
- explicit ZoneLabelRef(Label* label) : label_(label) {}
-};
-
class MaglevAssembler : public MacroAssembler {
public:
explicit MaglevAssembler(MaglevCodeGenState* code_gen_state)
@@ -103,6 +84,27 @@ class MaglevAssembler : public MacroAssembler {
MaglevCodeGenState* const code_gen_state_;
};
+// Label allowed to be passed to deferred code.
+class ZoneLabelRef {
+ public:
+ explicit ZoneLabelRef(Zone* zone) : label_(zone->New<Label>()) {}
+ explicit inline ZoneLabelRef(MaglevAssembler* masm)
+ : ZoneLabelRef(masm->compilation_info()->zone()) {}
+
+ static ZoneLabelRef UnsafeFromLabelPointer(Label* label) {
+ // This is an unsafe operation, {label} must be zone allocated.
+ return ZoneLabelRef(label);
+ }
+
+ Label* operator*() { return label_; }
+
+ private:
+ Label* label_;
+
+ // Unsafe constructor. {label} must be zone allocated.
+ explicit ZoneLabelRef(Label* label) : label_(label) {}
+};
+
} // namespace maglev
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/maglev/maglev-basic-block.h b/deps/v8/src/maglev/maglev-basic-block.h
index 11ca4c1c69..12c652942a 100644
--- a/deps/v8/src/maglev/maglev-basic-block.h
+++ b/deps/v8/src/maglev/maglev-basic-block.h
@@ -52,11 +52,6 @@ class BasicBlock {
bool is_empty_block() const { return is_empty_block_; }
- BasicBlock* empty_block_predecessor() const {
- DCHECK(is_empty_block());
- return empty_block_predecessor_;
- }
-
MergePointRegisterState& empty_block_register_state() {
DCHECK(is_empty_block());
return *empty_block_register_state_;
@@ -67,13 +62,12 @@ class BasicBlock {
empty_block_register_state_ = register_state;
}
- void set_empty_block_predecessor(BasicBlock* predecessor) {
+ void set_empty_block() {
DCHECK(nodes_.is_empty());
DCHECK(control_node()->Is<Jump>());
DCHECK_NULL(state_);
is_empty_block_ = true;
empty_block_register_state_ = nullptr;
- empty_block_predecessor_ = predecessor;
}
Phi::List* phis() const {
@@ -112,7 +106,6 @@ class BasicBlock {
MergePointInterpreterFrameState* state_;
MergePointRegisterState* empty_block_register_state_;
};
- BasicBlock* empty_block_predecessor_;
Label label_;
};
diff --git a/deps/v8/src/maglev/maglev-code-gen-state.h b/deps/v8/src/maglev/maglev-code-gen-state.h
index 01fdb8216b..72c4c42d06 100644
--- a/deps/v8/src/maglev/maglev-code-gen-state.h
+++ b/deps/v8/src/maglev/maglev-code-gen-state.h
@@ -24,16 +24,16 @@ class MaglevAssembler;
class DeferredCodeInfo {
public:
- virtual void Generate(MaglevAssembler* masm, Label* return_label) = 0;
+ virtual void Generate(MaglevAssembler* masm) = 0;
Label deferred_code_label;
- Label return_label;
};
class MaglevCodeGenState {
public:
- MaglevCodeGenState(MaglevCompilationInfo* compilation_info,
+ MaglevCodeGenState(Isolate* isolate, MaglevCompilationInfo* compilation_info,
MaglevSafepointTableBuilder* safepoint_table_builder)
- : compilation_info_(compilation_info),
+ : isolate_(isolate),
+ compilation_info_(compilation_info),
safepoint_table_builder_(safepoint_table_builder) {}
void set_tagged_slots(int slots) { tagged_slots_ = slots; }
@@ -45,6 +45,9 @@ class MaglevCodeGenState {
const std::vector<DeferredCodeInfo*>& deferred_code() const {
return deferred_code_;
}
+ std::vector<DeferredCodeInfo*> TakeDeferredCode() {
+ return std::exchange(deferred_code_, std::vector<DeferredCodeInfo*>());
+ }
void PushEagerDeopt(EagerDeoptInfo* info) { eager_deopts_.push_back(info); }
void PushLazyDeopt(LazyDeoptInfo* info) { lazy_deopts_.push_back(info); }
const std::vector<EagerDeoptInfo*>& eager_deopts() const {
@@ -60,7 +63,7 @@ class MaglevCodeGenState {
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
}
- Isolate* isolate() const { return compilation_info_->isolate(); }
+ Isolate* isolate() const { return isolate_; }
compiler::JSHeapBroker* broker() const { return compilation_info_->broker(); }
MaglevGraphLabeller* graph_labeller() const {
return compilation_info_->graph_labeller();
@@ -73,6 +76,7 @@ class MaglevCodeGenState {
MaglevCompilationInfo* compilation_info() const { return compilation_info_; }
private:
+ Isolate* const isolate_;
MaglevCompilationInfo* const compilation_info_;
MaglevSafepointTableBuilder* const safepoint_table_builder_;
diff --git a/deps/v8/src/maglev/maglev-code-generator.cc b/deps/v8/src/maglev/maglev-code-generator.cc
index e1578c118b..7c72a9e040 100644
--- a/deps/v8/src/maglev/maglev-code-generator.cc
+++ b/deps/v8/src/maglev/maglev-code-generator.cc
@@ -286,6 +286,7 @@ class ParallelMoveResolver {
if (has_cycle) {
if (!scratch_has_cycle_start_) {
Pop(kScratchRegT);
+ scratch_has_cycle_start_ = true;
}
EmitMovesFromSource(kScratchRegT, targets);
scratch_has_cycle_start_ = false;
@@ -366,6 +367,7 @@ class ParallelMoveResolver {
}
if (scratch_has_cycle_start_ && !targets.stack_slots.empty()) {
Push(kScratchRegT);
+ scratch_has_cycle_start_ = false;
}
for (uint32_t target_slot : targets.stack_slots) {
DCHECK_EQ(moves_from_stack_slot_.find(target_slot),
@@ -432,203 +434,210 @@ class ParallelMoveResolver {
class ExceptionHandlerTrampolineBuilder {
public:
+ static void Build(MaglevAssembler* masm, NodeBase* node) {
+ ExceptionHandlerTrampolineBuilder builder(masm);
+ builder.EmitTrampolineFor(node);
+ }
+
+ private:
explicit ExceptionHandlerTrampolineBuilder(MaglevAssembler* masm)
: masm_(masm) {}
+ struct Move {
+ explicit Move(const ValueLocation& target, ValueNode* source)
+ : target(target), source(source) {}
+ const ValueLocation& target;
+ ValueNode* const source;
+ };
+ using MoveVector = base::SmallVector<Move, 16>;
+
void EmitTrampolineFor(NodeBase* node) {
DCHECK(node->properties().can_throw());
- ExceptionHandlerInfo* handler_info = node->exception_handler_info();
+ ExceptionHandlerInfo* const handler_info = node->exception_handler_info();
DCHECK(handler_info->HasExceptionHandler());
+ BasicBlock* const catch_block = handler_info->catch_block.block_ptr();
+ LazyDeoptInfo* const deopt_info = node->lazy_deopt_info();
+
+ // The exception handler trampoline resolves moves for exception phis and
+ // then jumps to the actual catch block. There are a few points worth
+ // noting:
+ //
+ // - All source locations are assumed to be stack slots, except the
+ // accumulator which is stored in kReturnRegister0. We don't emit an
+ // explicit move for it, instead it is pushed and popped at the boundaries
+ // of the entire move sequence (necessary due to materialisation).
+ //
+ // - Some values may require materialisation, i.e. heap number construction
+ // through calls to the NewHeapNumber builtin. To avoid potential conflicts
+ // with other moves (which may happen due to stack slot reuse, i.e. a
+ // target location of move A may equal source location of move B), we
+ // materialise and push results to new temporary stack slots before the
+ // main move sequence, and then pop results into their final target
+ // locations afterwards. Note this is only safe because a) materialised
+ // values are tagged and b) the stack walk treats unknown stack slots as
+ // tagged.
+
+ // TODO(v8:7700): Handle inlining.
- BasicBlock* block = handler_info->catch_block.block_ptr();
- LazyDeoptInfo* deopt_info = node->lazy_deopt_info();
+ ParallelMoveResolver<Register> direct_moves(masm_);
+ MoveVector materialising_moves;
+ bool save_accumulator = false;
+ RecordMoves(deopt_info->unit, catch_block, deopt_info->state.register_frame,
+ &direct_moves, &materialising_moves, &save_accumulator);
__ bind(&handler_info->trampoline_entry);
- ClearState();
- // TODO(v8:7700): Handle inlining.
- RecordMoves(deopt_info->unit, block, deopt_info->state.register_frame);
- // We do moves that need to materialise values first, since we might need to
- // call a builtin to create a HeapNumber, and therefore we would need to
- // spill all registers.
- DoMaterialiseMoves();
- // Move the rest, we will not call HeapNumber anymore.
- DoDirectMoves();
- // Jump to the catch block.
- __ jmp(block->label());
+ __ RecordComment("-- Exception handler trampoline START");
+ EmitMaterialisationsAndPushResults(materialising_moves, save_accumulator);
+ __ RecordComment("EmitMoves");
+ direct_moves.EmitMoves();
+ EmitPopMaterialisedResults(materialising_moves, save_accumulator);
+ __ jmp(catch_block->label());
+ __ RecordComment("-- Exception handler trampoline END");
}
- private:
- MaglevAssembler* const masm_;
- using Move = std::pair<const ValueLocation&, ValueNode*>;
- base::SmallVector<Move, 16> direct_moves_;
- base::SmallVector<Move, 16> materialisation_moves_;
- bool save_accumulator_ = false;
-
MacroAssembler* masm() const { return masm_; }
- void ClearState() {
- direct_moves_.clear();
- materialisation_moves_.clear();
- save_accumulator_ = false;
- }
-
- void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* block,
- const CompactInterpreterFrameState* register_frame) {
- for (Phi* phi : *block->phis()) {
- DCHECK_EQ(phi->input_count(), 0);
+ void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* catch_block,
+ const CompactInterpreterFrameState* register_frame,
+ ParallelMoveResolver<Register>* direct_moves,
+ MoveVector* materialising_moves, bool* save_accumulator) {
+ for (Phi* phi : *catch_block->phis()) {
+ DCHECK(phi->is_exception_phi());
if (!phi->has_valid_live_range()) continue;
+
+ const ValueLocation& target = phi->result();
if (phi->owner() == interpreter::Register::virtual_accumulator()) {
// If the accumulator is live, then it is the exception object located
- // at kReturnRegister0. This is also the first phi in the list.
- DCHECK_EQ(phi->result().AssignedGeneralRegister(), kReturnRegister0);
- save_accumulator_ = true;
+ // at kReturnRegister0. We don't emit a move for it since the value is
+ // already in the right spot, but we do have to ensure it isn't
+ // clobbered by calls to the NewHeapNumber builtin during
+ // materialisation.
+ DCHECK_EQ(target.AssignedGeneralRegister(), kReturnRegister0);
+ *save_accumulator = true;
continue;
}
- ValueNode* value = register_frame->GetValueOf(phi->owner(), unit);
- DCHECK_NOT_NULL(value);
- switch (value->properties().value_representation()) {
+
+ ValueNode* const source = register_frame->GetValueOf(phi->owner(), unit);
+ DCHECK_NOT_NULL(source);
+ // All registers must have been spilled due to the call.
+ // TODO(jgruber): Which call? Because any throw requires at least a call
+ // to Runtime::kThrowFoo?
+ DCHECK(!source->allocation().IsRegister());
+
+ switch (source->properties().value_representation()) {
case ValueRepresentation::kTagged:
- // All registers should have been spilled due to the call.
- DCHECK(!value->allocation().IsRegister());
- direct_moves_.emplace_back(phi->result(), value);
+ direct_moves->RecordMove(
+ source, source->allocation(),
+ compiler::AllocatedOperand::cast(target.operand()));
break;
case ValueRepresentation::kInt32:
- if (value->allocation().IsConstant()) {
- direct_moves_.emplace_back(phi->result(), value);
+ if (source->allocation().IsConstant()) {
+ // TODO(jgruber): Why is it okay for Int32 constants to remain
+ // untagged while non-constants are unconditionally smi-tagged or
+ // converted to a HeapNumber during materialisation?
+ direct_moves->RecordMove(
+ source, source->allocation(),
+ compiler::AllocatedOperand::cast(target.operand()));
} else {
- materialisation_moves_.emplace_back(phi->result(), value);
+ materialising_moves->emplace_back(target, source);
}
break;
case ValueRepresentation::kFloat64:
- materialisation_moves_.emplace_back(phi->result(), value);
+ materialising_moves->emplace_back(target, source);
break;
}
}
}
- void DoMaterialiseMoves() {
- if (materialisation_moves_.size() == 0) return;
- if (save_accumulator_) {
+ void EmitMaterialisationsAndPushResults(const MoveVector& moves,
+ bool save_accumulator) const {
+ if (moves.size() == 0) return;
+
+ // It's possible to optimize this further, at the cost of additional
+ // complexity:
+ //
+ // - If the target location is a register, we could theoretically move the
+ // materialised result there immediately, with the additional complication
+ // that following calls to NewHeapNumber may clobber the register.
+ //
+ // - If the target location is a stack slot which is neither a source nor
+ // target slot for any other moves (direct or materialising), we could move
+ // the result there directly instead of pushing and later popping it. This
+ // doesn't seem worth the extra code complexity though, given we are
+ // talking about a presumably infrequent case for exception handlers.
+
+ __ RecordComment("EmitMaterialisationsAndPushResults");
+ if (save_accumulator) __ Push(kReturnRegister0);
+ for (const Move& move : moves) {
+ MaterialiseTo(move.source, kReturnRegister0);
__ Push(kReturnRegister0);
}
- for (auto it = materialisation_moves_.begin();
- it < materialisation_moves_.end(); it++) {
- switch (it->second->properties().value_representation()) {
- case ValueRepresentation::kInt32: {
- EmitMoveInt32ToReturnValue0(it->second);
- break;
- }
- case ValueRepresentation::kFloat64:
- EmitMoveFloat64ToReturnValue0(it->second);
- break;
- case ValueRepresentation::kTagged:
- UNREACHABLE();
- }
- if (it->first.operand().IsStackSlot()) {
- // If the target is in a stack sot, we can immediately move
- // the result to it.
- __ movq(ToMemOperand(it->first), kReturnRegister0);
- } else {
- // We spill the result to the stack, in order to be able to call the
- // NewHeapNumber builtin again, however we don't need to push the result
- // of the last one.
- if (it != materialisation_moves_.end() - 1) {
- __ Push(kReturnRegister0);
- }
- }
- }
- // If the last move target is a register, the result should be in
- // kReturnValue0, so so we emit a simple move. Otherwise it has already been
- // moved.
- const ValueLocation& last_move_target =
- materialisation_moves_.rbegin()->first;
- if (last_move_target.operand().IsRegister()) {
- __ Move(last_move_target.AssignedGeneralRegister(), kReturnRegister0);
- }
- // And then pop the rest.
- for (auto it = materialisation_moves_.rbegin() + 1;
- it < materialisation_moves_.rend(); it++) {
- if (it->first.operand().IsRegister()) {
- __ Pop(it->first.AssignedGeneralRegister());
- }
- }
- if (save_accumulator_) {
- __ Pop(kReturnRegister0);
- }
}
- void DoDirectMoves() {
- for (auto& [target, value] : direct_moves_) {
- if (value->allocation().IsConstant()) {
- if (Int32Constant* constant = value->TryCast<Int32Constant>()) {
- EmitMove(target, Smi::FromInt(constant->value()));
- } else {
- // Int32 and Float64 constants should have already been dealt with.
- DCHECK_EQ(value->properties().value_representation(),
- ValueRepresentation::kTagged);
- EmitConstantLoad(target, value);
- }
+ void EmitPopMaterialisedResults(const MoveVector& moves,
+ bool save_accumulator) const {
+ if (moves.size() == 0) return;
+ __ RecordComment("EmitPopMaterialisedResults");
+ for (auto it = moves.rbegin(); it < moves.rend(); it++) {
+ const ValueLocation& target = it->target;
+ if (target.operand().IsRegister()) {
+ __ Pop(target.AssignedGeneralRegister());
} else {
- EmitMove(target, ToMemOperand(value));
+ DCHECK(target.operand().IsStackSlot());
+ __ Pop(kScratchRegister);
+ __ movq(masm_->ToMemOperand(target.operand()), kScratchRegister);
}
}
- }
- void EmitMoveInt32ToReturnValue0(ValueNode* value) {
- // We consider Int32Constants together with tagged values.
- DCHECK(!value->allocation().IsConstant());
- using D = NewHeapNumberDescriptor;
- Label done;
- __ movq(kReturnRegister0, ToMemOperand(value));
- __ addl(kReturnRegister0, kReturnRegister0);
- __ j(no_overflow, &done);
- // If we overflow, instead of bailing out (deopting), we change
- // representation to a HeapNumber.
- __ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue), ToMemOperand(value));
- __ CallBuiltin(Builtin::kNewHeapNumber);
- __ bind(&done);
+ if (save_accumulator) __ Pop(kReturnRegister0);
}
- void EmitMoveFloat64ToReturnValue0(ValueNode* value) {
+ void MaterialiseTo(ValueNode* value, Register dst) const {
using D = NewHeapNumberDescriptor;
- if (Float64Constant* constant = value->TryCast<Float64Constant>()) {
- __ Move(D::GetDoubleRegisterParameter(D::kValue), constant->value());
- } else {
- __ Movsd(D::GetDoubleRegisterParameter(D::kValue), ToMemOperand(value));
+ switch (value->properties().value_representation()) {
+ case ValueRepresentation::kInt32: {
+ // We consider Int32Constants together with tagged values.
+ DCHECK(!value->allocation().IsConstant());
+ Label done;
+ __ movq(dst, ToMemOperand(value));
+ __ addl(dst, dst);
+ __ j(no_overflow, &done);
+ // If we overflow, instead of bailing out (deopting), we change
+ // representation to a HeapNumber.
+ __ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue),
+ ToMemOperand(value));
+ __ CallBuiltin(Builtin::kNewHeapNumber);
+ __ Move(dst, kReturnRegister0);
+ __ bind(&done);
+ break;
+ }
+ case ValueRepresentation::kFloat64:
+ if (Float64Constant* constant = value->TryCast<Float64Constant>()) {
+ __ Move(D::GetDoubleRegisterParameter(D::kValue), constant->value());
+ } else {
+ __ Movsd(D::GetDoubleRegisterParameter(D::kValue),
+ ToMemOperand(value));
+ }
+ __ CallBuiltin(Builtin::kNewHeapNumber);
+ __ Move(dst, kReturnRegister0);
+ break;
+ case ValueRepresentation::kTagged:
+ UNREACHABLE();
}
- __ CallBuiltin(Builtin::kNewHeapNumber);
}
- MemOperand ToMemOperand(ValueNode* node) {
+ MemOperand ToMemOperand(ValueNode* node) const {
DCHECK(node->allocation().IsAnyStackSlot());
return masm_->ToMemOperand(node->allocation());
}
- MemOperand ToMemOperand(const ValueLocation& location) {
+ MemOperand ToMemOperand(const ValueLocation& location) const {
DCHECK(location.operand().IsStackSlot());
return masm_->ToMemOperand(location.operand());
}
- template <typename Operand>
- void EmitMove(const ValueLocation& dst, Operand src) {
- if (dst.operand().IsRegister()) {
- __ Move(dst.AssignedGeneralRegister(), src);
- } else {
- __ Move(kScratchRegister, src);
- __ movq(ToMemOperand(dst), kScratchRegister);
- }
- }
-
- void EmitConstantLoad(const ValueLocation& dst, ValueNode* value) {
- DCHECK(value->allocation().IsConstant());
- if (dst.operand().IsRegister()) {
- value->LoadToRegister(masm_, dst.AssignedGeneralRegister());
- } else {
- value->LoadToRegister(masm_, kScratchRegister);
- __ movq(ToMemOperand(dst), kScratchRegister);
- }
- }
+ MaglevAssembler* const masm_;
};
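
The MaterialiseTo logic above smi-tags an int32 by doubling it and, if that overflows, boxes the value as a HeapNumber instead. A rough standalone sketch of that policy (hypothetical Tagged/MaterialiseInt32 names; the real code emits assembly and calls the NewHeapNumber builtin):

#include <cstdint>
#include <limits>
#include <variant>

// Tagged value: either a Smi (payload already shifted left by one) or a
// boxed double standing in for a HeapNumber.
using Tagged = std::variant<std::int32_t, double>;

Tagged MaterialiseInt32(std::int32_t value) {
  std::int64_t doubled = std::int64_t{value} * 2;  // mirrors `addl dst, dst`
  if (doubled >= std::numeric_limits<std::int32_t>::min() &&
      doubled <= std::numeric_limits<std::int32_t>::max()) {
    return static_cast<std::int32_t>(doubled);  // fits: keep it as a Smi
  }
  return static_cast<double>(value);  // overflow: box as a heap number
}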
class MaglevCodeGeneratingNodeProcessor {
@@ -637,134 +646,145 @@ class MaglevCodeGeneratingNodeProcessor {
: masm_(masm) {}
void PreProcessGraph(Graph* graph) {
- if (FLAG_maglev_break_on_entry) {
+ code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
+ code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
+
+ if (v8_flags.maglev_break_on_entry) {
__ int3();
}
- __ BailoutIfDeoptimized(rbx);
+ if (v8_flags.maglev_ool_prologue) {
+ // Call the out-of-line prologue (with parameters passed on the stack).
+ __ Push(Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
+ __ Push(Immediate(code_gen_state()->tagged_slots() * kSystemPointerSize));
+ __ CallBuiltin(Builtin::kMaglevOutOfLinePrologue);
+ } else {
+ __ BailoutIfDeoptimized(rbx);
- // Tiering support.
- // TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
- // per Maglev code object on x64).
- {
- // Scratch registers. Don't clobber regs related to the calling
- // convention (e.g. kJavaScriptCallArgCountRegister).
- Register flags = rcx;
- Register feedback_vector = r9;
-
- // Load the feedback vector.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
- __ LoadTaggedPointerField(
- feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- __ AssertFeedbackVector(feedback_vector);
-
- Label flags_need_processing, next;
- __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
- flags, feedback_vector, CodeKind::MAGLEV, &flags_need_processing);
- __ jmp(&next);
-
- __ bind(&flags_need_processing);
+ // Tiering support.
+ // TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
+ // per Maglev code object on x64).
{
- ASM_CODE_COMMENT_STRING(masm(), "Optimized marker check");
- __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
- flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
- __ Trap();
- }
-
- __ bind(&next);
- }
-
- __ EnterFrame(StackFrame::MAGLEV);
+ // Scratch registers. Don't clobber regs related to the calling
+ // convention (e.g. kJavaScriptCallArgCountRegister).
+ Register flags = rcx;
+ Register feedback_vector = r9;
+
+ // Load the feedback vector.
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ AssertFeedbackVector(feedback_vector);
+
+ Label flags_need_processing, next;
+ __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
+ flags, feedback_vector, CodeKind::MAGLEV, &flags_need_processing);
+ __ jmp(&next);
+
+ __ bind(&flags_need_processing);
+ {
+ ASM_CODE_COMMENT_STRING(masm(), "Optimized marker check");
+ __ OptimizeCodeOrTailCallOptimizedCodeSlot(
+ flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
+ __ Trap();
+ }
- // Save arguments in frame.
- // TODO(leszeks): Consider eliding this frame if we don't make any calls
- // that could clobber these registers.
- __ Push(kContextRegister);
- __ Push(kJSFunctionRegister); // Callee's JS function.
- __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
+ __ bind(&next);
+ }
- code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
- code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
+ __ EnterFrame(StackFrame::MAGLEV);
- {
- ASM_CODE_COMMENT_STRING(masm(), " Stack/interrupt check");
- // Stack check. This folds the checks for both the interrupt stack limit
- // check and the real stack limit into one by just checking for the
- // interrupt limit. The interrupt limit is either equal to the real stack
- // limit or tighter. By ensuring we have space until that limit after
- // building the frame we can quickly precheck both at once.
- __ Move(kScratchRegister, rsp);
- // TODO(leszeks): Include a max call argument size here.
- __ subq(kScratchRegister,
- Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
- __ cmpq(kScratchRegister,
- __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
+ // Save arguments in frame.
+ // TODO(leszeks): Consider eliding this frame if we don't make any calls
+ // that could clobber these registers.
+ __ Push(kContextRegister);
+ __ Push(kJSFunctionRegister); // Callee's JS function.
+ __ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
- __ j(below, &deferred_call_stack_guard_);
- __ bind(&deferred_call_stack_guard_return_);
- }
+ {
+ ASM_CODE_COMMENT_STRING(masm(), " Stack/interrupt check");
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real
+ // stack limit or tighter. By ensuring we have space until that limit
+ // after building the frame we can quickly precheck both at once.
+ __ Move(kScratchRegister, rsp);
+ // TODO(leszeks): Include a max call argument size here.
+ __ subq(kScratchRegister, Immediate(code_gen_state()->stack_slots() *
+ kSystemPointerSize));
+ __ cmpq(kScratchRegister,
+ __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
+
+ __ j(below, &deferred_call_stack_guard_);
+ __ bind(&deferred_call_stack_guard_return_);
+ }
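
The folded check works because the interrupt limit is never below the real stack limit, so a single comparison against it after reserving the frame covers both. A sketch of that comparison (hypothetical helper; the stack is assumed to grow down):

#include <cstdint>

bool NeedsStackGuard(std::uintptr_t sp, std::uintptr_t frame_size_bytes,
                     std::uintptr_t interrupt_limit /* >= real limit */) {
  return sp - frame_size_bytes < interrupt_limit;
}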
- // Initialize stack slots.
- if (graph->tagged_stack_slots() > 0) {
- ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
- // TODO(leszeks): Consider filling with xmm + movdqa instead.
- __ Move(rax, Immediate(0));
-
- // Magic value. Experimentally, an unroll size of 8 doesn't seem any worse
- // than fully unrolled pushes.
- const int kLoopUnrollSize = 8;
- int tagged_slots = graph->tagged_stack_slots();
- if (tagged_slots < 2 * kLoopUnrollSize) {
- // If the frame is small enough, just unroll the frame fill completely.
- for (int i = 0; i < tagged_slots; ++i) {
- __ pushq(rax);
- }
- } else {
- // Extract the first few slots to round to the unroll size.
- int first_slots = tagged_slots % kLoopUnrollSize;
- for (int i = 0; i < first_slots; ++i) {
- __ pushq(rax);
- }
- __ Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
- // We enter the loop unconditionally, so make sure we need to loop at
- // least once.
- DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
- Label loop;
- __ bind(&loop);
- for (int i = 0; i < kLoopUnrollSize; ++i) {
- __ pushq(rax);
+ // Initialize stack slots.
+ if (graph->tagged_stack_slots() > 0) {
+ ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
+ // TODO(leszeks): Consider filling with xmm + movdqa instead.
+ __ Move(rax, Immediate(0));
+
+ // Magic value. Experimentally, an unroll size of 8 doesn't seem any
+ // worse than fully unrolled pushes.
+ const int kLoopUnrollSize = 8;
+ int tagged_slots = graph->tagged_stack_slots();
+ if (tagged_slots < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill
+ // completely.
+ for (int i = 0; i < tagged_slots; ++i) {
+ __ pushq(rax);
+ }
+ } else {
+ // Extract the first few slots to round to the unroll size.
+ int first_slots = tagged_slots % kLoopUnrollSize;
+ for (int i = 0; i < first_slots; ++i) {
+ __ pushq(rax);
+ }
+ __ Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
+ // We enter the loop unconditionally, so make sure we need to loop at
+ // least once.
+ DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
+ Label loop;
+ __ bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; ++i) {
+ __ pushq(rax);
+ }
+ __ decl(rbx);
+ __ j(greater, &loop);
}
- __ decl(rbx);
- __ j(greater, &loop);
}
- }
- if (graph->untagged_stack_slots() > 0) {
- // Extend rsp by the size of the remaining untagged part of the frame, no
- // need to initialise these.
- __ subq(rsp,
- Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
+ if (graph->untagged_stack_slots() > 0) {
+ // Extend rsp by the size of the remaining untagged part of the frame,
+ // no need to initialise these.
+ __ subq(rsp,
+ Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
+ }
}
}
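
For reference, the slot-fill strategy above in plain C++ (hypothetical FillTaggedSlots helper; the real code pushes zeroed machine words onto the native stack): small frames are fully unrolled, larger ones push the remainder first and then loop in chunks of eight.

#include <cstdint>
#include <vector>

void FillTaggedSlots(std::vector<std::uint64_t>& stack, int tagged_slots) {
  constexpr int kLoopUnrollSize = 8;
  if (tagged_slots < 2 * kLoopUnrollSize) {
    // Small frame: fill it completely without a loop.
    for (int i = 0; i < tagged_slots; ++i) stack.push_back(0);
    return;
  }
  // Push the remainder first so the main loop runs whole chunks.
  const int first_slots = tagged_slots % kLoopUnrollSize;
  for (int i = 0; i < first_slots; ++i) stack.push_back(0);
  for (int n = tagged_slots / kLoopUnrollSize; n > 0; --n) {
    for (int i = 0; i < kLoopUnrollSize; ++i) stack.push_back(0);
  }
}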
void PostProcessGraph(Graph*) {
__ int3();
- __ bind(&deferred_call_stack_guard_);
- ASM_CODE_COMMENT_STRING(masm(), "Stack/interrupt call");
- // Save any registers that can be referenced by RegisterInput.
- // TODO(leszeks): Only push those that are used by the graph.
- __ PushAll(RegisterInput::kAllowedRegisters);
- // Push the frame size
- __ Push(Immediate(
- Smi::FromInt(code_gen_state()->stack_slots() * kSystemPointerSize)));
- __ CallRuntime(Runtime::kStackGuardWithGap, 1);
- __ PopAll(RegisterInput::kAllowedRegisters);
- __ jmp(&deferred_call_stack_guard_return_);
+
+ if (!v8_flags.maglev_ool_prologue) {
+ __ bind(&deferred_call_stack_guard_);
+ ASM_CODE_COMMENT_STRING(masm(), "Stack/interrupt call");
+ // Save any registers that can be referenced by RegisterInput.
+ // TODO(leszeks): Only push those that are used by the graph.
+ __ PushAll(RegisterInput::kAllowedRegisters);
+ // Push the frame size
+ __ Push(Immediate(
+ Smi::FromInt(code_gen_state()->stack_slots() * kSystemPointerSize)));
+ __ CallRuntime(Runtime::kStackGuardWithGap, 1);
+ __ PopAll(RegisterInput::kAllowedRegisters);
+ __ jmp(&deferred_call_stack_guard_return_);
+ }
}
void PreProcessBasicBlock(BasicBlock* block) {
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- Block b" << graph_labeller()->BlockId(block);
__ RecordComment(ss.str());
@@ -775,14 +795,14 @@ class MaglevCodeGeneratingNodeProcessor {
template <typename NodeT>
void Process(NodeT* node, const ProcessingState& state) {
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- " << graph_labeller()->NodeId(node) << ": "
<< PrintNode(graph_labeller(), node);
__ RecordComment(ss.str());
}
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ movq(kScratchRegister, rbp);
__ subq(kScratchRegister, rsp);
__ cmpq(kScratchRegister,
@@ -806,7 +826,7 @@ class MaglevCodeGeneratingNodeProcessor {
compiler::AllocatedOperand::cast(value_node->result().operand());
// We shouldn't spill nodes which already output to the stack.
if (!source.IsAnyStackSlot()) {
- if (FLAG_code_comments) __ RecordComment("-- Spill:");
+ if (v8_flags.code_comments) __ RecordComment("-- Spill:");
if (source.IsRegister()) {
__ movq(masm()->GetStackSlot(value_node->spill_slot()),
ToRegister(source));
@@ -851,7 +871,7 @@ class MaglevCodeGeneratingNodeProcessor {
// TODO(leszeks): We should remove dead phis entirely and turn this into
// a DCHECK.
if (!phi->has_valid_live_range()) {
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- * "
<< phi->input(state.block()->predecessor_id()).operand() << " → "
@@ -866,7 +886,7 @@ class MaglevCodeGeneratingNodeProcessor {
compiler::InstructionOperand source = input.operand();
compiler::AllocatedOperand target =
compiler::AllocatedOperand::cast(phi->result().operand());
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << target << " (n"
<< graph_labeller()->NodeId(phi) << ")";
@@ -889,7 +909,7 @@ class MaglevCodeGeneratingNodeProcessor {
if (LoadMergeState(state, &node, &merge)) {
compiler::InstructionOperand source =
merge->operand(predecessor_id);
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
@@ -909,7 +929,7 @@ class MaglevCodeGeneratingNodeProcessor {
if (LoadMergeState(state, &node, &merge)) {
compiler::InstructionOperand source =
merge->operand(predecessor_id);
- if (FLAG_code_comments) {
+ if (v8_flags.code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
@@ -943,17 +963,19 @@ class MaglevCodeGeneratingNodeProcessor {
class MaglevCodeGeneratorImpl final {
public:
- static MaybeHandle<Code> Generate(MaglevCompilationInfo* compilation_info,
+ static MaybeHandle<Code> Generate(Isolate* isolate,
+ MaglevCompilationInfo* compilation_info,
Graph* graph) {
- return MaglevCodeGeneratorImpl(compilation_info, graph).Generate();
+ return MaglevCodeGeneratorImpl(isolate, compilation_info, graph).Generate();
}
private:
- MaglevCodeGeneratorImpl(MaglevCompilationInfo* compilation_info, Graph* graph)
+ MaglevCodeGeneratorImpl(Isolate* isolate,
+ MaglevCompilationInfo* compilation_info, Graph* graph)
: safepoint_table_builder_(compilation_info->zone(),
graph->tagged_stack_slots(),
graph->untagged_stack_slots()),
- code_gen_state_(compilation_info, safepoint_table_builder()),
+ code_gen_state_(isolate, compilation_info, safepoint_table_builder()),
masm_(&code_gen_state_),
processor_(&masm_),
graph_(graph) {}
@@ -968,15 +990,20 @@ class MaglevCodeGeneratorImpl final {
processor_.ProcessGraph(graph_);
EmitDeferredCode();
EmitDeopts();
- EmitExceptionHandlersTrampolines();
+ EmitExceptionHandlerTrampolines();
}
void EmitDeferredCode() {
- for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
- __ RecordComment("-- Deferred block");
- __ bind(&deferred_code->deferred_code_label);
- deferred_code->Generate(masm(), &deferred_code->return_label);
- __ Trap();
+ // Loop over deferred_code() multiple times, clearing the vector on each
+ // outer loop, so that deferred code can itself emit deferred code.
+ while (!code_gen_state_.deferred_code().empty()) {
+ for (DeferredCodeInfo* deferred_code :
+ code_gen_state_.TakeDeferredCode()) {
+ __ RecordComment("-- Deferred block");
+ __ bind(&deferred_code->deferred_code_label);
+ deferred_code->Generate(masm());
+ __ Trap();
+ }
}
}
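
The drain loop relies on TakeDeferredCode() swapping the pending vector out with std::exchange, so deferred blocks can register further deferred blocks that are picked up on the next pass. A minimal sketch of the same pattern (hypothetical names, std::function standing in for DeferredCodeInfo):

#include <functional>
#include <utility>
#include <vector>

std::vector<std::function<void()>> pending;

// Same idea as TakeDeferredCode(): swap the pending list out so that code
// generated inside the loop can append fresh entries for the next round.
std::vector<std::function<void()>> TakePending() {
  return std::exchange(pending, {});
}

void EmitAllDeferred() {
  while (!pending.empty()) {
    for (auto& generate : TakePending()) {
      generate();  // may push new deferred entries onto `pending`
    }
  }
}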
@@ -1014,12 +1041,11 @@ class MaglevCodeGeneratorImpl final {
}
}
- void EmitExceptionHandlersTrampolines() {
+ void EmitExceptionHandlerTrampolines() {
if (code_gen_state_.handlers().size() == 0) return;
- ExceptionHandlerTrampolineBuilder builder(masm());
- __ RecordComment("-- Exception handlers trampolines");
+ __ RecordComment("-- Exception handler trampolines");
for (NodeBase* node : code_gen_state_.handlers()) {
- builder.EmitTrampolineFor(node);
+ ExceptionHandlerTrampolineBuilder::Build(masm(), node);
}
}
@@ -1151,9 +1177,7 @@ class MaglevCodeGeneratorImpl final {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
}
- Isolate* isolate() const {
- return code_gen_state_.compilation_info()->isolate();
- }
+ Isolate* isolate() const { return code_gen_state_.isolate(); }
MaglevAssembler* masm() { return &masm_; }
MaglevSafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
@@ -1171,8 +1195,8 @@ class MaglevCodeGeneratorImpl final {
// static
MaybeHandle<Code> MaglevCodeGenerator::Generate(
- MaglevCompilationInfo* compilation_info, Graph* graph) {
- return MaglevCodeGeneratorImpl::Generate(compilation_info, graph);
+ Isolate* isolate, MaglevCompilationInfo* compilation_info, Graph* graph) {
+ return MaglevCodeGeneratorImpl::Generate(isolate, compilation_info, graph);
}
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-code-generator.h b/deps/v8/src/maglev/maglev-code-generator.h
index 8dbd8921ab..64ac1df0bc 100644
--- a/deps/v8/src/maglev/maglev-code-generator.h
+++ b/deps/v8/src/maglev/maglev-code-generator.h
@@ -16,7 +16,8 @@ class MaglevCompilationInfo;
class MaglevCodeGenerator : public AllStatic {
public:
- static MaybeHandle<Code> Generate(MaglevCompilationInfo* compilation_info,
+ static MaybeHandle<Code> Generate(Isolate* isolate,
+ MaglevCompilationInfo* compilation_info,
Graph* graph);
};
diff --git a/deps/v8/src/maglev/maglev-compilation-info.cc b/deps/v8/src/maglev/maglev-compilation-info.cc
index e15a30cac7..300d7e66e1 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.cc
+++ b/deps/v8/src/maglev/maglev-compilation-info.cc
@@ -52,14 +52,17 @@ class V8_NODISCARD MaglevCompilationHandleScope final {
MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
Handle<JSFunction> function)
: zone_(isolate->allocator(), kMaglevZoneName),
- isolate_(isolate),
broker_(new compiler::JSHeapBroker(
- isolate, zone(), FLAG_trace_heap_broker, CodeKind::MAGLEV))
-#define V(Name) , Name##_(FLAG_##Name)
+ isolate, zone(), v8_flags.trace_heap_broker, CodeKind::MAGLEV))
+#define V(Name) , Name##_(v8_flags.Name)
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
-{
- DCHECK(FLAG_maglev);
+ ,
+ specialize_to_function_context_(
+ v8_flags.maglev_function_context_specialization &&
+ function->raw_feedback_cell().map() ==
+ ReadOnlyRoots(isolate).one_closure_cell_map()) {
+ DCHECK(v8_flags.maglev);
MaglevCompilationHandleScope compilation(isolate, this);
diff --git a/deps/v8/src/maglev/maglev-compilation-info.h b/deps/v8/src/maglev/maglev-compilation-info.h
index 049990f222..ad65623bbd 100644
--- a/deps/v8/src/maglev/maglev-compilation-info.h
+++ b/deps/v8/src/maglev/maglev-compilation-info.h
@@ -34,6 +34,8 @@ class Graph;
class MaglevCompilationUnit;
class MaglevGraphLabeller;
+// A list of v8_flag values copied into the MaglevCompilationInfo for
+// guaranteed {immutable,threadsafe} access.
#define MAGLEV_COMPILATION_FLAG_LIST(V) \
V(code_comments) \
V(maglev) \
@@ -51,7 +53,6 @@ class MaglevCompilationInfo final {
}
~MaglevCompilationInfo();
- Isolate* isolate() const { return isolate_; }
Zone* zone() { return &zone_; }
compiler::JSHeapBroker* broker() const { return broker_.get(); }
MaglevCompilationUnit* toplevel_compilation_unit() const {
@@ -88,6 +89,10 @@ class MaglevCompilationInfo final {
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
+ bool specialize_to_function_context() const {
+ return specialize_to_function_context_;
+ }
+
// Must be called from within a MaglevCompilationHandleScope. Transfers owned
// handles (e.g. shared_, function_) to the new scope.
void ReopenHandlesInNewHandleScope(Isolate* isolate);
@@ -105,7 +110,6 @@ class MaglevCompilationInfo final {
MaglevCompilationInfo(Isolate* isolate, Handle<JSFunction> function);
Zone zone_;
- Isolate* const isolate_;
const std::unique_ptr<compiler::JSHeapBroker> broker_;
// Must be initialized late since it requires an initialized heap broker.
MaglevCompilationUnit* toplevel_compilation_unit_ = nullptr;
@@ -123,6 +127,12 @@ class MaglevCompilationInfo final {
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
+ // If enabled, the generated code can rely on the function context to be a
+ // constant (known at compile-time). This opens new optimization
+ // opportunities, but prevents code sharing between different function
+ // contexts.
+ const bool specialize_to_function_context_;
+
// 1) PersistentHandles created via PersistentHandlesScope inside of
// CompilationHandleScope.
// 2) Owned by MaglevCompilationInfo.
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.cc b/deps/v8/src/maglev/maglev-compilation-unit.cc
index 16b8ae08ea..590b2e3f78 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.cc
+++ b/deps/v8/src/maglev/maglev-compilation-unit.cc
@@ -40,8 +40,6 @@ compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
return info_->broker();
}
-Isolate* MaglevCompilationUnit::isolate() const { return info_->isolate(); }
-
Zone* MaglevCompilationUnit::zone() const { return info_->zone(); }
bool MaglevCompilationUnit::has_graph_labeller() const {
diff --git a/deps/v8/src/maglev/maglev-compilation-unit.h b/deps/v8/src/maglev/maglev-compilation-unit.h
index 5281fa16fc..000e936965 100644
--- a/deps/v8/src/maglev/maglev-compilation-unit.h
+++ b/deps/v8/src/maglev/maglev-compilation-unit.h
@@ -42,7 +42,6 @@ class MaglevCompilationUnit : public ZoneObject {
MaglevCompilationInfo* info() const { return info_; }
const MaglevCompilationUnit* caller() const { return caller_; }
compiler::JSHeapBroker* broker() const;
- Isolate* isolate() const;
LocalIsolate* local_isolate() const;
Zone* zone() const;
int register_count() const { return register_count_; }
diff --git a/deps/v8/src/maglev/maglev-compiler.cc b/deps/v8/src/maglev/maglev-compiler.cc
index 78decb2857..2855bdbf5a 100644
--- a/deps/v8/src/maglev/maglev-compiler.cc
+++ b/deps/v8/src/maglev/maglev-compiler.cc
@@ -35,6 +35,7 @@
#include "src/maglev/maglev-graph-verifier.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
+#include "src/maglev/maglev-ir-inl.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc.h"
#include "src/maglev/maglev-vreg-allocator.h"
@@ -175,56 +176,24 @@ class UseMarkingProcessor {
}
}
- void MarkCheckpointNodes(NodeBase* node, const MaglevCompilationUnit& unit,
- const CheckpointedInterpreterState* checkpoint_state,
- InputLocation* input_locations,
- LoopUsedNodes* loop_used_nodes,
- const ProcessingState& state, int& index) {
- if (checkpoint_state->parent) {
- MarkCheckpointNodes(node, *unit.caller(), checkpoint_state->parent,
- input_locations, loop_used_nodes, state, index);
- }
-
- const CompactInterpreterFrameState* register_frame =
- checkpoint_state->register_frame;
- int use_id = node->id();
-
- register_frame->ForEachValue(
- unit, [&](ValueNode* node, interpreter::Register reg) {
- MarkUse(node, use_id, &input_locations[index++], loop_used_nodes);
- });
- }
void MarkCheckpointNodes(NodeBase* node, const EagerDeoptInfo* deopt_info,
LoopUsedNodes* loop_used_nodes,
const ProcessingState& state) {
- int index = 0;
- MarkCheckpointNodes(node, deopt_info->unit, &deopt_info->state,
- deopt_info->input_locations, loop_used_nodes, state,
- index);
+ int use_id = node->id();
+ detail::DeepForEachInput(
+ deopt_info,
+ [&](ValueNode* node, interpreter::Register reg, InputLocation* input) {
+ MarkUse(node, use_id, input, loop_used_nodes);
+ });
}
void MarkCheckpointNodes(NodeBase* node, const LazyDeoptInfo* deopt_info,
LoopUsedNodes* loop_used_nodes,
const ProcessingState& state) {
- int index = 0;
-
- if (deopt_info->state.parent) {
- MarkCheckpointNodes(node, *deopt_info->unit.caller(),
- deopt_info->state.parent, deopt_info->input_locations,
- loop_used_nodes, state, index);
- }
-
- // Handle the top-of-frame info manually, since we have to handle the result
- // location.
- const CompactInterpreterFrameState* register_frame =
- deopt_info->state.register_frame;
int use_id = node->id();
-
- register_frame->ForEachValue(
- deopt_info->unit, [&](ValueNode* node, interpreter::Register reg) {
- // Skip over the result location.
- if (deopt_info->IsResultRegister(reg)) return;
- MarkUse(node, use_id, &deopt_info->input_locations[index++],
- loop_used_nodes);
+ detail::DeepForEachInput(
+ deopt_info,
+ [&](ValueNode* node, interpreter::Register reg, InputLocation* input) {
+ MarkUse(node, use_id, input, loop_used_nodes);
});
}
@@ -264,14 +233,13 @@ class TranslationArrayProcessor {
}
private:
- const InputLocation* EmitDeoptFrame(const MaglevCompilationUnit& unit,
- const CheckpointedInterpreterState& state,
- const InputLocation* input_locations) {
+ void EmitDeoptFrame(const MaglevCompilationUnit& unit,
+ const CheckpointedInterpreterState& state,
+ const InputLocation* input_locations) {
if (state.parent) {
// Deopt input locations are in the order of deopt frame emission, so
// update the pointer after emitting the parent frame.
- input_locations =
- EmitDeoptFrame(*unit.caller(), *state.parent, input_locations);
+ EmitDeoptFrame(*unit.caller(), *state.parent, input_locations);
}
// Returns are used for updating an accumulator or register after a lazy
@@ -283,9 +251,8 @@ class TranslationArrayProcessor {
GetDeoptLiteral(*unit.shared_function_info().object()),
unit.register_count(), return_offset, return_count);
- return EmitDeoptFrameValues(unit, state.register_frame, input_locations,
- interpreter::Register::invalid_value(),
- return_count);
+ EmitDeoptFrameValues(unit, state.register_frame, input_locations,
+ interpreter::Register::invalid_value(), return_count);
}
void EmitEagerDeopt(EagerDeoptInfo* deopt_info) {
@@ -314,8 +281,8 @@ class TranslationArrayProcessor {
if (deopt_info->state.parent) {
// Deopt input locations are in the order of deopt frame emission, so
// update the pointer after emitting the parent frame.
- input_locations = EmitDeoptFrame(
- *unit.caller(), *deopt_info->state.parent, input_locations);
+ EmitDeoptFrame(*unit.caller(), *deopt_info->state.parent,
+ input_locations);
}
// Return offsets are counted from the end of the translation frame, which
@@ -431,10 +398,10 @@ class TranslationArrayProcessor {
result_location.index() + result_size - 1);
}
- const InputLocation* EmitDeoptFrameValues(
+ void EmitDeoptFrameValues(
const MaglevCompilationUnit& compilation_unit,
const CompactInterpreterFrameState* checkpoint_state,
- const InputLocation* input_locations,
+ const InputLocation*& input_location,
interpreter::Register result_location, int result_size) {
// Closure
if (compilation_unit.inlining_depth() == 0) {
@@ -449,7 +416,6 @@ class TranslationArrayProcessor {
// TODO(leszeks): The input locations array happens to be in the same order
// as parameters+context+locals+accumulator are accessed here. We should
// make this clearer and guard against this invariant failing.
- const InputLocation* input_location = input_locations;
// Parameters
{
@@ -461,9 +427,9 @@ class TranslationArrayProcessor {
translation_array_builder().StoreOptimizedOut();
} else {
EmitDeoptFrameSingleValue(value, *input_location);
+ input_location++;
}
i++;
- input_location++;
});
}
@@ -478,18 +444,15 @@ class TranslationArrayProcessor {
checkpoint_state->ForEachLocal(
compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
DCHECK_LE(i, reg.index());
- if (InReturnValues(reg, result_location, result_size)) {
- input_location++;
- return;
- }
+ if (InReturnValues(reg, result_location, result_size)) return;
while (i < reg.index()) {
translation_array_builder().StoreOptimizedOut();
i++;
}
DCHECK_EQ(i, reg.index());
EmitDeoptFrameSingleValue(value, *input_location);
- i++;
input_location++;
+ i++;
});
while (i < compilation_unit.register_count()) {
translation_array_builder().StoreOptimizedOut();
@@ -504,12 +467,11 @@ class TranslationArrayProcessor {
result_location, result_size)) {
ValueNode* value = checkpoint_state->accumulator(compilation_unit);
EmitDeoptFrameSingleValue(value, *input_location);
+ input_location++;
} else {
translation_array_builder().StoreOptimizedOut();
}
}
-
- return input_location;
}
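
The refactor above threads the input-location cursor through as `const InputLocation*&` and advances it in place only when a value is actually emitted, instead of returning the updated pointer. A stripped-down sketch of that calling convention (hypothetical types):

struct InputLocation {};

void EmitSingleValue(const InputLocation&) { /* emit one translated value */ }

void EmitFrameValues(const InputLocation*& cursor, int count) {
  for (int i = 0; i < count; ++i) {
    EmitSingleValue(*cursor);
    ++cursor;  // the caller sees the advanced cursor after this returns
  }
}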
int GetDeoptLiteral(Object obj) {
@@ -539,13 +501,14 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
compiler::UnparkedScopeIfNeeded unparked_scope(compilation_info->broker());
// Build graph.
- if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
- FLAG_trace_maglev_graph_building || FLAG_trace_maglev_regalloc) {
+ if (v8_flags.print_maglev_code || v8_flags.code_comments ||
+ v8_flags.print_maglev_graph || v8_flags.trace_maglev_graph_building ||
+ v8_flags.trace_maglev_regalloc) {
compilation_info->set_graph_labeller(new MaglevGraphLabeller());
}
- if (FLAG_print_maglev_code || FLAG_print_maglev_graph ||
- FLAG_trace_maglev_graph_building || FLAG_trace_maglev_regalloc) {
+ if (v8_flags.print_maglev_code || v8_flags.print_maglev_graph ||
+ v8_flags.trace_maglev_graph_building || v8_flags.trace_maglev_regalloc) {
MaglevCompilationUnit* top_level_unit =
compilation_info->toplevel_compilation_unit();
std::cout << "Compiling " << Brief(*top_level_unit->function().object())
@@ -561,7 +524,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
graph_builder.Build();
- if (FLAG_print_maglev_graph) {
+ if (v8_flags.print_maglev_graph) {
std::cout << "\nAfter graph buiding" << std::endl;
PrintGraph(std::cout, compilation_info, graph_builder.graph());
}
@@ -579,7 +542,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
processor.ProcessGraph(graph_builder.graph());
}
- if (FLAG_print_maglev_graph) {
+ if (v8_flags.print_maglev_graph) {
std::cout << "After node processor" << std::endl;
PrintGraph(std::cout, compilation_info, graph_builder.graph());
}
@@ -587,7 +550,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
StraightForwardRegisterAllocator allocator(compilation_info,
graph_builder.graph());
- if (FLAG_print_maglev_graph) {
+ if (v8_flags.print_maglev_graph) {
std::cout << "After register allocation" << std::endl;
PrintGraph(std::cout, compilation_info, graph_builder.graph());
}
@@ -602,7 +565,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
// static
MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
- MaglevCompilationInfo* compilation_info) {
+ Isolate* isolate, MaglevCompilationInfo* compilation_info) {
Graph* const graph = compilation_info->graph();
if (graph == nullptr) {
// Compilation failed.
@@ -614,7 +577,8 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
}
Handle<Code> code;
- if (!MaglevCodeGenerator::Generate(compilation_info, graph).ToHandle(&code)) {
+ if (!MaglevCodeGenerator::Generate(isolate, compilation_info, graph)
+ .ToHandle(&code)) {
compilation_info->toplevel_compilation_unit()
->shared_function_info()
.object()
@@ -629,11 +593,10 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
return {};
}
- if (FLAG_print_maglev_code) {
+ if (v8_flags.print_maglev_code) {
code->Print();
}
- Isolate* const isolate = compilation_info->isolate();
isolate->native_context()->AddOptimizedCode(ToCodeT(*code));
return ToCodeT(code, isolate);
}
diff --git a/deps/v8/src/maglev/maglev-compiler.h b/deps/v8/src/maglev/maglev-compiler.h
index 1bb7ad6d37..febe387d87 100644
--- a/deps/v8/src/maglev/maglev-compiler.h
+++ b/deps/v8/src/maglev/maglev-compiler.h
@@ -30,7 +30,7 @@ class MaglevCompiler : public AllStatic {
// Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class?
static MaybeHandle<CodeT> GenerateCode(
- MaglevCompilationInfo* compilation_info);
+ Isolate* isolate, MaglevCompilationInfo* compilation_info);
};
} // namespace maglev
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
index 87756c21a0..cb8e1044ee 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.cc
@@ -88,7 +88,7 @@ MaglevCompilationJob::MaglevCompilationJob(
std::unique_ptr<MaglevCompilationInfo>&& info)
: OptimizedCompilationJob(kMaglevCompilerName, State::kReadyToPrepare),
info_(std::move(info)) {
- DCHECK(FLAG_maglev);
+ DCHECK(v8_flags.maglev);
}
MaglevCompilationJob::~MaglevCompilationJob() = default;
@@ -108,7 +108,7 @@ CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
Handle<CodeT> codet;
- if (!maglev::MaglevCompiler::GenerateCode(info()).ToHandle(&codet)) {
+ if (!maglev::MaglevCompiler::GenerateCode(isolate, info()).ToHandle(&codet)) {
return CompilationJob::FAILED;
}
info()->toplevel_compilation_unit()->function().object()->set_code(*codet);
@@ -119,6 +119,10 @@ Handle<JSFunction> MaglevCompilationJob::function() const {
return info_->toplevel_compilation_unit()->function().object();
}
+bool MaglevCompilationJob::specialize_to_function_context() const {
+ return info_->specialize_to_function_context();
+}
+
// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
@@ -134,6 +138,9 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
std::unique_ptr<MaglevCompilationJob> job;
if (!incoming_queue()->Dequeue(&job)) break;
DCHECK_NOT_NULL(job);
+ TRACE_EVENT_WITH_FLOW0(
+ TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.MaglevBackground",
+ job.get(), TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
RuntimeCallStats* rcs = nullptr; // TODO(v8:7700): Implement.
CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
@@ -157,7 +164,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
MaglevConcurrentDispatcher::MaglevConcurrentDispatcher(Isolate* isolate)
: isolate_(isolate) {
- if (FLAG_concurrent_recompilation && FLAG_maglev) {
+ if (v8_flags.concurrent_recompilation && v8_flags.maglev) {
job_handle_ = V8::GetCurrentPlatform()->PostJob(
TaskPriority::kUserVisible, std::make_unique<JobTask>(this));
DCHECK(is_enabled());
@@ -188,6 +195,9 @@ void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<MaglevCompilationJob> job;
outgoing_queue_.Dequeue(&job);
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.MaglevConcurrentFinalize", job.get(),
+ TRACE_EVENT_FLAG_FLOW_IN);
Compiler::FinalizeMaglevCompilationJob(job.get(), isolate_);
}
}
diff --git a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
index 5510fa7c9e..09f046eee2 100644
--- a/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
+++ b/deps/v8/src/maglev/maglev-concurrent-dispatcher.h
@@ -56,6 +56,8 @@ class MaglevCompilationJob final : public OptimizedCompilationJob {
Handle<JSFunction> function() const;
+ bool specialize_to_function_context() const;
+
base::TimeDelta time_taken_to_prepare() { return time_taken_to_prepare_; }
base::TimeDelta time_taken_to_execute() { return time_taken_to_execute_; }
base::TimeDelta time_taken_to_finalize() { return time_taken_to_finalize_; }
diff --git a/deps/v8/src/maglev/maglev-graph-builder.cc b/deps/v8/src/maglev/maglev-graph-builder.cc
index 27d0b7d75d..d47b82713b 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.cc
+++ b/deps/v8/src/maglev/maglev-graph-builder.cc
@@ -9,6 +9,7 @@
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/globals.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
@@ -30,10 +31,59 @@
#include "src/objects/property-details.h"
#include "src/objects/slots-inl.h"
-namespace v8 {
-namespace internal {
+namespace v8::internal::maglev {
-namespace maglev {
+namespace {
+
+ValueNode* TryGetParentContext(ValueNode* node) {
+ if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) {
+ return n->context().node();
+ }
+
+ if (CallRuntime* n = node->TryCast<CallRuntime>()) {
+ switch (n->function_id()) {
+ case Runtime::kPushBlockContext:
+ case Runtime::kPushCatchContext:
+ case Runtime::kNewFunctionContext:
+ return n->context().node();
+ default:
+ break;
+ }
+ }
+
+ return nullptr;
+}
+
+// Attempts to walk up the context chain through the graph in order to reduce
+// depth and thus the number of runtime loads.
+void MinimizeContextChainDepth(ValueNode** context, size_t* depth) {
+ while (*depth > 0) {
+ ValueNode* parent_context = TryGetParentContext(*context);
+ if (parent_context == nullptr) return;
+ *context = parent_context;
+ (*depth)--;
+ }
+}
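
A minimal standalone sketch of the depth-minimization idea above, using a made-up ContextNode type in place of Maglev's ValueNode graph: whenever the node that created a context is visible, one hop of the chain is resolved at compile time, and only the remaining depth needs runtime LoadTaggedField hops.

// Toy model of MinimizeContextChainDepth; not V8 code, just the idea.
#include <cstddef>
#include <iostream>

struct ContextNode {
  // Non-null only when the operation that created this context is visible
  // in the graph (e.g. CreateFunctionContext, Runtime::kPushBlockContext).
  ContextNode* statically_known_parent = nullptr;
};

// Walk up while the parent is statically known, decrementing the remaining
// depth; the caller only emits runtime loads for whatever depth is left.
void MinimizeDepth(ContextNode** context, size_t* depth) {
  while (*depth > 0 && (*context)->statically_known_parent != nullptr) {
    *context = (*context)->statically_known_parent;
    --(*depth);
  }
}

int main() {
  ContextNode outer;
  ContextNode middle{&outer};
  ContextNode inner{&middle};
  ContextNode* ctx = &inner;
  size_t depth = 2;  // the bytecode asks for the grandparent context
  MinimizeDepth(&ctx, &depth);
  std::cout << "runtime loads still needed: " << depth << "\n";  // prints 0
}
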
+
+class FunctionContextSpecialization final : public AllStatic {
+ public:
+ static base::Optional<compiler::ContextRef> TryToRef(
+ const MaglevCompilationUnit* unit, ValueNode* context, size_t* depth) {
+ DCHECK(unit->info()->specialize_to_function_context());
+ base::Optional<compiler::ContextRef> ref;
+ if (InitialValue* n = context->TryCast<InitialValue>()) {
+ if (n->source().is_current_context()) {
+ ref = unit->function().context();
+ }
+ } else if (Constant* n = context->TryCast<Constant>()) {
+ ref = n->ref().AsContext();
+ }
+ if (!ref.has_value()) return {};
+ return ref->previous(depth);
+ }
+};
+
+} // namespace
MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
MaglevCompilationUnit* compilation_unit,
@@ -76,11 +126,10 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
void MaglevGraphBuilder::StartPrologue() {
current_block_ = zone()->New<BasicBlock>(nullptr);
- block_offset_ = -1;
}
BasicBlock* MaglevGraphBuilder::EndPrologue() {
- BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
+ BasicBlock* first_block = FinishBlock<Jump>({}, &jump_targets_[0]);
MergeIntoFrameState(first_block, 0);
return first_block;
}
@@ -133,12 +182,12 @@ void MaglevGraphBuilder::BuildMergeStates() {
const compiler::LoopInfo& loop_info = offset_and_info.second;
const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset);
DCHECK_NULL(merge_states_[offset]);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "- Creating loop merge state at @" << offset << std::endl;
}
merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop(
- *compilation_unit_, offset, NumPredecessors(offset), liveness,
- &loop_info);
+ current_interpreter_frame_, *compilation_unit_, offset,
+ NumPredecessors(offset), liveness, &loop_info);
}
if (bytecode().handler_table_size() > 0) {
@@ -150,7 +199,7 @@ void MaglevGraphBuilder::BuildMergeStates() {
GetInLivenessFor(offset);
DCHECK_EQ(NumPredecessors(offset), 0);
DCHECK_NULL(merge_states_[offset]);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "- Creating exception merge state at @" << offset
<< ", context register r" << context_reg.index() << std::endl;
}
@@ -326,13 +375,9 @@ void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() {
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32BinaryOperationNode() {
// TODO(v8:7700): Do constant folding.
- ValueNode *left, *right;
- if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegisterInt32(0);
- } else {
- left = LoadRegisterInt32(0);
- right = GetAccumulatorInt32();
- }
+ ValueNode* left = LoadRegisterInt32(0);
+ ValueNode* right = GetAccumulatorInt32();
+
SetAccumulator(AddNewInt32BinaryOperationNode<kOperation>({left, right}));
}
@@ -362,13 +407,9 @@ void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNode() {
template <Operation kOperation>
void MaglevGraphBuilder::BuildFloat64BinaryOperationNode() {
// TODO(v8:7700): Do constant folding.
- ValueNode *left, *right;
- if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegisterFloat64(0);
- } else {
- left = LoadRegisterFloat64(0);
- right = GetAccumulatorFloat64();
- }
+ ValueNode* left = LoadRegisterFloat64(0);
+ ValueNode* right = GetAccumulatorFloat64();
+
SetAccumulator(AddNewFloat64BinaryOperationNode<kOperation>({left, right}));
}
@@ -488,9 +529,21 @@ bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
}
BasicBlock* block = FinishBlock<CompareControlNode>(
- next_offset(), {left, right}, operation, &jump_targets_[true_offset],
+ {left, right}, operation, &jump_targets_[true_offset],
&jump_targets_[false_offset]);
+ if (true_offset == iterator_.GetJumpTargetOffset()) {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_true_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ } else {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_false_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ }
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ StartFallthroughBlock(next_offset(), block);
return true;
}
@@ -504,13 +557,9 @@ void MaglevGraphBuilder::VisitCompareOperation() {
return;
case CompareOperationHint::kSignedSmall:
if (BinaryOperationHasInt32FastPath<kOperation>()) {
- ValueNode *left, *right;
- if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegisterInt32(0);
- } else {
- left = LoadRegisterInt32(0);
- right = GetAccumulatorInt32();
- }
+ ValueNode* left = LoadRegisterInt32(0);
+ ValueNode* right = GetAccumulatorInt32();
+
if (TryBuildCompareOperation<BranchIfInt32Compare>(kOperation, left,
right)) {
return;
@@ -522,13 +571,9 @@ void MaglevGraphBuilder::VisitCompareOperation() {
break;
case CompareOperationHint::kNumber:
if (BinaryOperationHasFloat64FastPath<kOperation>()) {
- ValueNode *left, *right;
- if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegisterFloat64(0);
- } else {
- left = LoadRegisterFloat64(0);
- right = GetAccumulatorFloat64();
- }
+ ValueNode* left = LoadRegisterFloat64(0);
+ ValueNode* right = GetAccumulatorFloat64();
+
if (TryBuildCompareOperation<BranchIfFloat64Compare>(kOperation, left,
right)) {
return;
@@ -549,10 +594,28 @@ void MaglevGraphBuilder::VisitCompareOperation() {
kOperation == Operation::kStrictEqual);
ValueNode *left, *right;
if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegister<CheckedInternalizedString>(0);
+ interpreter::Register reg = iterator_.GetRegisterOperand(0);
+ ValueNode* value = GetTaggedValue(reg);
+ if (!value->Is<CheckedInternalizedString>()) {
+ value = AddNewNode<CheckedInternalizedString>({value});
+ current_interpreter_frame_.set(reg, value);
+ current_interpreter_frame_.set(
+ interpreter::Register::virtual_accumulator(), value);
+ }
+ left = right = value;
} else {
- left = LoadRegister<CheckedInternalizedString>(0);
- right = GetAccumulator<CheckedInternalizedString>();
+ interpreter::Register reg = iterator_.GetRegisterOperand(0);
+ left = GetTaggedValue(reg);
+ if (!left->Is<CheckedInternalizedString>()) {
+ left = AddNewNode<CheckedInternalizedString>({left});
+ current_interpreter_frame_.set(reg, left);
+ }
+ right = GetAccumulatorTagged();
+ if (!right->Is<CheckedInternalizedString>()) {
+ right = AddNewNode<CheckedInternalizedString>({right});
+ current_interpreter_frame_.set(
+ interpreter::Register::virtual_accumulator(), right);
+ }
}
if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
right)) {
@@ -564,16 +627,10 @@ void MaglevGraphBuilder::VisitCompareOperation() {
case CompareOperationHint::kSymbol: {
DCHECK(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual);
- ValueNode *left, *right;
- if (IsRegisterEqualToAccumulator(0)) {
- left = right = LoadRegisterTagged(0);
- BuildCheckSymbol(left);
- } else {
- left = LoadRegisterTagged(0);
- right = GetAccumulatorTagged();
- BuildCheckSymbol(left);
- BuildCheckSymbol(right);
- }
+ ValueNode* left = LoadRegisterTagged(0);
+ ValueNode* right = GetAccumulatorTagged();
+ BuildCheckSymbol(left);
+ BuildCheckSymbol(right);
if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
right)) {
return;
@@ -617,12 +674,69 @@ void MaglevGraphBuilder::VisitLdaConstant() {
SetAccumulator(GetConstant(GetRefOperand<HeapObject>(0)));
}
-void MaglevGraphBuilder::VisitLdaContextSlot() {
- ValueNode* context = LoadRegisterTagged(0);
- int slot_index = iterator_.GetIndexOperand(1);
- int depth = iterator_.GetUnsignedImmediateOperand(2);
+bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext(
+ ValueNode** context, size_t* depth, int slot_index,
+ ContextSlotMutability slot_mutability) {
+ DCHECK(compilation_unit_->info()->specialize_to_function_context());
+
+ size_t new_depth = *depth;
+ base::Optional<compiler::ContextRef> maybe_context_ref =
+ FunctionContextSpecialization::TryToRef(compilation_unit_, *context,
+ &new_depth);
+ if (!maybe_context_ref.has_value()) return false;
+
+ compiler::ContextRef context_ref = maybe_context_ref.value();
+ if (slot_mutability == kMutable || new_depth != 0) {
+ *depth = new_depth;
+ *context = GetConstant(context_ref);
+ return false;
+ }
+
+ base::Optional<compiler::ObjectRef> maybe_slot_value =
+ context_ref.get(slot_index);
+ if (!maybe_slot_value.has_value()) {
+ *depth = new_depth;
+ *context = GetConstant(context_ref);
+ return false;
+ }
+
+ compiler::ObjectRef slot_value = maybe_slot_value.value();
+ if (slot_value.IsHeapObject()) {
+    // Even though the context slot is immutable, the context might have
+    // escaped before the function to which it belongs has initialized the
+    // slot. We must be conservative and check whether the value in the slot
+    // is currently the hole or undefined. Only if it is neither of these can
+    // we be sure that it won't change anymore.
+ //
+ // See also: JSContextSpecialization::ReduceJSLoadContext.
+ compiler::OddballType oddball_type =
+ slot_value.AsHeapObject().map().oddball_type();
+ if (oddball_type == compiler::OddballType::kUndefined ||
+ oddball_type == compiler::OddballType::kHole) {
+ *depth = new_depth;
+ *context = GetConstant(context_ref);
+ return false;
+ }
+ }
- for (int i = 0; i < depth; ++i) {
+ // Fold the load of the immutable slot.
+
+ SetAccumulator(GetConstant(slot_value));
+ return true;
+}
+
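
A standalone sketch of the guard implemented above, with a made-up SlotValueKind enum standing in for the broker's object refs: an immutable slot is only folded once it holds something other than the hole or undefined, because an escaped context can still be observed before its owner initializes the slot.

// Simplified model of the immutable-slot folding rule; not the V8 API.
#include <iostream>
#include <optional>

enum class SlotValueKind { kHole, kUndefined, kSmi, kHeapObject };

// Returns the value to bake into the generated code, or nothing when folding
// would be unsound because the slot may still be initialized later.
std::optional<SlotValueKind> TryFoldImmutableSlot(SlotValueKind current) {
  if (current == SlotValueKind::kHole ||
      current == SlotValueKind::kUndefined) {
    return std::nullopt;
  }
  return current;  // immutable and already initialized: safe to fold
}

int main() {
  std::cout << TryFoldImmutableSlot(SlotValueKind::kUndefined).has_value()  // 0
            << TryFoldImmutableSlot(SlotValueKind::kSmi).has_value()        // 1
            << "\n";
}
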
+void MaglevGraphBuilder::BuildLoadContextSlot(
+ ValueNode* context, size_t depth, int slot_index,
+ ContextSlotMutability slot_mutability) {
+ MinimizeContextChainDepth(&context, &depth);
+
+ if (compilation_unit_->info()->specialize_to_function_context() &&
+ TrySpecializeLoadContextSlotToFunctionContext(
+ &context, &depth, slot_index, slot_mutability)) {
+ return; // Our work here is done.
+ }
+
+ for (size_t i = 0; i < depth; ++i) {
context = AddNewNode<LoadTaggedField>(
{context}, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX));
}
@@ -630,28 +744,47 @@ void MaglevGraphBuilder::VisitLdaContextSlot() {
SetAccumulator(AddNewNode<LoadTaggedField>(
{context}, Context::OffsetOfElementAt(slot_index)));
}
+
+void MaglevGraphBuilder::VisitLdaContextSlot() {
+ ValueNode* context = LoadRegisterTagged(0);
+ int slot_index = iterator_.GetIndexOperand(1);
+ size_t depth = iterator_.GetUnsignedImmediateOperand(2);
+ BuildLoadContextSlot(context, depth, slot_index, kMutable);
+}
void MaglevGraphBuilder::VisitLdaImmutableContextSlot() {
- // TODO(leszeks): Consider context specialising.
- VisitLdaContextSlot();
+ ValueNode* context = LoadRegisterTagged(0);
+ int slot_index = iterator_.GetIndexOperand(1);
+ size_t depth = iterator_.GetUnsignedImmediateOperand(2);
+ BuildLoadContextSlot(context, depth, slot_index, kImmutable);
}
void MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
ValueNode* context = GetContext();
int slot_index = iterator_.GetIndexOperand(0);
-
- SetAccumulator(AddNewNode<LoadTaggedField>(
- {context}, Context::OffsetOfElementAt(slot_index)));
+ BuildLoadContextSlot(context, 0, slot_index, kMutable);
}
void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
- // TODO(leszeks): Consider context specialising.
- VisitLdaCurrentContextSlot();
+ ValueNode* context = GetContext();
+ int slot_index = iterator_.GetIndexOperand(0);
+ BuildLoadContextSlot(context, 0, slot_index, kImmutable);
}
void MaglevGraphBuilder::VisitStaContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
- int depth = iterator_.GetUnsignedImmediateOperand(2);
+ size_t depth = iterator_.GetUnsignedImmediateOperand(2);
+
+ MinimizeContextChainDepth(&context, &depth);
+
+ if (compilation_unit_->info()->specialize_to_function_context()) {
+ base::Optional<compiler::ContextRef> maybe_ref =
+ FunctionContextSpecialization::TryToRef(compilation_unit_, context,
+ &depth);
+ if (maybe_ref.has_value()) {
+ context = GetConstant(maybe_ref.value());
+ }
+ }
- for (int i = 0; i < depth; ++i) {
+ for (size_t i = 0; i < depth; ++i) {
context = AddNewNode<LoadTaggedField>(
{context}, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX));
}
@@ -918,28 +1051,54 @@ void MaglevGraphBuilder::VisitStaLookupSlot() {
SetAccumulator(BuildCallRuntime(StaLookupSlotFunction(flags), {name, value}));
}
+namespace {
+NodeType StaticTypeForNode(ValueNode* node) {
+ DCHECK(node->is_tagged());
+ switch (node->opcode()) {
+ case Opcode::kCheckedSmiTag:
+ case Opcode::kSmiConstant:
+ return NodeType::kSmi;
+ case Opcode::kConstant: {
+ compiler::HeapObjectRef ref = node->Cast<Constant>()->object();
+ if (ref.IsString()) {
+ return NodeType::kString;
+ } else if (ref.IsSymbol()) {
+ return NodeType::kSymbol;
+ } else if (ref.IsHeapNumber()) {
+ return NodeType::kHeapNumber;
+ }
+ return NodeType::kHeapObjectWithKnownMap;
+ }
+ default:
+ return NodeType::kUnknown;
+ }
+}
+} // namespace
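
A tiny self-contained model of how the static type computed above lets later checks be skipped (the opcodes and node types here are simplified stand-ins, not the Maglev IR): a value produced by an operation that already guarantees Smi-ness never needs a CheckSmi.

// Toy model of "static type makes the emitted check unnecessary".
#include <iostream>

enum class Opcode { kSmiConstant, kCheckedSmiTag, kStringConstant, kUnknown };
enum class NodeType { kSmi, kString, kUnknown };

// The static type is whatever the producing operation guarantees.
NodeType StaticTypeFor(Opcode op) {
  switch (op) {
    case Opcode::kSmiConstant:
    case Opcode::kCheckedSmiTag:
      return NodeType::kSmi;
    case Opcode::kStringConstant:
      return NodeType::kString;
    default:
      return NodeType::kUnknown;
  }
}

// A Smi check is only emitted when neither the recorded type nor the static
// type already proves the property.
bool NeedsSmiCheck(NodeType known_type, Opcode producer) {
  if (known_type == NodeType::kSmi) return false;
  return StaticTypeFor(producer) != NodeType::kSmi;
}

int main() {
  std::cout << NeedsSmiCheck(NodeType::kUnknown, Opcode::kSmiConstant)  // 0
            << NeedsSmiCheck(NodeType::kUnknown, Opcode::kUnknown)      // 1
            << "\n";
}
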
+
void MaglevGraphBuilder::BuildCheckSmi(ValueNode* object) {
- NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
- if (NodeInfo::IsSmi(known_info)) return;
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(object);
+ if (known_info->is_smi()) return;
+ known_info->type = StaticTypeForNode(object);
+ if (known_info->is_smi()) return;
// TODO(leszeks): Figure out a way to also handle CheckedSmiUntag.
AddNewNode<CheckSmi>({object});
- known_node_aspects().InsertOrUpdateNodeType(object, known_info,
- NodeType::kSmi);
+ known_info->type = NodeType::kSmi;
}
void MaglevGraphBuilder::BuildCheckHeapObject(ValueNode* object) {
- NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
- if (NodeInfo::IsAnyHeapObject(known_info)) return;
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(object);
+ if (known_info->is_any_heap_object()) return;
+ known_info->type = StaticTypeForNode(object);
+ if (known_info->is_any_heap_object()) return;
AddNewNode<CheckHeapObject>({object});
- known_node_aspects().InsertOrUpdateNodeType(object, known_info,
- NodeType::kAnyHeapObject);
+ known_info->type = NodeType::kAnyHeapObject;
}
namespace {
CheckType GetCheckType(NodeInfo* known_info) {
- if (NodeInfo::IsAnyHeapObject(known_info)) {
+ if (known_info->is_any_heap_object()) {
return CheckType::kOmitHeapObjectCheck;
}
return CheckType::kCheckHeapObject;
@@ -947,21 +1106,23 @@ CheckType GetCheckType(NodeInfo* known_info) {
} // namespace
void MaglevGraphBuilder::BuildCheckString(ValueNode* object) {
- NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
- if (NodeInfo::IsString(known_info)) return;
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(object);
+ if (known_info->is_string()) return;
+ known_info->type = StaticTypeForNode(object);
+ if (known_info->is_string()) return;
AddNewNode<CheckString>({object}, GetCheckType(known_info));
- known_node_aspects().InsertOrUpdateNodeType(object, known_info,
- NodeType::kString);
+ known_info->type = NodeType::kString;
}
void MaglevGraphBuilder::BuildCheckSymbol(ValueNode* object) {
- NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
- if (NodeInfo::IsSymbol(known_info)) return;
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(object);
+ if (known_info->is_symbol()) return;
+ known_info->type = StaticTypeForNode(object);
+ if (known_info->is_symbol()) return;
AddNewNode<CheckSymbol>({object}, GetCheckType(known_info));
- known_node_aspects().InsertOrUpdateNodeType(object, known_info,
- NodeType::kSymbol);
+ known_info->type = NodeType::kSymbol;
}
void MaglevGraphBuilder::BuildMapCheck(ValueNode* object,
@@ -975,10 +1136,32 @@ void MaglevGraphBuilder::BuildMapCheck(ValueNode* object,
// Map is already checked.
return;
}
- // TODO(leszeks): Insert an unconditional deopt if the known type doesn't
- // match the required type.
+ // TODO(leszeks): Insert an unconditional deopt if the known map doesn't
+ // match the required map.
+ }
+ NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(object);
+ if (known_info->type == NodeType::kUnknown) {
+ known_info->type = StaticTypeForNode(object);
+ if (known_info->type == NodeType::kHeapObjectWithKnownMap) {
+ // The only case where the type becomes a heap-object with a known map is
+ // when the object is a constant.
+ DCHECK(object->Is<Constant>());
+ // For constants with stable maps that match the desired map, we don't
+ // need to emit a map check, and can use the dependency -- we can't do
+ // this for unstable maps because the constant could migrate during
+ // compilation.
+ // TODO(leszeks): Insert an unconditional deopt if the constant map
+ // doesn't match the required map.
+ compiler::MapRef constant_map = object->Cast<Constant>()->object().map();
+ if (constant_map.equals(map) && map.is_stable()) {
+ DCHECK_EQ(&map_of_maps, &known_node_aspects().stable_maps);
+ map_of_maps.emplace(object, map);
+ broker()->dependencies()->DependOnStableMap(map);
+ return;
+ }
+ }
}
- NodeInfo* known_info = known_node_aspects().GetInfoFor(object);
+
if (map.is_migration_target()) {
AddNewNode<CheckMapsWithMigration>({object}, map, GetCheckType(known_info));
} else {
@@ -986,210 +1169,384 @@ void MaglevGraphBuilder::BuildMapCheck(ValueNode* object,
}
map_of_maps.emplace(object, map);
if (map.is_stable()) {
- compilation_unit_->broker()->dependencies()->DependOnStableMap(map);
+ broker()->dependencies()->DependOnStableMap(map);
+ }
+ known_info->type = NodeType::kHeapObjectWithKnownMap;
+}
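
A toy model of the constant-map decision above (Map and the action enum are stand-ins, not compiler::MapRef): for a compile-time constant whose map is stable and equal to the expected map, a compilation dependency replaces the runtime check, while an unstable map, which could migrate during compilation, still needs one.

// Sketch of "stable-map dependency instead of emitted map check".
#include <iostream>

struct Map {
  int id;
  bool is_stable;
};

enum class MapCheckAction { kRecordStableMapDependency, kEmitRuntimeCheck };

// The dependency invalidates the generated code if the map ever transitions,
// so it is only a valid substitute for the check when the map is stable.
MapCheckAction PlanMapCheckForConstant(const Map& constant_map,
                                       const Map& expected_map) {
  if (constant_map.id == expected_map.id && expected_map.is_stable) {
    return MapCheckAction::kRecordStableMapDependency;
  }
  return MapCheckAction::kEmitRuntimeCheck;
}

int main() {
  Map stable{1, true};
  Map unstable{2, false};
  std::cout << (PlanMapCheckForConstant(stable, stable) ==
                MapCheckAction::kRecordStableMapDependency)      // 1
            << (PlanMapCheckForConstant(unstable, unstable) ==
                MapCheckAction::kRecordStableMapDependency)      // 0
            << "\n";
}
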
+
+bool MaglevGraphBuilder::TryFoldLoadDictPrototypeConstant(
+ compiler::PropertyAccessInfo access_info) {
+ DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
+ DCHECK(access_info.IsDictionaryProtoDataConstant());
+ DCHECK(access_info.holder().has_value());
+
+ base::Optional<compiler::ObjectRef> constant =
+ access_info.holder()->GetOwnDictionaryProperty(
+ access_info.dictionary_index(), broker()->dependencies());
+ if (!constant.has_value()) return false;
+
+ for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
+ Handle<Map> map_handle = map.object();
+ // Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo
+    // must have a different lookup start map.
+ if (!map_handle->IsJSReceiverMap()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ JSFunction constructor =
+ Map::GetConstructorFunction(
+ *map_handle, *broker()->target_native_context().object())
+ .value();
+ // {constructor.initial_map()} is loaded/stored with acquire-release
+ // semantics for constructors.
+ map = MakeRefAssumeMemoryFence(broker(), constructor.initial_map());
+ DCHECK(map.object()->IsJSObjectMap());
+ }
+ broker()->dependencies()->DependOnConstantInDictionaryPrototypeChain(
+ map, access_info.name(), constant.value(), PropertyKind::kData);
+ }
+
+ SetAccumulator(GetConstant(constant.value()));
+ return true;
+}
+
+bool MaglevGraphBuilder::TryFoldLoadConstantDataField(
+ compiler::PropertyAccessInfo access_info) {
+ if (access_info.holder().has_value()) {
+ base::Optional<compiler::ObjectRef> constant =
+ access_info.holder()->GetOwnFastDataProperty(
+ access_info.field_representation(), access_info.field_index(),
+ broker()->dependencies());
+ if (constant.has_value()) {
+ SetAccumulator(GetConstant(constant.value()));
+ return true;
+ }
}
- known_node_aspects().InsertOrUpdateNodeType(
- object, known_info, NodeType::kHeapObjectWithKnownMap);
+ // TODO(victorgomes): Check if lookup_start_object is a constant object and
+ // unfold the load.
+ return false;
}
-bool MaglevGraphBuilder::TryBuildMonomorphicLoad(ValueNode* receiver,
- ValueNode* lookup_start_object,
- const compiler::MapRef& map,
- MaybeObjectHandle handler) {
- if (handler.is_null()) return false;
+bool MaglevGraphBuilder::TryBuildPropertyGetterCall(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver) {
+ compiler::ObjectRef constant = access_info.constant().value();
- if (handler->IsSmi()) {
- return TryBuildMonomorphicLoadFromSmiHandler(receiver, lookup_start_object,
- map, handler->ToSmi().value());
+ if (access_info.IsDictionaryProtoAccessorConstant()) {
+ // For fast mode holders we recorded dependencies in BuildPropertyLoad.
+ for (const compiler::MapRef map : access_info.lookup_start_object_maps()) {
+ broker()->dependencies()->DependOnConstantInDictionaryPrototypeChain(
+ map, access_info.name(), constant, PropertyKind::kAccessor);
+ }
}
- HeapObject ho_handler;
- if (!handler->GetHeapObject(&ho_handler)) return false;
- if (ho_handler.IsCodeT()) {
- // TODO(leszeks): Call the code object directly.
- return false;
- } else if (ho_handler.IsAccessorPair()) {
- // TODO(leszeks): Call the getter directly.
- return false;
+ // Introduce the call to the getter function.
+ if (constant.IsJSFunction()) {
+ Call* call = CreateNewNode<Call>(Call::kFixedInputCount + 1,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ GetConstant(constant), GetContext());
+ call->set_arg(0, receiver);
+ SetAccumulator(AddNode(call));
+ return true;
} else {
- return TryBuildMonomorphicLoadFromLoadHandler(
- receiver, lookup_start_object, map, LoadHandler::cast(ho_handler));
+ // TODO(victorgomes): API calls.
+ return false;
}
}
-bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromSmiHandler(
- ValueNode* receiver, ValueNode* lookup_start_object,
- const compiler::MapRef& map, int32_t handler) {
- // Smi handler, emit a map check and LoadField.
- LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
- if (kind != LoadHandler::Kind::kField) return false;
- if (LoadHandler::IsWasmStructBits::decode(handler)) return false;
+bool MaglevGraphBuilder::TryBuildPropertySetterCall(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver,
+ ValueNode* value) {
+ compiler::ObjectRef constant = access_info.constant().value();
+ if (constant.IsJSFunction()) {
+ Call* call = CreateNewNode<Call>(Call::kFixedInputCount + 2,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ GetConstant(constant), GetContext());
+ call->set_arg(0, receiver);
+ call->set_arg(1, value);
+ SetAccumulator(AddNode(call));
+ return true;
+ } else {
+ // TODO(victorgomes): API calls.
+ return false;
+ }
+}
- BuildMapCheck(lookup_start_object, map);
+void MaglevGraphBuilder::BuildLoadField(
+ compiler::PropertyAccessInfo access_info, ValueNode* lookup_start_object) {
+ if (TryFoldLoadConstantDataField(access_info)) return;
+ // Resolve property holder.
ValueNode* load_source;
- if (LoadHandler::IsInobjectBits::decode(handler)) {
- load_source = lookup_start_object;
+ if (access_info.holder().has_value()) {
+ load_source = GetConstant(access_info.holder().value());
} else {
+ load_source = lookup_start_object;
+ }
+
+ FieldIndex field_index = access_info.field_index();
+ if (!field_index.is_inobject()) {
// The field is in the property array, first load it from there.
load_source = AddNewNode<LoadTaggedField>(
- {lookup_start_object}, JSReceiver::kPropertiesOrHashOffset);
- }
- int field_index = LoadHandler::FieldIndexBits::decode(handler);
- if (LoadHandler::IsDoubleBits::decode(handler)) {
- FieldIndex field = FieldIndex::ForSmiLoadHandler(*map.object(), handler);
- DescriptorArray descriptors = *map.instance_descriptors().object();
- InternalIndex index =
- descriptors.Search(field.property_index(), *map.object());
- DCHECK(index.is_found());
- DCHECK(Representation::Double().CanBeInPlaceChangedTo(
- descriptors.GetDetails(index).representation()));
- const compiler::CompilationDependency* dep =
- broker()->dependencies()->FieldRepresentationDependencyOffTheRecord(
- map, index, Representation::Double());
- broker()->dependencies()->RecordDependency(dep);
+ {load_source}, JSReceiver::kPropertiesOrHashOffset);
+ }
+ // Do the load.
+ if (field_index.is_double()) {
SetAccumulator(
- AddNewNode<LoadDoubleField>({load_source}, field_index * kTaggedSize));
+ AddNewNode<LoadDoubleField>({load_source}, field_index.offset()));
} else {
SetAccumulator(
- AddNewNode<LoadTaggedField>({load_source}, field_index * kTaggedSize));
+ AddNewNode<LoadTaggedField>({load_source}, field_index.offset()));
}
- return true;
}
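
A rough sketch of the load shape chosen above, using plain stand-in types rather than FieldIndex and the Maglev node classes: the source is the constant holder when one is known, out-of-object fields go through the property backing store first, and double fields use the unboxed load.

// Illustrative plan of a field load; names and types here are made up.
#include <iostream>
#include <string>

struct FieldLayout {
  bool is_inobject;
  bool is_double;
  int offset;
};

// Describes the loads that would be emitted, in order.
std::string DescribeFieldLoad(bool has_constant_holder, FieldLayout field) {
  std::string plan =
      has_constant_holder ? "source = holder constant" : "source = receiver";
  if (!field.is_inobject) {
    // Out-of-object fields live in the property array; load that first.
    plan += " -> LoadTaggedField(source, kPropertiesOrHashOffset)";
  }
  plan += field.is_double ? " -> LoadDoubleField(source, "
                          : " -> LoadTaggedField(source, ";
  plan += std::to_string(field.offset) + ")";
  return plan;
}

int main() {
  std::cout << DescribeFieldLoad(false, {false, true, 24}) << "\n";
}
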
-bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler(
- ValueNode* receiver, ValueNode* lookup_start_object,
- const compiler::MapRef& map, LoadHandler handler) {
- Object maybe_smi_handler = handler.smi_handler(local_isolate_);
- if (!maybe_smi_handler.IsSmi()) return false;
- int smi_handler = Smi::cast(maybe_smi_handler).value();
- LoadHandler::Kind kind = LoadHandler::KindBits::decode(smi_handler);
- bool do_access_check_on_lookup_start_object =
- LoadHandler::DoAccessCheckOnLookupStartObjectBits::decode(smi_handler);
- bool lookup_on_lookup_start_object =
- LoadHandler::LookupOnLookupStartObjectBits::decode(smi_handler);
- if (lookup_on_lookup_start_object) return false;
- if (kind != LoadHandler::Kind::kConstantFromPrototype &&
- kind != LoadHandler::Kind::kAccessorFromPrototype)
- return false;
+bool MaglevGraphBuilder::TryBuildStoreField(
+ compiler::PropertyAccessInfo access_info, ValueNode* receiver) {
+ FieldIndex field_index = access_info.field_index();
+ Representation field_representation = access_info.field_representation();
- if (map.IsStringMap()) {
- // Check for string maps before checking if we need to do an access check.
- // Primitive strings always get the prototype from the native context
- // they're operated on, so they don't need the access check.
- BuildCheckString(lookup_start_object);
- } else if (do_access_check_on_lookup_start_object) {
- return false;
+ // TODO(victorgomes): Support double stores.
+ if (field_representation.IsDouble()) return false;
+
+ // TODO(victorgomes): Support transition maps.
+ if (access_info.HasTransitionMap()) return false;
+
+ ValueNode* store_target;
+ if (field_index.is_inobject()) {
+ store_target = receiver;
} else {
- BuildMapCheck(lookup_start_object, map);
+ // The field is in the property array, first load it from there.
+ store_target = AddNewNode<LoadTaggedField>(
+ {receiver}, JSReceiver::kPropertiesOrHashOffset);
}
- Object validity_cell = handler.validity_cell(local_isolate_);
- if (validity_cell.IsCell(local_isolate_)) {
- compiler::MapRef receiver_map = map;
- if (receiver_map.IsPrimitiveMap()) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- // Note: Keep sync'd with AccessInfoFactory::ComputePropertyAccessInfo.
- base::Optional<compiler::JSFunctionRef> constructor =
- broker()->target_native_context().GetConstructorFunction(
- receiver_map);
- receiver_map = constructor.value().initial_map(broker()->dependencies());
+ if (field_representation.IsSmi()) {
+ ValueNode* value = GetAccumulatorTagged();
+ BuildCheckSmi(value);
+ AddNewNode<StoreTaggedFieldNoWriteBarrier>({store_target, value},
+ field_index.offset());
+ } else if (field_representation.IsDouble()) {
+ // TODO(victorgomes): Implement store double.
+ UNREACHABLE();
+ } else {
+ ValueNode* value = GetAccumulatorTagged();
+ if (field_representation.IsHeapObject()) {
+ // Emit a map check for the field type, if needed, otherwise just a
+ // HeapObject check.
+ if (access_info.field_map().has_value()) {
+ BuildMapCheck(value, access_info.field_map().value());
+ } else {
+ BuildCheckHeapObject(value);
+ }
}
+ AddNewNode<StoreTaggedFieldWithWriteBarrier>({store_target, value},
+ field_index.offset());
+ }
+ return true;
+}
- compiler::MapRef proto_map = receiver_map.prototype().map();
- while (proto_map.object()->prototype_validity_cell(
- local_isolate_, kRelaxedLoad) == validity_cell) {
- broker()->dependencies()->DependOnStableMap(proto_map);
- proto_map = proto_map.prototype().map();
+bool MaglevGraphBuilder::TryBuildPropertyLoad(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::PropertyAccessInfo const& access_info) {
+ if (access_info.holder().has_value() && !access_info.HasDictionaryHolder()) {
+ broker()->dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
+ access_info.holder().value());
+ }
+
+ switch (access_info.kind()) {
+ case compiler::PropertyAccessInfo::kInvalid:
+ UNREACHABLE();
+ case compiler::PropertyAccessInfo::kNotFound:
+ SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
+ return true;
+ case compiler::PropertyAccessInfo::kDataField:
+ case compiler::PropertyAccessInfo::kFastDataConstant:
+ BuildLoadField(access_info, lookup_start_object);
+ return true;
+ case compiler::PropertyAccessInfo::kDictionaryProtoDataConstant:
+ return TryFoldLoadDictPrototypeConstant(access_info);
+ case compiler::PropertyAccessInfo::kFastAccessorConstant:
+ case compiler::PropertyAccessInfo::kDictionaryProtoAccessorConstant:
+ return TryBuildPropertyGetterCall(access_info, receiver);
+ case compiler::PropertyAccessInfo::kModuleExport: {
+ ValueNode* cell = GetConstant(access_info.constant().value().AsCell());
+ SetAccumulator(AddNewNode<LoadTaggedField>({cell}, Cell::kValueOffset));
+ return true;
}
+ case compiler::PropertyAccessInfo::kStringLength:
+ DCHECK_EQ(receiver, lookup_start_object);
+ SetAccumulator(AddNewNode<StringLength>({receiver}));
+ return true;
+ }
+}
+
+bool MaglevGraphBuilder::TryBuildPropertyStore(
+ ValueNode* receiver, compiler::PropertyAccessInfo const& access_info) {
+ if (access_info.holder().has_value()) {
+ broker()->dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
+ access_info.holder().value());
+ }
+
+ if (access_info.IsFastAccessorConstant()) {
+ return TryBuildPropertySetterCall(access_info, receiver,
+ GetAccumulatorTagged());
} else {
- DCHECK_EQ(Smi::ToInt(validity_cell), Map::kPrototypeChainValid);
+ DCHECK(access_info.IsDataField() || access_info.IsFastDataConstant());
+ return TryBuildStoreField(access_info, receiver);
+ }
+}
+
+bool MaglevGraphBuilder::TryBuildPropertyAccess(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::PropertyAccessInfo const& access_info,
+ compiler::AccessMode access_mode) {
+ switch (access_mode) {
+ case compiler::AccessMode::kLoad:
+ return TryBuildPropertyLoad(receiver, lookup_start_object, access_info);
+ case compiler::AccessMode::kStore:
+ case compiler::AccessMode::kStoreInLiteral:
+ case compiler::AccessMode::kDefine:
+ DCHECK_EQ(receiver, lookup_start_object);
+ return TryBuildPropertyStore(receiver, access_info);
+ case compiler::AccessMode::kHas:
+ // TODO(victorgomes): BuildPropertyTest.
+ return false;
}
+}
- switch (kind) {
- case LoadHandler::Kind::kConstantFromPrototype: {
- MaybeObject value = handler.data1(local_isolate_);
- if (value.IsSmi()) {
- SetAccumulator(GetSmiConstant(value.ToSmi().value()));
- } else {
- SetAccumulator(GetConstant(MakeRefAssumeMemoryFence(
- broker(),
- broker()->CanonicalPersistentHandle(value.GetHeapObject()))));
- }
- break;
+bool MaglevGraphBuilder::TryBuildNamedAccess(
+ ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::NamedAccessFeedback const& feedback,
+ compiler::AccessMode access_mode) {
+ ZoneVector<compiler::PropertyAccessInfo> access_infos(zone());
+ {
+ ZoneVector<compiler::PropertyAccessInfo> access_infos_for_feedback(zone());
+ for (const compiler::MapRef& map : feedback.maps()) {
+ if (map.is_deprecated()) continue;
+ compiler::PropertyAccessInfo access_info =
+ broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode,
+ broker()->dependencies());
+ access_infos_for_feedback.push_back(access_info);
}
- case LoadHandler::Kind::kAccessorFromPrototype: {
- MaybeObject getter = handler.data1(local_isolate_);
- compiler::ObjectRef getter_ref = MakeRefAssumeMemoryFence(
- broker(),
- broker()->CanonicalPersistentHandle(getter.GetHeapObject()));
-
- Call* call = CreateNewNode<Call>(Call::kFixedInputCount + 1,
- ConvertReceiverMode::kNotNullOrUndefined,
- GetConstant(getter_ref), GetContext());
- call->set_arg(0, receiver);
- SetAccumulator(AddNode(call));
- break;
+
+ compiler::AccessInfoFactory access_info_factory(
+ broker(), broker()->dependencies(), zone());
+ if (!access_info_factory.FinalizePropertyAccessInfos(
+ access_infos_for_feedback, access_mode, &access_infos)) {
+ return false;
}
- default:
- UNREACHABLE();
}
- return true;
-}
-bool MaglevGraphBuilder::TryBuildMonomorphicElementLoad(
- ValueNode* object, ValueNode* index, const compiler::MapRef& map,
- MaybeObjectHandle handler) {
- if (handler.is_null()) return false;
+ // Check for monomorphic case.
+ if (access_infos.size() == 1) {
+ compiler::PropertyAccessInfo access_info = access_infos.front();
+ const compiler::MapRef& map =
+ access_info.lookup_start_object_maps().front();
+ if (map.IsStringMap()) {
+ // Check for string maps before checking if we need to do an access
+ // check. Primitive strings always get the prototype from the native
+ // context they're operated on, so they don't need the access check.
+ BuildCheckString(lookup_start_object);
+ } else {
+ BuildMapCheck(lookup_start_object, map);
+ }
- if (handler->IsSmi()) {
- return TryBuildMonomorphicElementLoadFromSmiHandler(
- object, index, map, handler->ToSmi().value());
+ // Generate the actual property access.
+ return TryBuildPropertyAccess(receiver, lookup_start_object, access_info,
+ access_mode);
+ } else {
+ // TODO(victorgomes): polymorphic case.
+ return false;
}
- return false;
}
-bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler(
- ValueNode* object, ValueNode* index, const compiler::MapRef& map,
- int32_t handler) {
- LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
+bool MaglevGraphBuilder::TryBuildElementAccess(
+ ValueNode* object, ValueNode* index,
+ compiler::ElementAccessFeedback const& feedback) {
+ // TODO(victorgomes): Implement other access modes.
+ if (feedback.keyed_mode().access_mode() != compiler::AccessMode::kLoad) {
+ return false;
+ }
- switch (kind) {
- case LoadHandler::Kind::kElement: {
- if (LoadHandler::AllowOutOfBoundsBits::decode(handler)) {
- return false;
- }
- ElementsKind elements_kind =
- LoadHandler::ElementsKindBits::decode(handler);
- if (!IsFastElementsKind(elements_kind)) return false;
+ // TODO(victorgomes): Add fast path for loading from HeapConstant.
+ // TODO(victorgomes): Add fast path for loading from String.
- // TODO(leszeks): Handle holey elements.
- if (IsHoleyElementsKind(elements_kind)) return false;
- DCHECK(!LoadHandler::ConvertHoleBits::decode(handler));
+ compiler::AccessInfoFactory access_info_factory(
+ broker(), broker()->dependencies(), zone());
+ ZoneVector<compiler::ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputeElementAccessInfos(feedback, &access_infos) ||
+ access_infos.empty()) {
+ return false;
+ }
- BuildMapCheck(object, map);
- BuildCheckSmi(index);
+ // Check for monomorphic case.
+ if (access_infos.size() == 1) {
+ compiler::ElementAccessInfo access_info = access_infos.front();
- if (LoadHandler::IsJsArrayBits::decode(handler)) {
- DCHECK(map.IsJSArrayMap());
- AddNewNode<CheckJSArrayBounds>({object, index});
- } else {
- DCHECK(!map.IsJSArrayMap());
- DCHECK(map.IsJSObjectMap());
- AddNewNode<CheckJSObjectElementsBounds>({object, index});
+      // TODO(victorgomes): Support element kind transitions.
+ if (access_info.transition_sources().size() != 0) return false;
+
+      // TODO(victorgomes): Support more elements kinds.
+ ElementsKind elements_kind = access_info.elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return false;
+ if (IsHoleyElementsKind(elements_kind)) return false;
+
+ const compiler::MapRef& map =
+ access_info.lookup_start_object_maps().front();
+ BuildMapCheck(object, map);
+
+ switch (index->properties().value_representation()) {
+ case ValueRepresentation::kTagged: {
+ if (SmiConstant* constant = index->TryCast<SmiConstant>()) {
+ index = GetInt32Constant(constant->value().value());
+ } else {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(index);
+ if (node_info->is_smi()) {
+ if (!node_info->int32_alternative) {
+ // TODO(leszeks): This could be unchecked.
+ node_info->int32_alternative =
+ AddNewNode<CheckedSmiUntag>({index});
+ }
+ index = node_info->int32_alternative;
+ } else {
+ // TODO(leszeks): Cache this knowledge/converted value somehow on
+ // the node info.
+ index = AddNewNode<CheckedObjectToIndex>({index});
+ }
+ }
+ break;
}
- if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
- SetAccumulator(AddNewNode<LoadDoubleElement>({object, index}));
- } else {
- DCHECK(!IsDoubleElementsKind(elements_kind));
- SetAccumulator(AddNewNode<LoadTaggedElement>({object, index}));
+ case ValueRepresentation::kInt32: {
+ // Already good.
+ break;
+ }
+ case ValueRepresentation::kFloat64: {
+ // TODO(leszeks): Pass in the index register (probably the
+ // accumulator), so that we can save this truncation on there as a
+ // conversion node.
+ index = AddNewNode<CheckedTruncateFloat64ToInt32>({index});
+ break;
}
- return true;
}
- default:
- return false;
+
+ if (map.IsJSArrayMap()) {
+ AddNewNode<CheckJSArrayBounds>({object, index});
+ } else {
+ DCHECK(map.IsJSObjectMap());
+ AddNewNode<CheckJSObjectElementsBounds>({object, index});
+ }
+ if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
+ SetAccumulator(AddNewNode<LoadDoubleElement>({object, index}));
+ } else {
+ DCHECK(!IsDoubleElementsKind(elements_kind));
+ SetAccumulator(AddNewNode<LoadTaggedElement>({object, index}));
+ }
+ return true;
+
+ } else {
+ // TODO(victorgomes): polymorphic case.
+ return false;
}
}
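
A small standalone model of the index normalization in the hunk above (Repr and the returned node names are simplified stand-ins): the key has to end up as an int32, and which conversion is inserted depends on its current representation and on whether it is already known to be a Smi.

// Which conversion the builder would insert for the element index.
#include <iostream>

enum class Repr { kTagged, kInt32, kFloat64 };

const char* IndexConversionFor(Repr repr, bool known_smi) {
  switch (repr) {
    case Repr::kTagged:
      // A known Smi can simply be untagged; anything else goes through the
      // generic (deoptimizing) object-to-index conversion.
      return known_smi ? "CheckedSmiUntag" : "CheckedObjectToIndex";
    case Repr::kInt32:
      return "none";  // already in the right representation
    case Repr::kFloat64:
      return "CheckedTruncateFloat64ToInt32";
  }
  return "unreachable";
}

int main() {
  std::cout << IndexConversionFor(Repr::kTagged, true) << "\n"
            << IndexConversionFor(Repr::kFloat64, false) << "\n";
}
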
@@ -1210,20 +1567,13 @@ void MaglevGraphBuilder::VisitGetNamedProperty() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess: {
- const compiler::NamedAccessFeedback& named_feedback =
- processed_feedback.AsNamedAccess();
- if (named_feedback.maps().size() != 1) break;
- compiler::MapRef map = named_feedback.maps()[0];
-
- // Monomorphic load, check the handler.
- // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler =
- FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
-
- if (TryBuildMonomorphicLoad(object, object, map, handler)) return;
- } break;
-
+ case compiler::ProcessedFeedback::kNamedAccess:
+ if (TryBuildNamedAccess(object, object,
+ processed_feedback.AsNamedAccess(),
+ compiler::AccessMode::kLoad)) {
+ return;
+ }
+ break;
default:
break;
}
@@ -1258,20 +1608,13 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess: {
- const compiler::NamedAccessFeedback& named_feedback =
- processed_feedback.AsNamedAccess();
- if (named_feedback.maps().size() != 1) break;
- compiler::MapRef map = named_feedback.maps()[0];
-
- // Monomorphic load, check the handler.
- // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler =
- FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
-
- if (TryBuildMonomorphicLoad(receiver, lookup_start_object, map, handler))
+ case compiler::ProcessedFeedback::kNamedAccess:
+ if (TryBuildNamedAccess(receiver, lookup_start_object,
+ processed_feedback.AsNamedAccess(),
+ compiler::AccessMode::kLoad)) {
return;
- } break;
+ }
+ break;
default:
break;
@@ -1288,7 +1631,6 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
ValueNode* object = LoadRegisterTagged(0);
// TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple
// monomorphic element load.
- ValueNode* key = GetAccumulatorTagged();
FeedbackSlot slot = GetSlotOperand(1);
compiler::FeedbackSource feedback_source{feedback(), slot};
@@ -1303,19 +1645,13 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
return;
case compiler::ProcessedFeedback::kElementAccess: {
- const compiler::ElementAccessFeedback& element_feedback =
- processed_feedback.AsElementAccess();
- if (element_feedback.transition_groups().size() != 1) break;
- if (element_feedback.transition_groups()[0].size() != 1) break;
- compiler::MapRef map = MakeRefAssumeMemoryFence(
- broker(), element_feedback.transition_groups()[0].front());
-
- // Monomorphic load, check the handler.
- // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler =
- FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
-
- if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return;
+ // Get the accumulator without conversion. TryBuildElementAccess
+ // will try to pick the best representation.
+ ValueNode* index = current_interpreter_frame_.accumulator();
+ if (TryBuildElementAccess(object, index,
+ processed_feedback.AsElementAccess())) {
+ return;
+ }
break;
}
@@ -1325,6 +1661,7 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
+ ValueNode* key = GetAccumulatorTagged();
SetAccumulator(
AddNewNode<GetKeyedGeneric>({context, object, key}, feedback_source));
}
@@ -1332,10 +1669,21 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
void MaglevGraphBuilder::VisitLdaModuleVariable() {
// LdaModuleVariable <cell_index> <depth>
int cell_index = iterator_.GetImmediateOperand(0);
- int depth = iterator_.GetUnsignedImmediateOperand(1);
+ size_t depth = iterator_.GetUnsignedImmediateOperand(1);
ValueNode* context = GetContext();
- for (int i = 0; i < depth; i++) {
+ MinimizeContextChainDepth(&context, &depth);
+
+ if (compilation_unit_->info()->specialize_to_function_context()) {
+ base::Optional<compiler::ContextRef> maybe_ref =
+ FunctionContextSpecialization::TryToRef(compilation_unit_, context,
+ &depth);
+ if (maybe_ref.has_value()) {
+ context = GetConstant(maybe_ref.value());
+ }
+ }
+
+ for (size_t i = 0; i < depth; i++) {
context = AddNewNode<LoadTaggedField>(
{context}, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX));
}
@@ -1366,9 +1714,21 @@ void MaglevGraphBuilder::VisitStaModuleVariable() {
AbortReason::kUnsupportedModuleOperation))});
return;
}
+
ValueNode* context = GetContext();
- int depth = iterator_.GetUnsignedImmediateOperand(1);
- for (int i = 0; i < depth; i++) {
+ size_t depth = iterator_.GetUnsignedImmediateOperand(1);
+ MinimizeContextChainDepth(&context, &depth);
+
+ if (compilation_unit_->info()->specialize_to_function_context()) {
+ base::Optional<compiler::ContextRef> maybe_ref =
+ FunctionContextSpecialization::TryToRef(compilation_unit_, context,
+ &depth);
+ if (maybe_ref.has_value()) {
+ context = GetConstant(maybe_ref.value());
+ }
+ }
+
+ for (size_t i = 0; i < depth; i++) {
context = AddNewNode<LoadTaggedField>(
{context}, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX));
}
@@ -1383,86 +1743,6 @@ void MaglevGraphBuilder::VisitStaModuleVariable() {
Cell::kValueOffset);
}
-bool MaglevGraphBuilder::TryBuildMonomorphicStoreFromSmiHandler(
- ValueNode* object, const compiler::MapRef& map, int32_t handler) {
- StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
- if (kind != StoreHandler::Kind::kField) return false;
-
- Representation::Kind representation =
- StoreHandler::RepresentationBits::decode(handler);
- if (representation == Representation::kDouble) return false;
-
- InternalIndex descriptor_idx(StoreHandler::DescriptorBits::decode(handler));
- PropertyDetails property_details =
- map.instance_descriptors().GetPropertyDetails(descriptor_idx);
-
- // TODO(leszeks): Allow a fast path which checks for equality with the current
- // value.
- if (property_details.constness() == PropertyConstness::kConst) return false;
-
- BuildMapCheck(object, map);
-
- ValueNode* store_target;
- if (StoreHandler::IsInobjectBits::decode(handler)) {
- store_target = object;
- } else {
- // The field is in the property array, first Store it from there.
- store_target = AddNewNode<LoadTaggedField>(
- {object}, JSReceiver::kPropertiesOrHashOffset);
- }
-
- int field_index = StoreHandler::FieldIndexBits::decode(handler);
- int offset = field_index * kTaggedSize;
-
- ValueNode* value = GetAccumulatorTagged();
- if (representation == Representation::kSmi) {
- BuildCheckSmi(value);
- AddNewNode<StoreTaggedFieldNoWriteBarrier>({store_target, value}, offset);
- return true;
- }
-
- if (representation == Representation::kHeapObject) {
- FieldType descriptors_field_type =
- map.instance_descriptors().object()->GetFieldType(descriptor_idx);
- if (descriptors_field_type.IsNone()) {
- // Store is not safe if the field type was cleared. Since we check this
- // late, we'll emit a useless map check and maybe property store load, but
- // that's fine, this case should be rare.
- return false;
- }
-
- // Emit a map check for the field type, if needed, otherwise just a
- // HeapObject check.
- if (descriptors_field_type.IsClass()) {
- // Check that the value matches the expected field type.
- base::Optional<compiler::MapRef> maybe_field_map =
- TryMakeRef(broker(), descriptors_field_type.AsClass());
- if (!maybe_field_map.has_value()) return false;
-
- BuildMapCheck(value, *maybe_field_map);
- } else {
- BuildCheckHeapObject(value);
- }
- }
- AddNewNode<StoreTaggedFieldWithWriteBarrier>({store_target, value}, offset);
- return true;
-}
-
-bool MaglevGraphBuilder::TryBuildMonomorphicStore(ValueNode* object,
- const compiler::MapRef& map,
- MaybeObjectHandle handler) {
- if (handler.is_null()) return false;
-
- if (handler->IsSmi()) {
- return TryBuildMonomorphicStoreFromSmiHandler(object, map,
- handler->ToSmi().value());
- }
- // TODO(leszeks): If we add non-Smi paths here, make sure to differentiate
- // between Define and Set.
-
- return false;
-}
-
void MaglevGraphBuilder::BuildLoadGlobal(
compiler::NameRef name, compiler::FeedbackSource& feedback_source,
TypeofMode typeof_mode) {
@@ -1504,20 +1784,13 @@ void MaglevGraphBuilder::VisitSetNamedProperty() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess: {
- const compiler::NamedAccessFeedback& named_feedback =
- processed_feedback.AsNamedAccess();
- if (named_feedback.maps().size() != 1) break;
- compiler::MapRef map = named_feedback.maps()[0];
-
- // Monomorphic store, check the handler.
- // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler =
- FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
-
- if (TryBuildMonomorphicStore(object, map, handler)) return;
- } break;
-
+ case compiler::ProcessedFeedback::kNamedAccess:
+ if (TryBuildNamedAccess(object, object,
+ processed_feedback.AsNamedAccess(),
+ compiler::AccessMode::kStore)) {
+ return;
+ }
+ break;
default:
break;
}
@@ -1546,19 +1819,13 @@ void MaglevGraphBuilder::VisitDefineNamedOwnProperty() {
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
return;
- case compiler::ProcessedFeedback::kNamedAccess: {
- const compiler::NamedAccessFeedback& named_feedback =
- processed_feedback.AsNamedAccess();
- if (named_feedback.maps().size() != 1) break;
- compiler::MapRef map = named_feedback.maps()[0];
-
- // Monomorphic store, check the handler.
- // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler =
- FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
-
- if (TryBuildMonomorphicStore(object, map, handler)) return;
- } break;
+ case compiler::ProcessedFeedback::kNamedAccess:
+ if (TryBuildNamedAccess(object, object,
+ processed_feedback.AsNamedAccess(),
+ compiler::AccessMode::kDefine)) {
+ return;
+ }
+ break;
default:
break;
@@ -1644,14 +1911,6 @@ void MaglevGraphBuilder::VisitDefineKeyedOwnPropertyInLiteral() {
{object, name, value, flags, feedback_vector, slot}));
}
-void MaglevGraphBuilder::VisitCollectTypeProfile() {
- ValueNode* position = GetSmiConstant(GetFlag8Operand(0));
- ValueNode* value = GetAccumulatorTagged();
- ValueNode* feedback_vector = GetConstant(feedback());
- SetAccumulator(BuildCallRuntime(Runtime::kCollectTypeProfile,
- {position, value, feedback_vector}));
-}
-
void MaglevGraphBuilder::VisitAdd() { VisitBinaryOperation<Operation::kAdd>(); }
void MaglevGraphBuilder::VisitSub() {
VisitBinaryOperation<Operation::kSubtract>();
@@ -1802,9 +2061,15 @@ void MaglevGraphBuilder::VisitGetSuperConstructor() {
StoreRegister(iterator_.GetRegisterOperand(0), map_proto);
}
-void MaglevGraphBuilder::VisitFindNonDefaultConstructor() {
- // TODO(v8:13091): Implement.
- CHECK(false);
+void MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() {
+ ValueNode* this_function = LoadRegisterTagged(0);
+ ValueNode* new_target = LoadRegisterTagged(1);
+
+ CallBuiltin* call_builtin =
+ BuildCallBuiltin<Builtin::kFindNonDefaultConstructorOrConstruct>(
+ {this_function, new_target});
+ auto result = iterator_.GetRegisterPairOperand(2);
+ StoreRegisterPair(result, call_builtin);
}
void MaglevGraphBuilder::InlineCallFromRegisters(
@@ -1826,8 +2091,7 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
// Finish the current block with a jump to the inlined function.
BasicBlockRef start_ref, end_ref;
- BasicBlock* block = CreateBlock<JumpToInlined>({}, &start_ref, inner_unit);
- ResolveJumpsToBlockAtOffset(block, block_offset_);
+ BasicBlock* block = FinishBlock<JumpToInlined>({}, &start_ref, inner_unit);
// Manually create the prologue of the inner function graph, so that we
// can manually set up the arguments.
@@ -1877,10 +2141,7 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
inner_graph_builder.ProcessMergePoint(
inner_graph_builder.inline_exit_offset());
inner_graph_builder.StartNewBlock(inner_graph_builder.inline_exit_offset());
- BasicBlock* end_block =
- inner_graph_builder.CreateBlock<JumpFromInlined>({}, &end_ref);
- inner_graph_builder.ResolveJumpsToBlockAtOffset(
- end_block, inner_graph_builder.inline_exit_offset());
+ inner_graph_builder.FinishBlock<JumpFromInlined>({}, &end_ref);
// Pull the returned accumulator value out of the inlined function's final
// merged return state.
@@ -1893,7 +2154,6 @@ void MaglevGraphBuilder::InlineCallFromRegisters(
current_block_ = zone()->New<BasicBlock>(MergePointInterpreterFrameState::New(
*compilation_unit_, current_interpreter_frame_,
iterator_.current_offset(), 1, block, GetInLiveness()));
- block_offset_ = iterator_.current_offset();
// Set the exit JumpFromInlined to jump to this resume block.
// TODO(leszeks): Passing start_ref to JumpFromInlined creates a two-element
// linked list of refs. Consider adding a helper to explicitly set the target
@@ -1952,7 +2212,7 @@ void MaglevGraphBuilder::BuildCallFromRegisters(
return;
case compiler::ProcessedFeedback::kCall: {
- if (!FLAG_maglev_inlining) break;
+ if (!v8_flags.maglev_inlining) break;
const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
CallFeedbackContent content = call_feedback.call_feedback_content();
@@ -2245,10 +2505,10 @@ void MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorResolve(
GetTaggedValue(args[2])}));
}
-void MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorYield(
+void MaglevGraphBuilder::VisitIntrinsicAsyncGeneratorYieldWithAwait(
interpreter::RegisterList args) {
DCHECK_EQ(args.register_count(), 3);
- SetAccumulator(BuildCallBuiltin<Builtin::kAsyncGeneratorYield>(
+ SetAccumulator(BuildCallBuiltin<Builtin::kAsyncGeneratorYieldWithAwait>(
{GetTaggedValue(args[0]), GetTaggedValue(args[1]),
GetTaggedValue(args[2])}));
}
@@ -2258,10 +2518,12 @@ void MaglevGraphBuilder::VisitConstruct() {
ValueNode* constructor = LoadRegisterTagged(0);
interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
ValueNode* context = GetContext();
+ FeedbackSlot slot = GetSlotOperand(3);
+ compiler::FeedbackSource feedback_source{feedback(), slot};
size_t input_count = args.register_count() + 1 + Construct::kFixedInputCount;
- Construct* construct =
- CreateNewNode<Construct>(input_count, constructor, new_target, context);
+ Construct* construct = CreateNewNode<Construct>(
+ input_count, feedback_source, constructor, new_target, context);
int arg_index = 0;
// Add undefined receiver.
construct->set_arg(arg_index++, GetRootConstant(RootIndex::kUndefinedValue));
@@ -2319,10 +2581,9 @@ void MaglevGraphBuilder::VisitTestInstanceOf() {
// TODO(victorgomes): Check feedback slot and a do static lookup for
// @@hasInstance.
- USE(feedback_source);
-
ValueNode* context = GetContext();
- SetAccumulator(AddNewNode<TestInstanceOf>({context, object, callable}));
+ SetAccumulator(
+ AddNewNode<TestInstanceOf>({context, object, callable}, feedback_source));
}
void MaglevGraphBuilder::VisitTestIn() {
@@ -2358,6 +2619,7 @@ void MaglevGraphBuilder::BuildToNumberOrToNumeric(Object::Conversion mode) {
UNREACHABLE();
case BinaryOperationHint::kNumber:
case BinaryOperationHint::kBigInt:
+ case BinaryOperationHint::kBigInt64:
AddNewNode<CheckNumber>({value}, mode);
break;
default:
@@ -2514,6 +2776,8 @@ void MaglevGraphBuilder::VisitCreateClosure() {
void MaglevGraphBuilder::VisitCreateBlockContext() {
// TODO(v8:7700): Inline allocation when context is small.
+ // TODO(v8:7700): Update TryGetParentContext if this ever emits its own Node
+ // type.
// CreateBlockContext <scope_info_idx>
ValueNode* scope_info = GetConstant(GetRefOperand<ScopeInfo>(0));
SetAccumulator(BuildCallRuntime(Runtime::kPushBlockContext, {scope_info}));
@@ -2521,6 +2785,8 @@ void MaglevGraphBuilder::VisitCreateBlockContext() {
void MaglevGraphBuilder::VisitCreateCatchContext() {
// TODO(v8:7700): Inline allocation when context is small.
+ // TODO(v8:7700): Update TryGetParentContext if this ever emits its own Node
+ // type.
// CreateCatchContext <exception> <scope_info_idx>
ValueNode* exception = LoadRegisterTagged(0);
ValueNode* scope_info = GetConstant(GetRefOperand<ScopeInfo>(1));
@@ -2536,6 +2802,8 @@ void MaglevGraphBuilder::VisitCreateFunctionContext() {
}
void MaglevGraphBuilder::VisitCreateEvalContext() {
+ // TODO(v8:7700): Update TryGetParentContext if this ever emits its own Node
+ // type.
compiler::ScopeInfoRef info = GetRefOperand<ScopeInfo>(0);
uint32_t slot_count = iterator_.GetUnsignedImmediateOperand(1);
if (slot_count <= static_cast<uint32_t>(
@@ -2593,10 +2861,7 @@ void MaglevGraphBuilder::VisitJumpLoop() {
BytecodeOffset(iterator_.current_offset()),
compilation_unit_);
BasicBlock* block =
- target == block_offset_
- ? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target])
- : FinishBlock<JumpLoop>(next_offset(), {},
- jump_targets_[target].block_ptr());
+ FinishBlock<JumpLoop>({}, jump_targets_[target].block_ptr());
merge_states_[target]->MergeLoop(*compilation_unit_,
current_interpreter_frame_, block, target);
@@ -2608,8 +2873,8 @@ void MaglevGraphBuilder::VisitJump() {
if (relative_jump_bytecode_offset > 0) {
AddNewNode<IncreaseInterruptBudget>({}, relative_jump_bytecode_offset);
}
- BasicBlock* block = FinishBlock<Jump>(
- next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
+ BasicBlock* block =
+ FinishBlock<Jump>({}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
DCHECK_LT(next_offset(), bytecode().length());
}
@@ -2663,7 +2928,7 @@ void MaglevGraphBuilder::MergeDeadIntoFrameState(int target) {
// If this merge is the last one which kills a loop merge, remove that
// merge state.
if (merge_states_[target]->is_unreachable_loop()) {
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "! Killing loop merge state at @" << target << std::endl;
}
merge_states_[target] = nullptr;
@@ -2705,83 +2970,107 @@ void MaglevGraphBuilder::MergeIntoInlinedReturnFrameState(
}
void MaglevGraphBuilder::BuildBranchIfRootConstant(ValueNode* node,
- int true_target,
- int false_target,
+ JumpType jump_type,
RootIndex root_index) {
+ int fallthrough_offset = next_offset();
+ int jump_offset = iterator_.GetJumpTargetOffset();
+ BasicBlockRef* true_target = jump_type == kJumpIfTrue
+ ? &jump_targets_[jump_offset]
+ : &jump_targets_[fallthrough_offset];
+ BasicBlockRef* false_target = jump_type == kJumpIfFalse
+ ? &jump_targets_[jump_offset]
+ : &jump_targets_[fallthrough_offset];
BasicBlock* block = FinishBlock<BranchIfRootConstant>(
- next_offset(), {node}, &jump_targets_[true_target],
- &jump_targets_[false_target], root_index);
- MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ {node}, true_target, false_target, root_index);
+ if (jump_type == kJumpIfTrue) {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_true_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ } else {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_false_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ }
+ MergeIntoFrameState(block, jump_offset);
+ StartFallthroughBlock(fallthrough_offset, block);
}
-void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
- int false_target) {
- BuildBranchIfRootConstant(node, true_target, false_target,
- RootIndex::kTrueValue);
+void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node,
+ JumpType jump_type) {
+ BuildBranchIfRootConstant(node, jump_type, RootIndex::kTrueValue);
}
-void MaglevGraphBuilder::BuildBranchIfNull(ValueNode* node, int true_target,
- int false_target) {
- BuildBranchIfRootConstant(node, true_target, false_target,
- RootIndex::kNullValue);
+void MaglevGraphBuilder::BuildBranchIfNull(ValueNode* node,
+ JumpType jump_type) {
+ BuildBranchIfRootConstant(node, jump_type, RootIndex::kNullValue);
}
void MaglevGraphBuilder::BuildBranchIfUndefined(ValueNode* node,
- int true_target,
- int false_target) {
- BuildBranchIfRootConstant(node, true_target, false_target,
- RootIndex::kUndefinedValue);
+ JumpType jump_type) {
+ BuildBranchIfRootConstant(node, jump_type, RootIndex::kUndefinedValue);
}
void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
- int true_target,
- int false_target) {
- BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
- next_offset(), {node}, &jump_targets_[true_target],
- &jump_targets_[false_target]);
- MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ JumpType jump_type) {
+ int fallthrough_offset = next_offset();
+ int jump_offset = iterator_.GetJumpTargetOffset();
+ BasicBlockRef* true_target = jump_type == kJumpIfTrue
+ ? &jump_targets_[jump_offset]
+ : &jump_targets_[fallthrough_offset];
+ BasicBlockRef* false_target = jump_type == kJumpIfFalse
+ ? &jump_targets_[jump_offset]
+ : &jump_targets_[fallthrough_offset];
+ BasicBlock* block =
+ FinishBlock<BranchIfToBooleanTrue>({node}, true_target, false_target);
+ if (jump_type == kJumpIfTrue) {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_true_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ } else {
+ block->control_node()
+ ->Cast<BranchControlNode>()
+ ->set_false_interrupt_correction(
+ iterator_.GetRelativeJumpTargetOffset());
+ }
+ MergeIntoFrameState(block, jump_offset);
+ StartFallthroughBlock(fallthrough_offset, block);
}
void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
- BuildBranchIfToBooleanTrue(GetAccumulatorTagged(),
- iterator_.GetJumpTargetOffset(), next_offset());
+ BuildBranchIfToBooleanTrue(GetAccumulatorTagged(), kJumpIfTrue);
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
- BuildBranchIfToBooleanTrue(GetAccumulatorTagged(), next_offset(),
- iterator_.GetJumpTargetOffset());
+ BuildBranchIfToBooleanTrue(GetAccumulatorTagged(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfTrue() {
- BuildBranchIfTrue(GetAccumulatorTagged(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfTrue(GetAccumulatorTagged(), kJumpIfTrue);
}
void MaglevGraphBuilder::VisitJumpIfFalse() {
- BuildBranchIfTrue(GetAccumulatorTagged(), next_offset(),
- iterator_.GetJumpTargetOffset());
+ BuildBranchIfTrue(GetAccumulatorTagged(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfNull() {
- BuildBranchIfNull(GetAccumulatorTagged(), iterator_.GetJumpTargetOffset(),
- next_offset());
+ BuildBranchIfNull(GetAccumulatorTagged(), kJumpIfTrue);
}
void MaglevGraphBuilder::VisitJumpIfNotNull() {
- BuildBranchIfNull(GetAccumulatorTagged(), next_offset(),
- iterator_.GetJumpTargetOffset());
+ BuildBranchIfNull(GetAccumulatorTagged(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfUndefined() {
- BuildBranchIfUndefined(GetAccumulatorTagged(),
- iterator_.GetJumpTargetOffset(), next_offset());
+ BuildBranchIfUndefined(GetAccumulatorTagged(), kJumpIfTrue);
}
void MaglevGraphBuilder::VisitJumpIfNotUndefined() {
- BuildBranchIfUndefined(GetAccumulatorTagged(), next_offset(),
- iterator_.GetJumpTargetOffset());
+ BuildBranchIfUndefined(GetAccumulatorTagged(), kJumpIfFalse);
}
void MaglevGraphBuilder::VisitJumpIfUndefinedOrNull() {
BasicBlock* block = FinishBlock<BranchIfUndefinedOrNull>(
- next_offset(), {GetAccumulatorTagged()},
- &jump_targets_[iterator_.GetJumpTargetOffset()],
+ {GetAccumulatorTagged()}, &jump_targets_[iterator_.GetJumpTargetOffset()],
&jump_targets_[next_offset()]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ StartFallthroughBlock(next_offset(), block);
}
void MaglevGraphBuilder::VisitJumpIfJSReceiver() {
BasicBlock* block = FinishBlock<BranchIfJSReceiver>(
- next_offset(), {GetAccumulatorTagged()},
- &jump_targets_[iterator_.GetJumpTargetOffset()],
+ {GetAccumulatorTagged()}, &jump_targets_[iterator_.GetJumpTargetOffset()],
&jump_targets_[next_offset()]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
+ StartFallthroughBlock(next_offset(), block);
}
void MaglevGraphBuilder::VisitSwitchOnSmiNoFeedback() {
@@ -2800,11 +3089,12 @@ void MaglevGraphBuilder::VisitSwitchOnSmiNoFeedback() {
ValueNode* case_value = GetAccumulatorInt32();
BasicBlock* block =
- FinishBlock<Switch>(next_offset(), {case_value}, case_value_base, targets,
+ FinishBlock<Switch>({case_value}, case_value_base, targets,
offsets.size(), &jump_targets_[next_offset()]);
for (interpreter::JumpTableTargetOffset offset : offsets) {
MergeIntoFrameState(block, offset.target_offset);
}
+ StartFallthroughBlock(next_offset(), block);
}
void MaglevGraphBuilder::VisitForInEnumerate() {
@@ -2889,7 +3179,7 @@ void MaglevGraphBuilder::VisitReturn() {
}
if (!is_inline()) {
- FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()});
+ FinishBlock<Return>({GetAccumulatorTagged()});
return;
}
@@ -2898,8 +3188,8 @@ void MaglevGraphBuilder::VisitReturn() {
// execution of the caller.
// TODO(leszeks): Consider shortcutting this Jump for cases where there is
// only one return and no need to merge return states.
- BasicBlock* block = FinishBlock<Jump>(next_offset(), {},
- &jump_targets_[inline_exit_offset()]);
+ BasicBlock* block =
+ FinishBlock<Jump>({}, &jump_targets_[inline_exit_offset()]);
MergeIntoInlinedReturnFrameState(block);
}
@@ -2955,22 +3245,26 @@ void MaglevGraphBuilder::VisitThrowIfNotSuperConstructor() {
void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
// SwitchOnGeneratorState <generator> <table_start> <table_length>
// It should be the first bytecode in the bytecode array.
- DCHECK_EQ(block_offset_, 0);
- int generator_prologue_block_offset = block_offset_ + 1;
+ DCHECK_EQ(iterator_.current_offset(), 0);
+ int generator_prologue_block_offset = 1;
DCHECK_LT(generator_prologue_block_offset, next_offset());
+ interpreter::JumpTableTargetOffsets offsets =
+ iterator_.GetJumpTableTargetOffsets();
+ // If there are no jump offsets, then this generator is not resumable, which
+ // means we can skip checking for it and switching on its state.
+ if (offsets.size() == 0) return;
+
// We create an initial block that checks if the generator is undefined.
ValueNode* maybe_generator = LoadRegisterTagged(0);
- BasicBlock* block_is_generator_undefined = CreateBlock<BranchIfRootConstant>(
+ BasicBlock* block_is_generator_undefined = FinishBlock<BranchIfRootConstant>(
{maybe_generator}, &jump_targets_[next_offset()],
&jump_targets_[generator_prologue_block_offset],
RootIndex::kUndefinedValue);
MergeIntoFrameState(block_is_generator_undefined, next_offset());
- ResolveJumpsToBlockAtOffset(block_is_generator_undefined, block_offset_);
// We create the generator prologue block.
StartNewBlock(generator_prologue_block_offset);
- DCHECK_EQ(generator_prologue_block_offset, block_offset_);
// Generator prologue.
ValueNode* generator = maybe_generator;
@@ -2988,9 +3282,6 @@ void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
interpreter::Register::virtual_accumulator());
// Switch on generator state.
- interpreter::JumpTableTargetOffsets offsets =
- iterator_.GetJumpTableTargetOffsets();
- DCHECK_NE(offsets.size(), 0);
int case_value_base = (*offsets.begin()).case_value;
BasicBlockRef* targets = zone()->NewArray<BasicBlockRef>(offsets.size());
for (interpreter::JumpTableTargetOffset offset : offsets) {
@@ -2998,12 +3289,11 @@ void MaglevGraphBuilder::VisitSwitchOnGeneratorState() {
new (ref) BasicBlockRef(&jump_targets_[offset.target_offset]);
}
ValueNode* case_value = AddNewNode<CheckedSmiUntag>({state});
- BasicBlock* generator_prologue_block = CreateBlock<Switch>(
+ BasicBlock* generator_prologue_block = FinishBlock<Switch>(
{case_value}, case_value_base, targets, offsets.size());
for (interpreter::JumpTableTargetOffset offset : offsets) {
MergeIntoFrameState(generator_prologue_block, offset.target_offset);
}
- ResolveJumpsToBlockAtOffset(generator_prologue_block, block_offset_);
}
void MaglevGraphBuilder::VisitSuspendGenerator() {
@@ -3035,7 +3325,7 @@ void MaglevGraphBuilder::VisitSuspendGenerator() {
if (relative_jump_bytecode_offset > 0) {
AddNewNode<ReduceInterruptBudget>({}, relative_jump_bytecode_offset);
}
- FinishBlock<Return>(next_offset(), {GetAccumulatorTagged()});
+ FinishBlock<Return>({GetAccumulatorTagged()});
}
void MaglevGraphBuilder::VisitResumeGenerator() {
@@ -3045,7 +3335,7 @@ void MaglevGraphBuilder::VisitResumeGenerator() {
{generator}, JSGeneratorObject::kParametersAndRegistersOffset);
interpreter::RegisterList registers = iterator_.GetRegisterListOperand(1);
- if (FLAG_maglev_assert) {
+ if (v8_flags.maglev_assert) {
// Check if register count is invalid, that is, larger than the
// register file length.
ValueNode* array_length_smi =
@@ -3104,6 +3394,4 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
#undef DEBUG_BREAK
void MaglevGraphBuilder::VisitIllegal() { UNREACHABLE(); }
-} // namespace maglev
-} // namespace internal
-} // namespace v8
+} // namespace v8::internal::maglev
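
The branch refactor above replaces the explicit true/false target offsets with a single JumpType and derives both targets from the bytecode's jump offset and the fallthrough offset. The following is a minimal standalone sketch of that selection logic, mirroring BuildBranchIfRootConstant above; the enum matches the JumpType added in the header diff below, but SelectBranchTargets and the plain int offsets are simplified stand-ins for BasicBlockRef and jump_targets_, not V8 code.

// Standalone sketch (not V8 code): how a JumpType plus the bytecode's jump
// offset and fallthrough offset determine the true/false targets of a branch.
#include <cassert>
#include <iostream>
#include <utility>

enum JumpType { kJumpIfTrue, kJumpIfFalse };

// Returns {true_target_offset, false_target_offset}.
std::pair<int, int> SelectBranchTargets(JumpType jump_type, int jump_offset,
                                        int fallthrough_offset) {
  int true_target =
      jump_type == kJumpIfTrue ? jump_offset : fallthrough_offset;
  int false_target =
      jump_type == kJumpIfFalse ? jump_offset : fallthrough_offset;
  return {true_target, false_target};
}

int main() {
  // JumpIfTrue: the taken edge goes to the jump target, else fall through.
  auto [t1, f1] = SelectBranchTargets(kJumpIfTrue, /*jump_offset=*/42,
                                      /*fallthrough_offset=*/7);
  assert(t1 == 42 && f1 == 7);
  // JumpIfFalse: the condition node is the same, only the edges swap.
  auto [t2, f2] = SelectBranchTargets(kJumpIfFalse, 42, 7);
  assert(t2 == 7 && f2 == 42);
  std::cout << "targets: (" << t1 << "," << f1 << ") (" << t2 << "," << f2
            << ")\n";
}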
diff --git a/deps/v8/src/maglev/maglev-graph-builder.h b/deps/v8/src/maglev/maglev-graph-builder.h
index 93634d79c3..621a23b015 100644
--- a/deps/v8/src/maglev/maglev-graph-builder.h
+++ b/deps/v8/src/maglev/maglev-graph-builder.h
@@ -16,7 +16,9 @@
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/processed-feedback.h"
#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/flags/flags.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/bytecode-register.h"
@@ -73,11 +75,14 @@ class MaglevGraphBuilder {
Graph* graph() const { return graph_; }
private:
- BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
+ BasicBlock* CreateEmptyBlock(int offset) {
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << "== New empty block ==" << std::endl;
+ }
DCHECK_NULL(current_block_);
current_block_ = zone()->New<BasicBlock>(nullptr);
- BasicBlock* result = CreateBlock<Jump>({}, &jump_targets_[offset]);
- result->set_empty_block_predecessor(predecessor);
+ BasicBlock* result = FinishBlock<Jump>({}, &jump_targets_[offset]);
+ result->set_empty_block();
return result;
}
@@ -96,7 +101,7 @@ class MaglevGraphBuilder {
if (has_graph_labeller()) {
for (Phi* phi : *merge_states_[offset]->phis()) {
graph_labeller()->RegisterNode(phi);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " " << phi << " "
<< PrintNodeLabel(graph_labeller(), phi) << ": "
<< PrintNode(graph_labeller(), phi) << std::endl;
@@ -131,7 +136,7 @@ class MaglevGraphBuilder {
ControlNode* control = predecessor->control_node();
if (control->Is<ConditionalControlNode>()) {
// CreateEmptyBlock automatically registers itself with the offset.
- predecessor = CreateEmptyBlock(offset, predecessor);
+ predecessor = CreateEmptyBlock(offset);
// Set the old predecessor's (the conditional block) reference to
// point to the new empty predecessor block.
old_jump_targets =
@@ -147,7 +152,7 @@ class MaglevGraphBuilder {
if (has_graph_labeller()) {
for (Phi* phi : *merge_states_[offset]->phis()) {
graph_labeller()->RegisterNode(phi);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " " << phi << " "
<< PrintNodeLabel(graph_labeller(), phi) << ": "
<< PrintNode(graph_labeller(), phi) << std::endl;
@@ -166,15 +171,13 @@ class MaglevGraphBuilder {
void EmitUnconditionalDeopt(DeoptimizeReason reason) {
// Create a block rather than calling finish, since we don't yet know the
// next block's offset before the loop skipping the rest of the bytecodes.
- BasicBlock* block = CreateBlock<Deopt>({}, reason);
- ResolveJumpsToBlockAtOffset(block, block_offset_);
-
+ FinishBlock<Deopt>({}, reason);
MarkBytecodeDead();
}
void MarkBytecodeDead() {
DCHECK_NULL(current_block_);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "== Dead ==\n"
<< std::setw(4) << iterator_.current_offset() << " : ";
interpreter::BytecodeDecoder::Decode(std::cout,
@@ -207,31 +210,6 @@ class MaglevGraphBuilder {
// Any other bytecode that doesn't return or throw will merge into the
// fallthrough.
MergeDeadIntoFrameState(iterator_.next_offset());
- } else if (bytecode == interpreter::Bytecode::kSuspendGenerator) {
- // Extra special case for SuspendGenerator, if the suspend is dead then
- // the resume has to be dead too. However, the resume already has a merge
- // state, with exactly one predecessor (the generator switch), so it will
- // be revived along the standard path. This can cause havoc if e.g. the
- // suspend/resume are inside a dead loop, because the JumpLoop can become
- // live again.
- //
- // So, manually advance the iterator to the resume, go through the motions
- // of processing the merge state, but immediately emit an unconditional
- // deopt (which also kills the resume).
- iterator_.Advance();
- DCHECK_EQ(iterator_.current_bytecode(),
- interpreter::Bytecode::kResumeGenerator);
- int resume_offset = iterator_.current_offset();
- DCHECK_EQ(NumPredecessors(resume_offset), 1);
- ProcessMergePoint(resume_offset);
- StartNewBlock(resume_offset);
- // TODO(v8:7700): This approach is not ideal. We can create a deopt-reopt
- // loop: the interpreted code runs, creates a generator while feedback is
- // still not yet allocated, then suspends the generator, tiers up to
- // maglev, and reaches this deopt. We then deopt, but since the generator
- // is never created again, we re-opt without the suspend part and we loop!
- EmitUnconditionalDeopt(DeoptimizeReason::kSuspendGeneratorIsDead);
- return;
}
// TODO(leszeks): We could now continue iterating the bytecode
@@ -245,12 +223,11 @@ class MaglevGraphBuilder {
// TODO(leszeks): Re-evaluate this DCHECK, we might hit it if the only
// bytecodes in this basic block were only register juggling.
// DCHECK(!current_block_->nodes().is_empty());
- FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
-
+ BasicBlock* predecessor = FinishBlock<Jump>({}, &jump_targets_[offset]);
merge_state->Merge(*compilation_unit_, current_interpreter_frame_,
- graph()->last_block(), offset);
+ predecessor, offset);
}
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
auto detail = merge_state->is_exception_handler() ? "exception handler"
: merge_state->is_loop() ? "loop header"
: "merge";
@@ -316,7 +293,7 @@ class MaglevGraphBuilder {
}
DCHECK_NOT_NULL(current_block_);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << std::setw(4) << iterator_.current_offset() << " : ";
interpreter::BytecodeDecoder::Decode(std::cout,
iterator_.current_address());
@@ -352,7 +329,7 @@ class MaglevGraphBuilder {
}
current_block_->nodes().Add(node);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " " << node << " "
<< PrintNodeLabel(graph_labeller(), node) << ": "
<< PrintNode(graph_labeller(), node) << std::endl;
@@ -408,6 +385,13 @@ class MaglevGraphBuilder {
return node;
}
+ enum ContextSlotMutability { kImmutable, kMutable };
+ bool TrySpecializeLoadContextSlotToFunctionContext(
+ ValueNode** context, size_t* depth, int slot_index,
+ ContextSlotMutability slot_mutability);
+ void BuildLoadContextSlot(ValueNode* context, size_t depth, int slot_index,
+ ContextSlotMutability slot_mutability);
+
template <Builtin kBuiltin>
CallBuiltin* BuildCallBuiltin(std::initializer_list<ValueNode*> inputs) {
using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
@@ -466,8 +450,7 @@ class MaglevGraphBuilder {
void BuildAbort(AbortReason reason) {
// Create a block rather than calling finish, since we don't yet know the
// next block's offset before the loop skipping the rest of the bytecodes.
- BasicBlock* block = CreateBlock<Abort>({}, reason);
- ResolveJumpsToBlockAtOffset(block, block_offset_);
+ FinishBlock<Abort>({}, reason);
MarkBytecodeDead();
}
@@ -621,78 +604,52 @@ class MaglevGraphBuilder {
current_interpreter_frame_.set(dst, current_interpreter_frame_.get(src));
}
- template <typename NodeT>
- ValueNode* AddNewConversionNode(interpreter::Register reg, ValueNode* node) {
- // TODO(v8:7700): Use a canonical conversion node. Maybe like in Phi nodes
- // where we always add a the conversion immediately after the ValueNode.
- DCHECK(NodeT::kProperties.is_conversion());
- ValueNode* result = AddNewNode<NodeT>({node});
- current_interpreter_frame_.set(reg, result);
- return result;
- }
-
- ValueNode* GetTaggedValueHelper(interpreter::Register reg, ValueNode* value) {
- // TODO(victorgomes): Consider adding the representation in the
- // InterpreterFrameState, so that we don't need to derefence a node.
+ ValueNode* GetTaggedValue(interpreter::Register reg) {
+ ValueNode* value = current_interpreter_frame_.get(reg);
switch (value->properties().value_representation()) {
case ValueRepresentation::kTagged:
return value;
case ValueRepresentation::kInt32: {
- if (value->Is<CheckedSmiUntag>()) {
- return value->input(0).node();
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->tagged_alternative == nullptr) {
+ node_info->tagged_alternative = AddNewNode<CheckedSmiTag>({value});
}
- return AddNewConversionNode<CheckedSmiTag>(reg, value);
+ return node_info->tagged_alternative;
}
case ValueRepresentation::kFloat64: {
- if (value->Is<CheckedFloat64Unbox>()) {
- return value->input(0).node();
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->tagged_alternative == nullptr) {
+ node_info->tagged_alternative = AddNewNode<Float64Box>({value});
}
- if (value->Is<ChangeInt32ToFloat64>()) {
- ValueNode* int32_value = value->input(0).node();
- return GetTaggedValueHelper(reg, int32_value);
- }
- return AddNewConversionNode<Float64Box>(reg, value);
+ return node_info->tagged_alternative;
}
}
UNREACHABLE();
}
- ValueNode* GetTaggedValue(interpreter::Register reg) {
- ValueNode* value = current_interpreter_frame_.get(reg);
- return GetTaggedValueHelper(reg, value);
- }
-
- template <typename ConversionNodeT>
- ValueNode* GetValue(interpreter::Register reg) {
- ValueNode* value = current_interpreter_frame_.get(reg);
- return AddNewConversionNode<ConversionNodeT>(reg, value);
- }
-
ValueNode* GetInt32(interpreter::Register reg) {
ValueNode* value = current_interpreter_frame_.get(reg);
switch (value->properties().value_representation()) {
case ValueRepresentation::kTagged: {
- if (value->Is<CheckedSmiTag>()) {
- return value->input(0).node();
- } else if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
+ if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
return GetInt32Constant(constant->value().value());
}
- return AddNewConversionNode<CheckedSmiUntag>(reg, value);
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative == nullptr) {
+ node_info->int32_alternative = AddNewNode<CheckedSmiUntag>({value});
+ }
+ return node_info->int32_alternative;
}
case ValueRepresentation::kInt32:
return value;
- case ValueRepresentation::kFloat64:
- // We should not be able to request an Int32 from a Float64 input,
- // unless it's an unboxing of a tagged value or a conversion from int32.
- if (value->Is<CheckedFloat64Unbox>()) {
- // TODO(leszeks): Maybe convert the CheckedFloat64Unbox to
- // ChangeInt32ToFloat64 with this CheckedSmiUntag as the input.
- return AddNewConversionNode<CheckedSmiUntag>(reg,
- value->input(0).node());
- } else if (value->Is<ChangeInt32ToFloat64>()) {
- return value->input(0).node();
+ case ValueRepresentation::kFloat64: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->int32_alternative == nullptr) {
+ node_info->int32_alternative =
+ AddNewNode<CheckedTruncateFloat64ToInt32>({value});
}
- UNREACHABLE();
+ return node_info->int32_alternative;
+ }
}
UNREACHABLE();
}
@@ -701,25 +658,27 @@ class MaglevGraphBuilder {
ValueNode* value = current_interpreter_frame_.get(reg);
switch (value->properties().value_representation()) {
case ValueRepresentation::kTagged: {
- if (value->Is<Float64Box>()) {
- return value->input(0).node();
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->float64_alternative == nullptr) {
+ node_info->float64_alternative =
+ AddNewNode<CheckedFloat64Unbox>({value});
}
- return AddNewConversionNode<CheckedFloat64Unbox>(reg, value);
+ return node_info->float64_alternative;
+ }
+ case ValueRepresentation::kInt32: {
+ NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
+ if (node_info->float64_alternative == nullptr) {
+ node_info->float64_alternative =
+ AddNewNode<ChangeInt32ToFloat64>({value});
+ }
+ return node_info->float64_alternative;
}
- case ValueRepresentation::kInt32:
- return AddNewConversionNode<ChangeInt32ToFloat64>(reg, value);
case ValueRepresentation::kFloat64:
return value;
}
UNREACHABLE();
}
- template <typename ConversionNodeT>
- ValueNode* GetAccumulator() {
- return GetValue<ConversionNodeT>(
- interpreter::Register::virtual_accumulator());
- }
-
ValueNode* GetAccumulatorTagged() {
return GetTaggedValue(interpreter::Register::virtual_accumulator());
}
@@ -738,12 +697,6 @@ class MaglevGraphBuilder {
current_interpreter_frame_.accumulator();
}
- template <typename ConversionNodeT>
- ValueNode* LoadRegister(int operand_index) {
- return GetValue<ConversionNodeT>(
- iterator_.GetRegisterOperand(operand_index));
- }
-
ValueNode* LoadRegisterTagged(int operand_index) {
return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
}
@@ -776,6 +729,8 @@ class MaglevGraphBuilder {
// would be emitted between these two nodes.
if (result->opcode() == Opcode::kCallRuntime) {
DCHECK_EQ(result->Cast<CallRuntime>()->ReturnCount(), 2);
+ } else if (result->opcode() == Opcode::kCallBuiltin) {
+ DCHECK_EQ(result->Cast<CallBuiltin>()->ReturnCount(), 2);
} else {
DCHECK_EQ(result->opcode(), Opcode::kForInPrepare);
}
@@ -886,11 +841,11 @@ class MaglevGraphBuilder {
void StartNewBlock(int offset) {
DCHECK_NULL(current_block_);
current_block_ = zone()->New<BasicBlock>(merge_states_[offset]);
- block_offset_ = offset;
+ ResolveJumpsToBlockAtOffset(current_block_, offset);
}
template <typename ControlNodeT, typename... Args>
- BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
+ BasicBlock* FinishBlock(std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
ControlNode* control_node = CreateNewNode<ControlNodeT>(
control_inputs, std::forward<Args>(args)...);
@@ -902,7 +857,7 @@ class MaglevGraphBuilder {
graph()->Add(block);
if (has_graph_labeller()) {
graph_labeller()->RegisterBasicBlock(block);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
bool kSkipTargets = true;
std::cout << " " << control_node << " "
<< PrintNodeLabel(graph_labeller(), control_node) << ": "
@@ -915,40 +870,40 @@ class MaglevGraphBuilder {
  // Update all jumps which were targeting the not-yet-created block at the
  // given `block_offset`, to now point to the given `block`.
- void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) const {
+ void ResolveJumpsToBlockAtOffset(BasicBlock* block, int block_offset) {
+ int interrupt_budget_correction = 0;
BasicBlockRef* jump_target_refs_head =
jump_targets_[block_offset].SetToBlockAndReturnNext(block);
while (jump_target_refs_head != nullptr) {
+ // Only one jump target should ever set the interrupt budget correction.
+ DCHECK_EQ(interrupt_budget_correction, 0);
+ interrupt_budget_correction =
+ jump_target_refs_head->interrupt_budget_correction();
jump_target_refs_head =
jump_target_refs_head->SetToBlockAndReturnNext(block);
}
+ if (interrupt_budget_correction != 0) {
+ DCHECK_GT(interrupt_budget_correction, 0);
+ AddNewNode<IncreaseInterruptBudget>({}, interrupt_budget_correction);
+ }
DCHECK_EQ(jump_targets_[block_offset].block_ptr(), block);
}
- template <typename ControlNodeT, typename... Args>
- BasicBlock* FinishBlock(int next_block_offset,
- std::initializer_list<ValueNode*> control_inputs,
- Args&&... args) {
- BasicBlock* block =
- CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
- ResolveJumpsToBlockAtOffset(block, block_offset_);
-
+ void StartFallthroughBlock(int next_block_offset, BasicBlock* predecessor) {
// Start a new block for the fallthrough path, unless it's a merge point, in
// which case we merge our state into it. That merge-point could also be a
// loop header, in which case the merge state might not exist yet (if the
// only predecessors are this path and the JumpLoop).
DCHECK_NULL(current_block_);
- if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
- if (NumPredecessors(next_block_offset) == 1) {
- if (FLAG_trace_maglev_graph_building) {
- std::cout << "== New block (single fallthrough) ==" << std::endl;
- }
- StartNewBlock(next_block_offset);
- } else {
- MergeIntoFrameState(block, next_block_offset);
+
+ if (NumPredecessors(next_block_offset) == 1) {
+ if (v8_flags.trace_maglev_graph_building) {
+ std::cout << "== New block (single fallthrough) ==" << std::endl;
}
+ StartNewBlock(next_block_offset);
+ } else {
+ MergeIntoFrameState(predecessor, next_block_offset);
}
- return block;
}
void InlineCallFromRegisters(int argc_count,
@@ -968,32 +923,33 @@ class MaglevGraphBuilder {
void BuildCheckSymbol(ValueNode* object);
void BuildMapCheck(ValueNode* object, const compiler::MapRef& map);
- bool TryBuildMonomorphicLoad(ValueNode* receiver,
- ValueNode* lookup_start_object,
- const compiler::MapRef& map,
- MaybeObjectHandle handler);
- bool TryBuildMonomorphicLoadFromSmiHandler(ValueNode* receiver,
- ValueNode* lookup_start_object,
- const compiler::MapRef& map,
- int32_t handler);
- bool TryBuildMonomorphicLoadFromLoadHandler(ValueNode* receiver,
- ValueNode* lookup_start_object,
- const compiler::MapRef& map,
- LoadHandler handler);
-
- bool TryBuildMonomorphicElementLoad(ValueNode* object, ValueNode* index,
- const compiler::MapRef& map,
- MaybeObjectHandle handler);
- bool TryBuildMonomorphicElementLoadFromSmiHandler(ValueNode* object,
- ValueNode* index,
- const compiler::MapRef& map,
- int32_t handler);
-
- bool TryBuildMonomorphicStore(ValueNode* object, const compiler::MapRef& map,
- MaybeObjectHandle handler);
- bool TryBuildMonomorphicStoreFromSmiHandler(ValueNode* object,
- const compiler::MapRef& map,
- int32_t handler);
+ bool TryFoldLoadDictPrototypeConstant(
+ compiler::PropertyAccessInfo access_info);
+ bool TryFoldLoadConstantDataField(compiler::PropertyAccessInfo access_info);
+
+ void BuildLoadField(compiler::PropertyAccessInfo access_info,
+ ValueNode* lookup_start_object);
+ bool TryBuildStoreField(compiler::PropertyAccessInfo access_info,
+ ValueNode* receiver);
+ bool TryBuildPropertyGetterCall(compiler::PropertyAccessInfo access_info,
+ ValueNode* receiver);
+ bool TryBuildPropertySetterCall(compiler::PropertyAccessInfo access_info,
+ ValueNode* receiver, ValueNode* value);
+
+ bool TryBuildPropertyLoad(ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::PropertyAccessInfo const& access_info);
+ bool TryBuildPropertyStore(ValueNode* receiver,
+ compiler::PropertyAccessInfo const& access_info);
+ bool TryBuildPropertyAccess(ValueNode* receiver,
+ ValueNode* lookup_start_object,
+ compiler::PropertyAccessInfo const& access_info,
+ compiler::AccessMode access_mode);
+
+ bool TryBuildNamedAccess(ValueNode* receiver, ValueNode* lookup_start_object,
+ compiler::NamedAccessFeedback const& feedback,
+ compiler::AccessMode access_mode);
+ bool TryBuildElementAccess(ValueNode* object, ValueNode* index,
+ compiler::ElementAccessFeedback const& feedback);
template <Operation kOperation>
void BuildGenericUnaryOperationNode();
@@ -1035,14 +991,14 @@ class MaglevGraphBuilder {
void MergeDeadIntoFrameState(int target);
void MergeDeadLoopIntoFrameState(int target);
void MergeIntoInlinedReturnFrameState(BasicBlock* block);
- void BuildBranchIfRootConstant(ValueNode* node, int true_target,
- int false_target, RootIndex root_index);
- void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
- void BuildBranchIfNull(ValueNode* node, int true_target, int false_target);
- void BuildBranchIfUndefined(ValueNode* node, int true_target,
- int false_target);
- void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,
- int false_target);
+
+ enum JumpType { kJumpIfTrue, kJumpIfFalse };
+ void BuildBranchIfRootConstant(ValueNode* node, JumpType jump_type,
+ RootIndex root_index);
+ void BuildBranchIfTrue(ValueNode* node, JumpType jump_type);
+ void BuildBranchIfNull(ValueNode* node, JumpType jump_type);
+ void BuildBranchIfUndefined(ValueNode* node, JumpType jump_type);
+ void BuildBranchIfToBooleanTrue(ValueNode* node, JumpType jump_type);
void BuildToNumberOrToNumeric(Object::Conversion mode);
@@ -1136,7 +1092,6 @@ class MaglevGraphBuilder {
// Current block information.
BasicBlock* current_block_ = nullptr;
- int block_offset_ = 0;
base::Optional<CheckpointedInterpreterState> latest_checkpointed_state_;
BasicBlockRef* jump_targets_;
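
The GetTaggedValue/GetInt32/GetFloat64 changes above stop pattern-matching on conversion nodes and instead cache one converted node per representation in NodeInfo. The sketch below illustrates that caching pattern under simplified assumptions: ValueNode, NodeInfo, EmitConversion and the arena are hypothetical stand-ins, not the Maglev classes.

// Standalone sketch (not V8 code): emit a representation conversion at most
// once per value, then reuse the cached alternative on later requests.
#include <deque>
#include <iostream>
#include <string>
#include <unordered_map>

struct ValueNode {
  std::string label;
};

struct NodeInfo {
  ValueNode* tagged_alternative = nullptr;
  ValueNode* int32_alternative = nullptr;
};

std::deque<ValueNode> node_arena;  // Stand-in for the graph zone.
std::unordered_map<const ValueNode*, NodeInfo> known_node_aspects;

ValueNode* EmitConversion(const std::string& op, const ValueNode* input) {
  std::cout << "emit " << op << "(" << input->label << ")\n";
  node_arena.push_back(ValueNode{op + "(" + input->label + ")"});
  return &node_arena.back();
}

ValueNode* GetTagged(const ValueNode* int32_value) {
  NodeInfo& info = known_node_aspects[int32_value];
  if (info.tagged_alternative == nullptr) {
    // First request: emit the conversion and remember it.
    info.tagged_alternative = EmitConversion("CheckedSmiTag", int32_value);
  }
  return info.tagged_alternative;
}

int main() {
  ValueNode x{"x_int32"};
  ValueNode* a = GetTagged(&x);
  ValueNode* b = GetTagged(&x);  // Reuses the cached node; nothing is emitted.
  std::cout << (a == b ? "reused cached tagged alternative\n" : "bug\n");
}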
diff --git a/deps/v8/src/maglev/maglev-graph-printer.cc b/deps/v8/src/maglev/maglev-graph-printer.cc
index 3f4cec406c..02e809b73a 100644
--- a/deps/v8/src/maglev/maglev-graph-printer.cc
+++ b/deps/v8/src/maglev/maglev-graph-printer.cc
@@ -45,7 +45,7 @@ void PrintPaddedId(std::ostream& os, MaglevGraphLabeller* graph_labeller,
for (int i = 0; i < padding_width; ++i) {
os << padding;
}
- if (FLAG_log_colour) os << "\033[0m";
+ if (v8_flags.log_colour) os << "\033[0m";
if (node->has_id()) {
os << node->id() << "/";
}
@@ -158,7 +158,7 @@ void PrintVerticalArrows(std::ostream& os,
desired_color = (i % 6) + 1;
c.AddVertical();
}
- if (FLAG_log_colour && desired_color != current_color &&
+ if (v8_flags.log_colour && desired_color != current_color &&
desired_color != -1) {
os << "\033[0;3" << desired_color << "m";
current_color = desired_color;
@@ -167,7 +167,7 @@ void PrintVerticalArrows(std::ostream& os,
}
// If there are no arrows starting here, clear the color. Otherwise,
// PrintPaddedId will clear it.
- if (FLAG_log_colour && arrows_starting_here.empty() &&
+ if (v8_flags.log_colour && arrows_starting_here.empty() &&
targets_starting_here.empty()) {
os << "\033[0m";
}
@@ -342,7 +342,7 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(BasicBlock* block) {
desired_color = (i % 6) + 1;
c.AddVertical();
}
- if (FLAG_log_colour && current_color != desired_color &&
+ if (v8_flags.log_colour && current_color != desired_color &&
desired_color != -1) {
os_ << "\033[0;3" << desired_color << "m";
current_color = desired_color;
@@ -350,7 +350,7 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(BasicBlock* block) {
os_ << c;
}
os_ << (saw_start ? "►" : " ");
- if (FLAG_log_colour) os_ << "\033[0m";
+ if (v8_flags.log_colour) os_ << "\033[0m";
}
int block_id = graph_labeller_->BlockId(block);
@@ -429,8 +429,8 @@ void PrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
} else {
os << PrintNodeLabel(graph_labeller, node) << ":"
<< deopt_info->input_locations[index].operand();
+ index++;
}
- index++;
});
os << "}\n";
}
diff --git a/deps/v8/src/maglev/maglev-graph-verifier.h b/deps/v8/src/maglev/maglev-graph-verifier.h
index af7c716c79..5675d53be5 100644
--- a/deps/v8/src/maglev/maglev-graph-verifier.h
+++ b/deps/v8/src/maglev/maglev-graph-verifier.h
@@ -86,6 +86,7 @@ class MaglevGraphVerifier {
case Opcode::kCreateObjectLiteral:
case Opcode::kCreateShallowObjectLiteral:
case Opcode::kCreateRegExpLiteral:
+ case Opcode::kDebugBreak:
case Opcode::kDeopt:
case Opcode::kFloat64Constant:
case Opcode::kGapMove:
@@ -122,6 +123,7 @@ class MaglevGraphVerifier {
case Opcode::kCheckString:
case Opcode::kCheckSymbol:
case Opcode::kCheckedInternalizedString:
+ case Opcode::kCheckedObjectToIndex:
// TODO(victorgomes): Can we check that the input is Boolean?
case Opcode::kBranchIfToBooleanTrue:
case Opcode::kBranchIfRootConstant:
@@ -135,6 +137,7 @@ class MaglevGraphVerifier {
case Opcode::kGetTemplateObject:
case Opcode::kLogicalNot:
case Opcode::kSetPendingMessage:
+ case Opcode::kStringLength:
case Opcode::kToBooleanLogicalNot:
case Opcode::kTestUndetectable:
case Opcode::kTestTypeOf:
@@ -147,11 +150,13 @@ class MaglevGraphVerifier {
break;
case Opcode::kSwitch:
case Opcode::kCheckedSmiTag:
+ case Opcode::kUnsafeSmiTag:
case Opcode::kChangeInt32ToFloat64:
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
break;
case Opcode::kFloat64Box:
+ case Opcode::kCheckedTruncateFloat64ToInt32:
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIs(node, 0, ValueRepresentation::kFloat64);
break;
@@ -176,10 +181,6 @@ class MaglevGraphVerifier {
case Opcode::kGenericLessThan:
case Opcode::kGenericLessThanOrEqual:
case Opcode::kGenericStrictEqual:
- case Opcode::kCheckJSArrayBounds:
- case Opcode::kCheckJSObjectElementsBounds:
- case Opcode::kLoadTaggedElement:
- case Opcode::kLoadDoubleElement:
case Opcode::kGetIterator:
case Opcode::kTaggedEqual:
case Opcode::kTaggedNotEqual:
@@ -275,6 +276,14 @@ class MaglevGraphVerifier {
CheckValueInputIs(node, i, ValueRepresentation::kTagged);
}
break;
+ case Opcode::kCheckJSArrayBounds:
+ case Opcode::kCheckJSObjectElementsBounds:
+ case Opcode::kLoadTaggedElement:
+ case Opcode::kLoadDoubleElement:
+ DCHECK_EQ(node->input_count(), 2);
+ CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+ CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
+ break;
case Opcode::kCallBuiltin: {
CallBuiltin* call_builtin = node->Cast<CallBuiltin>();
auto descriptor =
diff --git a/deps/v8/src/maglev/maglev-interpreter-frame-state.h b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
index 8ddda35edd..2db6a4ee3e 100644
--- a/deps/v8/src/maglev/maglev-interpreter-frame-state.h
+++ b/deps/v8/src/maglev/maglev-interpreter-frame-state.h
@@ -55,6 +55,11 @@ void DestructivelyIntersect(ZoneMap<ValueNode*, Value>& lhs_map,
++rhs_it;
}
}
+ // If we haven't reached the end of LHS by now, then we have reached the end
+ // of RHS, and the remaining items are therefore not in RHS. Remove them.
+ if (lhs_it != lhs_map.end()) {
+ lhs_map.erase(lhs_it, lhs_map.end());
+ }
}
// The intersection (using `&`) of any two NodeTypes must be a valid NodeType
@@ -72,34 +77,52 @@ enum class NodeType {
kHeapObjectWithKnownMap = (1 << 5) | kAnyHeapObject,
};
+inline bool NodeTypeIsSmi(NodeType type) { return type == NodeType::kSmi; }
+inline bool NodeTypeIsAnyHeapObject(NodeType type) {
+ return static_cast<int>(type) & static_cast<int>(NodeType::kAnyHeapObject);
+}
+inline bool NodeTypeIsString(NodeType type) {
+ return type == NodeType::kString;
+}
+inline bool NodeTypeIsSymbol(NodeType type) {
+ return type == NodeType::kSymbol;
+}
+
struct NodeInfo {
- NodeType type;
- // TODO(leszeks): Consider adding more info for nodes here, e.g. alternative
- // representations or previously loaded fields.
+ NodeType type = NodeType::kUnknown;
- static bool IsSmi(const NodeInfo* info) {
- if (!info) return false;
- return info->type == NodeType::kSmi;
- }
- static bool IsAnyHeapObject(const NodeInfo* info) {
- if (!info) return false;
- return static_cast<int>(info->type) &
- static_cast<int>(NodeType::kAnyHeapObject);
- }
- static bool IsString(const NodeInfo* info) {
- if (!info) return false;
- return info->type == NodeType::kString;
- }
- static bool IsSymbol(const NodeInfo* info) {
- if (!info) return false;
- return info->type == NodeType::kSymbol;
+ // Optional alternative nodes with the equivalent value but a different
+ // representation.
+ // TODO(leszeks): At least one of these is redundant for every node, consider
+ // a more compressed form or even linked list.
+ ValueNode* tagged_alternative = nullptr;
+ ValueNode* int32_alternative = nullptr;
+ ValueNode* float64_alternative = nullptr;
+
+ bool is_empty() {
+ return type == NodeType::kUnknown && tagged_alternative == nullptr &&
+ int32_alternative == nullptr && float64_alternative == nullptr;
}
+ bool is_smi() const { return NodeTypeIsSmi(type); }
+ bool is_any_heap_object() const { return NodeTypeIsAnyHeapObject(type); }
+ bool is_string() const { return NodeTypeIsString(type); }
+ bool is_symbol() const { return NodeTypeIsSymbol(type); }
+
// Mutate this node info by merging in another node info, with the result
// being a node info that is the subset of information valid in both inputs.
void MergeWith(const NodeInfo& other) {
type = static_cast<NodeType>(static_cast<int>(type) &
static_cast<int>(other.type));
+ tagged_alternative = tagged_alternative == other.tagged_alternative
+ ? tagged_alternative
+ : nullptr;
+ int32_alternative = int32_alternative == other.int32_alternative
+ ? int32_alternative
+ : nullptr;
+ float64_alternative = float64_alternative == other.float64_alternative
+ ? float64_alternative
+ : nullptr;
}
};
@@ -131,28 +154,13 @@ struct KnownNodeAspects {
return clone;
}
- NodeInfo* GetInfoFor(ValueNode* node) {
- auto it = node_infos.find(node);
- if (it == node_infos.end()) return nullptr;
- return &it->second;
- }
-
- void InsertOrUpdateNodeType(ValueNode* node, NodeInfo* existing_info,
- NodeType new_type) {
- if (existing_info == nullptr) {
- DCHECK_EQ(node_infos.find(node), node_infos.end());
- node_infos.emplace(node, NodeInfo{new_type});
- } else {
- DCHECK_EQ(&node_infos.find(node)->second, existing_info);
- existing_info->type = new_type;
- }
- }
+ NodeInfo* GetOrCreateInfoFor(ValueNode* node) { return &node_infos[node]; }
void Merge(const KnownNodeAspects& other) {
DestructivelyIntersect(node_infos, other.node_infos,
[](NodeInfo& lhs, const NodeInfo& rhs) {
lhs.MergeWith(rhs);
- return lhs.type != NodeType::kUnknown;
+ return !lhs.is_empty();
});
DestructivelyIntersect(stable_maps, other.stable_maps,
[](compiler::MapRef lhs, compiler::MapRef rhs) {
@@ -186,6 +194,8 @@ class InterpreterFrameState {
const MergePointInterpreterFrameState& state);
void set_accumulator(ValueNode* value) {
+ // Conversions should be stored in known_node_aspects/NodeInfo.
+ DCHECK(!value->properties().is_conversion());
frame_[interpreter::Register::virtual_accumulator()] = value;
}
ValueNode* accumulator() const {
@@ -198,6 +208,8 @@ class InterpreterFrameState {
reg == interpreter::Register::function_closure() ||
reg == interpreter::Register::virtual_accumulator() ||
reg.ToParameterIndex() >= 0);
+ // Conversions should be stored in known_node_aspects/NodeInfo.
+ DCHECK(!value->properties().is_conversion());
frame_[reg] = value;
}
ValueNode* get(interpreter::Register reg) const {
@@ -444,11 +456,12 @@ class MergePointInterpreterFrameState {
});
merge_state->predecessors_[0] = predecessor;
merge_state->known_node_aspects_ =
- info.zone()->New<KnownNodeAspects>(info.zone());
+ state.known_node_aspects().Clone(info.zone());
return merge_state;
}
static MergePointInterpreterFrameState* NewForLoop(
+ const InterpreterFrameState& start_state,
const MaglevCompilationUnit& info, int merge_offset,
int predecessor_count, const compiler::BytecodeLivenessState* liveness,
const compiler::LoopInfo* loop_info) {
@@ -457,6 +470,11 @@ class MergePointInterpreterFrameState {
info, predecessor_count, 0,
info.zone()->NewArray<BasicBlock*>(predecessor_count),
BasicBlockType::kLoopHeader, liveness);
+ if (loop_info->resumable()) {
+ state->known_node_aspects_ =
+ info.zone()->New<KnownNodeAspects>(info.zone());
+ state->is_resumable_loop_ = true;
+ }
auto& assignments = loop_info->assignments();
auto& frame_state = state->frame_state_;
frame_state.ForEachParameter(
@@ -464,6 +482,10 @@ class MergePointInterpreterFrameState {
entry = nullptr;
if (assignments.ContainsParameter(reg.ToParameterIndex())) {
entry = state->NewLoopPhi(info.zone(), reg, merge_offset);
+ } else if (state->is_resumable_loop()) {
+ // Copy initial values out of the start state.
+ entry = start_state.get(reg);
+ DCHECK(entry->Is<InitialValue>());
}
});
// TODO(v8:7700): Add contexts into assignment analysis.
@@ -488,45 +510,46 @@ class MergePointInterpreterFrameState {
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void Merge(MaglevCompilationUnit& compilation_unit,
- const InterpreterFrameState& unmerged, BasicBlock* predecessor,
+ InterpreterFrameState& unmerged, BasicBlock* predecessor,
int merge_offset) {
DCHECK_GT(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
- if (known_node_aspects_ == nullptr) {
- DCHECK(is_unmerged_loop());
- DCHECK_EQ(predecessors_so_far_, 0);
- known_node_aspects_ =
- unmerged.known_node_aspects().CloneWithoutUnstableMaps(
- compilation_unit.zone());
- } else {
- known_node_aspects_->Merge(unmerged.known_node_aspects());
- }
-
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "Merging..." << std::endl;
}
frame_state_.ForEachValue(compilation_unit, [&](ValueNode*& value,
interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " " << reg.ToString() << ": "
<< PrintNodeLabel(compilation_unit.graph_labeller(), value)
<< " <- "
<< PrintNodeLabel(compilation_unit.graph_labeller(),
unmerged.get(reg));
}
- value = MergeValue(compilation_unit, reg, value, unmerged.get(reg),
- merge_offset);
- if (FLAG_trace_maglev_graph_building) {
+ value = MergeValue(compilation_unit, reg, unmerged.known_node_aspects(),
+ value, unmerged.get(reg), merge_offset);
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " => "
<< PrintNodeLabel(compilation_unit.graph_labeller(), value)
<< ": " << PrintNode(compilation_unit.graph_labeller(), value)
<< std::endl;
}
});
+
+ if (known_node_aspects_ == nullptr) {
+ DCHECK(is_unmerged_loop());
+ DCHECK_EQ(predecessors_so_far_, 0);
+ known_node_aspects_ =
+ unmerged.known_node_aspects().CloneWithoutUnstableMaps(
+ compilation_unit.zone());
+ } else {
+ known_node_aspects_->Merge(unmerged.known_node_aspects());
+ }
+
predecessors_so_far_++;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
}
@@ -534,30 +557,30 @@ class MergePointInterpreterFrameState {
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(MaglevCompilationUnit& compilation_unit,
- const InterpreterFrameState& loop_end_state,
+ InterpreterFrameState& loop_end_state,
BasicBlock* loop_end_block, int merge_offset) {
// This should be the last predecessor we try to merge.
DCHECK_EQ(predecessors_so_far_, predecessor_count_ - 1);
DCHECK(is_unmerged_loop());
predecessors_[predecessor_count_ - 1] = loop_end_block;
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << "Merging loop backedge..." << std::endl;
}
frame_state_.ForEachValue(compilation_unit, [&](ValueNode* value,
interpreter::Register reg) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " " << reg.ToString() << ": "
<< PrintNodeLabel(compilation_unit.graph_labeller(), value)
<< " <- "
<< PrintNodeLabel(compilation_unit.graph_labeller(),
loop_end_state.get(reg));
}
- MergeLoopValue(compilation_unit, reg, value, loop_end_state.get(reg),
- merge_offset);
- if (FLAG_trace_maglev_graph_building) {
+ MergeLoopValue(compilation_unit, reg, loop_end_state.known_node_aspects(),
+ value, loop_end_state.get(reg), merge_offset);
+ if (v8_flags.trace_maglev_graph_building) {
std::cout << " => "
<< PrintNodeLabel(compilation_unit.graph_labeller(), value)
<< ": " << PrintNode(compilation_unit.graph_labeller(), value)
@@ -572,7 +595,7 @@ class MergePointInterpreterFrameState {
// deopt).
void MergeDead(const MaglevCompilationUnit& compilation_unit,
int merge_offset) {
- DCHECK_GT(predecessor_count_, 1);
+ DCHECK_GE(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessor_count_--;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
@@ -636,9 +659,12 @@ class MergePointInterpreterFrameState {
bool is_unreachable_loop() const {
// If there is only one predecessor, and it's not set, then this is a loop
// merge with no forward control flow entering it.
- return is_loop() && predecessor_count_ == 1 && predecessors_so_far_ == 0;
+ return is_loop() && !is_resumable_loop() && predecessor_count_ == 1 &&
+ predecessors_so_far_ == 0;
}
+ bool is_resumable_loop() const { return is_resumable_loop_; }
+
private:
friend void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
@@ -658,44 +684,43 @@ class MergePointInterpreterFrameState {
frame_state_(info, liveness) {}
ValueNode* FromInt32ToTagged(MaglevCompilationUnit& compilation_unit,
+ KnownNodeAspects& known_node_aspects,
ValueNode* value) {
DCHECK_EQ(value->properties().value_representation(),
ValueRepresentation::kInt32);
- if (value->Is<CheckedSmiUntag>()) {
- return value->input(0).node();
- }
+ DCHECK(!value->properties().is_conversion());
#define IS_INT32_OP_NODE(Name) || value->Is<Name>()
- DCHECK(value->Is<Int32Constant>()
+ DCHECK(value->Is<Int32Constant>() ||
+ value->Is<StringLength>()
INT32_OPERATIONS_NODE_LIST(IS_INT32_OP_NODE));
#undef IS_INT32_OP_NODE
- // Check if the next Node in the block after value is its CheckedSmiTag
- // version and reuse it.
- if (value->NextNode()) {
- CheckedSmiTag* tagged = value->NextNode()->TryCast<CheckedSmiTag>();
- if (tagged != nullptr && value == tagged->input().node()) {
- return tagged;
- }
- }
- // Otherwise create a tagged version.
- ValueNode* tagged =
- Node::New<CheckedSmiTag, std::initializer_list<ValueNode*>>(
+ NodeInfo* node_info = known_node_aspects.GetOrCreateInfoFor(value);
+ if (!node_info->tagged_alternative) {
+ // Create a tagged version.
+ ValueNode* tagged;
+ if (value->Is<StringLength>()) {
+ static_assert(String::kMaxLength <= kSmiMaxValue,
+ "String length must fit into a Smi");
+ tagged = Node::New<UnsafeSmiTag>(compilation_unit.zone(), {value});
+ } else {
+ tagged = Node::New<CheckedSmiTag, std::initializer_list<ValueNode*>>(
compilation_unit.zone(), compilation_unit,
value->eager_deopt_info()->state, {value});
- Node::List::AddAfter(value, tagged);
- compilation_unit.RegisterNodeInGraphLabeller(tagged);
- return tagged;
+ }
+
+ Node::List::AddAfter(value, tagged);
+ compilation_unit.RegisterNodeInGraphLabeller(tagged);
+ node_info->tagged_alternative = tagged;
+ }
+ return node_info->tagged_alternative;
}
ValueNode* FromFloat64ToTagged(MaglevCompilationUnit& compilation_unit,
+ KnownNodeAspects& known_node_aspects,
ValueNode* value) {
DCHECK_EQ(value->properties().value_representation(),
ValueRepresentation::kFloat64);
- if (value->Is<CheckedFloat64Unbox>()) {
- return value->input(0).node();
- }
- if (value->Is<ChangeInt32ToFloat64>()) {
- return FromInt32ToTagged(compilation_unit, value->input(0).node());
- }
+ DCHECK(!value->properties().is_conversion());
// Check if the next Node in the block after value is its Float64Box
// version and reuse it.
if (value->NextNode()) {
@@ -714,19 +739,21 @@ class MergePointInterpreterFrameState {
// TODO(victorgomes): Consider refactor this function to share code with
// MaglevGraphBuilder::GetTagged.
ValueNode* EnsureTagged(MaglevCompilationUnit& compilation_unit,
+ KnownNodeAspects& known_node_aspects,
ValueNode* value) {
switch (value->properties().value_representation()) {
case ValueRepresentation::kTagged:
return value;
case ValueRepresentation::kInt32:
- return FromInt32ToTagged(compilation_unit, value);
+ return FromInt32ToTagged(compilation_unit, known_node_aspects, value);
case ValueRepresentation::kFloat64:
- return FromFloat64ToTagged(compilation_unit, value);
+ return FromFloat64ToTagged(compilation_unit, known_node_aspects, value);
}
}
ValueNode* MergeValue(MaglevCompilationUnit& compilation_unit,
- interpreter::Register owner, ValueNode* merged,
+ interpreter::Register owner,
+ KnownNodeAspects& unmerged_aspects, ValueNode* merged,
ValueNode* unmerged, int merge_offset) {
    // If the merged node is null, this is a pre-created loop header merge
    // frame with null values for anything that isn't a loop Phi.
@@ -741,7 +768,7 @@ class MergePointInterpreterFrameState {
// It's possible that merged == unmerged at this point since loop-phis are
// not dropped if they are only assigned to themselves in the loop.
DCHECK_EQ(result->owner(), owner);
- unmerged = EnsureTagged(compilation_unit, unmerged);
+ unmerged = EnsureTagged(compilation_unit, unmerged_aspects, unmerged);
result->set_input(predecessors_so_far_, unmerged);
return result;
}
@@ -750,8 +777,8 @@ class MergePointInterpreterFrameState {
// We guarantee that the values are tagged.
// TODO(victorgomes): Support Phi nodes of untagged values.
- merged = EnsureTagged(compilation_unit, merged);
- unmerged = EnsureTagged(compilation_unit, unmerged);
+ merged = EnsureTagged(compilation_unit, *known_node_aspects_, merged);
+ unmerged = EnsureTagged(compilation_unit, unmerged_aspects, unmerged);
// Tagged versions could point to the same value, avoid Phi nodes in this
// case.
@@ -770,7 +797,7 @@ class MergePointInterpreterFrameState {
for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
result->set_input(predecessors_so_far_, unmerged);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
for (int i = predecessors_so_far_ + 1; i < predecessor_count_; i++) {
result->set_input(i, nullptr);
}
@@ -800,7 +827,8 @@ class MergePointInterpreterFrameState {
}
void MergeLoopValue(MaglevCompilationUnit& compilation_unit,
- interpreter::Register owner, ValueNode* merged,
+ interpreter::Register owner,
+ KnownNodeAspects& unmerged_aspects, ValueNode* merged,
ValueNode* unmerged, int merge_offset) {
Phi* result = merged->TryCast<Phi>();
if (result == nullptr || result->merge_offset() != merge_offset) {
@@ -814,7 +842,7 @@ class MergePointInterpreterFrameState {
return;
}
DCHECK_EQ(result->owner(), owner);
- unmerged = EnsureTagged(compilation_unit, unmerged);
+ unmerged = EnsureTagged(compilation_unit, unmerged_aspects, unmerged);
result->set_input(predecessor_count_ - 1, unmerged);
}
@@ -823,7 +851,7 @@ class MergePointInterpreterFrameState {
DCHECK_EQ(predecessors_so_far_, 0);
// Create a new loop phi, which for now is empty.
Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
- if (FLAG_trace_maglev_graph_building) {
+ if (v8_flags.trace_maglev_graph_building) {
for (int i = 0; i < predecessor_count_; i++) {
result->set_input(i, nullptr);
}
@@ -844,6 +872,7 @@ class MergePointInterpreterFrameState {
int predecessor_count_;
int predecessors_so_far_;
+ bool is_resumable_loop_ = false;
BasicBlock** predecessors_;
BasicBlockType basic_block_type_;
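
The interpreter-frame-state changes above extend DestructivelyIntersect so that any LHS entries left over once RHS is exhausted are erased, and merged NodeInfo entries that become empty are dropped. Here is a small self-contained sketch of that intersection pattern, using std::map instead of ZoneMap and plain ints instead of NodeInfo; it is an illustration of the idea, not the V8 implementation.

// Standalone sketch (not V8 code): keep only the facts present in both maps,
// merging matching entries and erasing the LHS tail once RHS runs out.
#include <iostream>
#include <map>
#include <string>

template <typename Key, typename Value, typename MergeFn>
void DestructivelyIntersect(std::map<Key, Value>& lhs,
                            const std::map<Key, Value>& rhs, MergeFn merge) {
  auto lhs_it = lhs.begin();
  auto rhs_it = rhs.begin();
  while (lhs_it != lhs.end() && rhs_it != rhs.end()) {
    if (lhs_it->first < rhs_it->first) {
      lhs_it = lhs.erase(lhs_it);          // Only in LHS: drop it.
    } else if (rhs_it->first < lhs_it->first) {
      ++rhs_it;                            // Only in RHS: ignore it.
    } else {
      if (merge(lhs_it->second, rhs_it->second)) {
        ++lhs_it;                          // Merged info is still useful.
      } else {
        lhs_it = lhs.erase(lhs_it);        // Merged info became empty.
      }
      ++rhs_it;
    }
  }
  // Anything left in LHS has no RHS counterpart: remove it.
  lhs.erase(lhs_it, lhs.end());
}

int main() {
  std::map<std::string, int> lhs{{"a", 1}, {"b", 2}, {"z", 9}};
  const std::map<std::string, int> rhs{{"b", 2}, {"c", 3}};
  DestructivelyIntersect(lhs, rhs, [](int& l, int r) {
    l &= r;          // Keep only bits true in both inputs.
    return l != 0;   // Empty info gets dropped entirely.
  });
  for (const auto& [k, v] : lhs) std::cout << k << "=" << v << "\n";  // b=2
}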
diff --git a/deps/v8/src/maglev/maglev-ir-inl.h b/deps/v8/src/maglev/maglev-ir-inl.h
index 836f46f605..e9fe230ee5 100644
--- a/deps/v8/src/maglev/maglev-ir-inl.h
+++ b/deps/v8/src/maglev/maglev-ir-inl.h
@@ -30,10 +30,28 @@ void DeepForEachInputImpl(const MaglevCompilationUnit& unit,
}
template <typename Function>
-void DeepForEachInput(const EagerDeoptInfo* node, Function&& f) {
+void DeepForEachInput(const EagerDeoptInfo* deopt_info, Function&& f) {
int index = 0;
- DeepForEachInputImpl(node->unit, &node->state, node->input_locations, index,
- f);
+ DeepForEachInputImpl(deopt_info->unit, &deopt_info->state,
+ deopt_info->input_locations, index, f);
+}
+
+template <typename Function>
+void DeepForEachInput(const LazyDeoptInfo* deopt_info, Function&& f) {
+ int index = 0;
+ if (deopt_info->state.parent) {
+ DeepForEachInputImpl(*deopt_info->unit.caller(), deopt_info->state.parent,
+ deopt_info->input_locations, index, f);
+ }
+ // Handle the top-of-frame info separately, since we have to skip the result
+ // location.
+ deopt_info->state.register_frame->ForEachValue(
+ deopt_info->unit, [&](ValueNode* node, interpreter::Register reg) {
+ // Skip over the result location since it is irrelevant for lazy deopts
+ // (unoptimized code will recreate the result).
+ if (deopt_info->IsResultRegister(reg)) return;
+ f(node, reg, &deopt_info->input_locations[index++]);
+ });
}
} // namespace detail
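
The new DeepForEachInput overload for LazyDeoptInfo above walks the register frame but skips the result register, and only advances the input-location index for values it actually visits (the same off-by-one that the graph-printer fix addresses). Below is a simplified, standalone illustration of that pattern; Register, InputLocation and ForEachLazyDeoptInput are hypothetical stand-ins rather than the Maglev types.

// Standalone sketch (not V8 code): visit every frame value except the result
// register, handing out input locations only to the values that are visited.
#include <functional>
#include <iostream>
#include <vector>

struct Register { int index; };
struct InputLocation { int id; };

bool IsResultRegister(const Register& reg, const Register& result) {
  return reg.index == result.index;
}

void ForEachLazyDeoptInput(
    const std::vector<Register>& frame, const Register& result_location,
    std::vector<InputLocation>& input_locations,
    const std::function<void(Register, InputLocation*)>& f) {
  int index = 0;
  for (const Register& reg : frame) {
    // The unoptimized frame will recreate the result, so its register does
    // not consume an input location at all.
    if (IsResultRegister(reg, result_location)) continue;
    f(reg, &input_locations[index++]);
  }
}

int main() {
  std::vector<Register> frame{{0}, {1}, {2}, {3}};
  Register result{2};
  std::vector<InputLocation> locations{{100}, {101}, {102}};
  ForEachLazyDeoptInput(frame, result, locations,
                        [](Register reg, InputLocation* loc) {
                          std::cout << "r" << reg.index << " -> loc "
                                    << loc->id << "\n";
                        });
  // Prints r0 -> 100, r1 -> 101, r3 -> 102: the result register r2 is skipped.
}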
diff --git a/deps/v8/src/maglev/maglev-ir.cc b/deps/v8/src/maglev/maglev-ir.cc
index 3c32641ac4..6a53070819 100644
--- a/deps/v8/src/maglev/maglev-ir.cc
+++ b/deps/v8/src/maglev/maglev-ir.cc
@@ -140,6 +140,24 @@ class SaveRegisterStateForCall {
RegisterSnapshot snapshot_;
};
+#ifdef DEBUG
+RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
+ RegList regs;
+ detail::DeepForEachInput(deopt_info,
+ [&regs](ValueNode* value, interpreter::Register reg,
+ InputLocation* input) {
+ if (input->IsGeneralRegister()) {
+ regs.set(input->AssignedGeneralRegister());
+ }
+ });
+ return regs;
+}
+#endif // DEBUG
+
+// Helper macro for checking that a reglist is empty which prints the contents
+// when non-empty.
+#define DCHECK_REGLIST_EMPTY(...) DCHECK_EQ((__VA_ARGS__), RegList{})
+
// ---
// Inlined computations.
// ---
@@ -151,7 +169,8 @@ void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
// TODO(victorgomes): Call the runtime for large object allocation.
// TODO(victorgomes): Support double alignment.
DCHECK_EQ(alignment, kTaggedAligned);
- if (FLAG_single_generation) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+ if (v8_flags.single_generation) {
alloc_type = AllocationType::kOld;
}
bool in_new_space = alloc_type == AllocationType::kYoung;
@@ -165,7 +184,7 @@ void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
? ExternalReference::new_space_allocation_limit_address(isolate)
: ExternalReference::old_space_allocation_limit_address(isolate);
- ZoneLabelRef done(masm->compilation_info()->zone());
+ ZoneLabelRef done(masm);
Register new_top = kScratchRegister;
// Check if there is enough space.
__ Move(object, __ ExternalReferenceAsOperand(top));
@@ -174,12 +193,13 @@ void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
// Otherwise call runtime.
__ JumpToDeferredIf(
greater_equal,
- [](MaglevAssembler* masm, Label* return_label,
- RegisterSnapshot register_snapshot, Register object, Builtin builtin,
- int size_in_bytes, ZoneLabelRef done) {
+ [](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
+ Register object, Builtin builtin, int size_in_bytes,
+ ZoneLabelRef done) {
// Remove {object} from snapshot, since it is the returned allocated
// HeapObject.
register_snapshot.live_registers.clear(object);
+ register_snapshot.live_tagged_registers.clear(object);
{
SaveRegisterStateForCall save_register_state(masm, register_snapshot);
using D = AllocateDescriptor;
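The fast path in this hunk is bump-pointer allocation: load the space's top, compute top plus the (alignment-rounded) size, and defer to a runtime call when the new top would cross the limit. A standalone sketch of that control flow (not V8's allocator; the slow path here merely simulates a refill):

#include <cstddef>
#include <cstdint>

struct Space {
  uintptr_t top;
  uintptr_t limit;
};

// Simulated slow path; in V8 this is the deferred call to the Allocate builtin.
uintptr_t SlowPathAllocate(Space& space, size_t size) {
  space.top = 0x2000;  // pretend the GC handed us a fresh area
  space.limit = 0x3000;
  uintptr_t object = space.top;
  space.top += size;
  return object;
}

uintptr_t AllocateRawModel(Space& space, size_t size_in_bytes) {
  uintptr_t object = space.top;
  uintptr_t new_top = object + size_in_bytes;
  if (new_top >= space.limit) return SlowPathAllocate(space, size_in_bytes);
  space.top = new_top;  // bump; `object` is the start of the new allocation
  return object;
}

int main() {
  Space space{0x1000, 0x1040};
  return AllocateRawModel(space, 32) == 0x1000 ? 0 : 1;
}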
@@ -208,8 +228,8 @@ void ToBoolean(MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
__ CheckSmi(value);
__ JumpToDeferredIf(
zero,
- [](MaglevAssembler* masm, Label* return_label, Register value,
- ZoneLabelRef is_true, ZoneLabelRef is_false) {
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
// Check if {value} is not zero.
__ SmiCompare(value, Smi::FromInt(0));
__ j(equal, *is_false);
@@ -235,8 +255,8 @@ void ToBoolean(MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ JumpToDeferredIf(
equal,
- [](MaglevAssembler* masm, Label* return_label, Register value,
- ZoneLabelRef is_true, ZoneLabelRef is_false) {
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
// Sets scratch register to 0.0.
__ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
// Sets ZF if equal to 0.0, -0.0 or NaN.
@@ -251,8 +271,8 @@ void ToBoolean(MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
__ CompareRoot(map, RootIndex::kBigIntMap);
__ JumpToDeferredIf(
equal,
- [](MaglevAssembler* masm, Label* return_label, Register value,
- ZoneLabelRef is_true, ZoneLabelRef is_false) {
+ [](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
+ ZoneLabelRef is_false) {
__ testl(FieldOperand(value, BigInt::kBitfieldOffset),
Immediate(BigInt::LengthBits::kMask));
__ j(zero, *is_false);
@@ -576,8 +596,9 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
__ FromAnyToRegister(parameters_and_registers(i),
WriteBarrierDescriptor::SlotAddressRegister());
+ ZoneLabelRef done(masm);
DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, Label* return_label, Register value,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register value,
Register array, GeneratorStore* node, int32_t offset) {
ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
// Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
@@ -585,7 +606,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
__ CheckPageFlag(
value, WriteBarrierDescriptor::SlotAddressRegister(),
MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
- zero, return_label);
+ zero, *done);
Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
@@ -600,13 +621,13 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
__ CallRecordWriteStub(array, slot_reg, save_fp_mode);
- __ jmp(return_label);
+ __ jmp(*done);
},
- value, array, this, FixedArray::OffsetOfElementAt(i));
+ done, value, array, this, FixedArray::OffsetOfElementAt(i));
__ StoreTaggedField(FieldOperand(array, FixedArray::OffsetOfElementAt(i)),
value);
- __ JumpIfSmi(value, &deferred_write_barrier->return_label, Label::kNear);
+ __ JumpIfSmi(value, *done, Label::kNear);
// TODO(leszeks): This will stay either false or true throughout this loop.
// Consider hoisting the check out of the loop and duplicating the loop into
// versions with and without the write barrier.
@@ -614,7 +635,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
&deferred_write_barrier->deferred_code_label);
- __ bind(&deferred_write_barrier->return_label);
+ __ bind(*done);
}
// Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
@@ -622,19 +643,20 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
Register context = __ FromAnyToRegister(
context_input(), WriteBarrierDescriptor::SlotAddressRegister());
+ ZoneLabelRef done(masm);
DeferredCodeInfo* deferred_context_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, Label* return_label, Register context,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register context,
Register generator, GeneratorStore* node) {
ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
// Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch
// register, see comment above.
- // TODO(leszeks): The context is almost always going to be in old-space,
- // consider moving this check to the fast path, maybe even as the first
- // bailout.
+ // TODO(leszeks): The context is almost always going to be in
+ // old-space, consider moving this check to the fast path, maybe even
+ // as the first bailout.
__ CheckPageFlag(
context, WriteBarrierDescriptor::SlotAddressRegister(),
MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- return_label);
+ *done);
__ Move(WriteBarrierDescriptor::ObjectRegister(), generator);
generator = WriteBarrierDescriptor::ObjectRegister();
@@ -652,16 +674,16 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm,
__ CallRecordWriteStub(generator, slot_reg, save_fp_mode);
- __ jmp(return_label);
+ __ jmp(*done);
},
- context, generator, this);
+ done, context, generator, this);
__ StoreTaggedField(
FieldOperand(generator, JSGeneratorObject::kContextOffset), context);
__ AssertNotSmi(context);
__ CheckPageFlag(generator, kScratchRegister,
MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
&deferred_context_write_barrier->deferred_code_label);
- __ bind(&deferred_context_write_barrier->return_label);
+ __ bind(*done);
__ StoreTaggedSignedField(
FieldOperand(generator, JSGeneratorObject::kContinuationOffset),
@@ -681,7 +703,7 @@ void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register array = ToRegister(array_input());
Register result_reg = ToRegister(result());
- Register temp = temporaries().PopFirst();
+ Register temp = general_temporaries().PopFirst();
// The input and the output can alias; if that happens, we use a temporary
// register and a move at the end.
@@ -931,6 +953,7 @@ void CreateArrayLiteral::GenerateCode(MaglevAssembler* masm,
__ Push(constant_elements().object());
__ Push(Smi::FromInt(flags()));
__ CallRuntime(Runtime::kCreateArrayLiteral);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
void CreateShallowArrayLiteral::AllocateVreg(
@@ -962,6 +985,7 @@ void CreateObjectLiteral::GenerateCode(MaglevAssembler* masm,
__ Push(boilerplate_descriptor().object());
__ Push(Smi::FromInt(flags()));
__ CallRuntime(Runtime::kCreateObjectLiteral);
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
void CreateEmptyObjectLiteral::AllocateVreg(
@@ -1317,9 +1341,10 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
}
__ Cmp(FieldOperand(object, HeapObject::kMapOffset), map().object());
+ ZoneLabelRef migration_done(masm);
__ JumpToDeferredIf(
not_equal,
- [](MaglevAssembler* masm, Label* return_label, Register object,
+ [](MaglevAssembler* masm, ZoneLabelRef migration_done, Register object,
CheckMapsWithMigration* node, EagerDeoptInfo* deopt_info) {
__ RegisterEagerDeopt(deopt_info, DeoptimizeReason::kWrongMap);
@@ -1365,10 +1390,11 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm,
// Manually load the map pointer without uncompressing it.
__ Cmp(FieldOperand(object, HeapObject::kMapOffset),
node->map().object());
- __ j(equal, return_label);
+ __ j(equal, *migration_done);
__ jmp(&deopt_info->deopt_entry_label);
},
- object, this, eager_deopt_info());
+ migration_done, object, this, eager_deopt_info());
+ __ bind(*migration_done);
}
void CheckMapsWithMigration::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
@@ -1384,15 +1410,14 @@ void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
__ AssertNotSmi(object);
- __ AssertSmi(index);
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
}
- TaggedRegister length(kScratchRegister);
- __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset));
- __ cmp_tagged(index, length.reg());
+ __ SmiUntagField(kScratchRegister,
+ FieldOperand(object, JSArray::kLengthOffset));
+ __ cmpl(index, kScratchRegister);
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
@@ -1406,24 +1431,28 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
__ AssertNotSmi(object);
- __ AssertSmi(index);
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
__ Assert(greater_equal, AbortReason::kUnexpectedValue);
}
__ LoadAnyTaggedField(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ AssertNotSmi(kScratchRegister);
}
- TaggedRegister length(kScratchRegister);
- __ LoadAnyTaggedField(
- length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
- __ cmp_tagged(index, length.reg());
+ __ SmiUntagField(kScratchRegister,
+ FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
+ __ cmpl(index, kScratchRegister);
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
+void DebugBreak::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
+void DebugBreak::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ __ int3();
+}
+
void CheckedInternalizedString::AllocateVreg(
MaglevVregAllocationState* vreg_state) {
UseRegister(object_input());
@@ -1433,7 +1462,7 @@ void CheckedInternalizedString::AllocateVreg(
void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(object_input());
- RegList temps = temporaries();
+ RegList temps = general_temporaries();
Register map_tmp = temps.PopFirst();
if (check_type_ == CheckType::kOmitHeapObjectCheck) {
@@ -1449,9 +1478,10 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
__ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
Immediate(kIsNotStringMask | kIsNotInternalizedMask));
static_assert((kStringTag | kInternalizedTag) == 0);
+ ZoneLabelRef done(masm);
__ JumpToDeferredIf(
not_zero,
- [](MaglevAssembler* masm, Label* return_label, Register object,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register object,
CheckedInternalizedString* node, EagerDeoptInfo* deopt_info,
Register map_tmp) {
__ RecordComment("Deferred Test IsThinString");
@@ -1465,7 +1495,7 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
__ j(zero, &deopt_info->deopt_entry_label);
__ LoadTaggedPointerField(
object, FieldOperand(object, ThinString::kActualOffset));
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ RecordComment("DCHECK IsInternalizedString");
__ LoadMap(map_tmp, object);
__ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
@@ -1473,9 +1503,85 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
static_assert((kStringTag | kInternalizedTag) == 0);
__ Check(zero, AbortReason::kUnexpectedValue);
}
- __ jmp(return_label);
+ __ jmp(*done);
},
- object, this, eager_deopt_info(), map_tmp);
+ done, object, this, eager_deopt_info(), map_tmp);
+ __ bind(*done);
+}
+
+void CheckedObjectToIndex::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+ UseRegister(object_input());
+ DefineAsRegister(vreg_state, this);
+ set_double_temporaries_needed(1);
+}
+void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ Register result_reg = ToRegister(result());
+
+ ZoneLabelRef done(masm);
+ Condition is_smi = __ CheckSmi(object);
+ __ JumpToDeferredIf(
+ NegateCondition(is_smi),
+ [](MaglevAssembler* masm, Register object, Register result_reg,
+ ZoneLabelRef done, CheckedObjectToIndex* node) {
+ Label is_string;
+ __ LoadMap(kScratchRegister, object);
+ __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister,
+ FIRST_STRING_TYPE, LAST_STRING_TYPE);
+ __ j(below_equal, &is_string);
+
+ __ cmpl(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
+ // The IC will go generic if it encounters something other than a
+ // Number or String key.
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node);
+
+ // Heap Number.
+ {
+ DoubleRegister number_value = node->double_temporaries().first();
+ DoubleRegister converted_back = kScratchDoubleReg;
+ // Convert the input float64 value to int32.
+ __ Cvttsd2si(result_reg, number_value);
+ // Convert that int32 value back to float64.
+ __ Cvtlsi2sd(converted_back, result_reg);
+          // Check that the result of the float64->int32->float64 round-trip
+          // is equal to the input (i.e. that the conversion didn't truncate).
+ __ Ucomisd(number_value, converted_back);
+ __ j(equal, *done);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+
+ // String.
+ __ bind(&is_string);
+ {
+ RegisterSnapshot snapshot = node->register_snapshot();
+ snapshot.live_registers.clear(result_reg);
+ DCHECK(!snapshot.live_tagged_registers.has(result_reg));
+ {
+ SaveRegisterStateForCall save_register_state(masm, snapshot);
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(1);
+ __ Move(arg_reg_1, object);
+ __ CallCFunction(
+ ExternalReference::string_to_array_index_function(), 1);
+ // No need for safepoint since this is a fast C call.
+ __ Move(result_reg, kReturnRegister0);
+ }
+ __ cmpl(result_reg, Immediate(0));
+ __ j(greater_equal, *done);
+ __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+ }
+ },
+ object, result_reg, done, this);
+
+ // If we didn't enter the deferred block, we're a Smi.
+ if (result_reg == object) {
+ __ SmiUntag(object);
+ } else {
+ __ SmiUntag(result_reg, object);
+ }
+
+ __ bind(*done);
}
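CheckedObjectToIndex accepts three shapes of key: a Smi (just untag), a HeapNumber whose value is exactly an int32, and a String that parses as a non-negative index; anything else deopts. A standalone model of that dispatch, with deopt rendered as std::nullopt and a deliberately simplified string parser (the real C helper enforces full array-index rules):

#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <variant>

using Key = std::variant<int32_t, double, std::string>;

// Simplified stand-in for the string-to-array-index C call; -1 means "not an
// index". Real array-index parsing also rejects leading zeros, etc.
int32_t StringToArrayIndexModel(const std::string& s) {
  if (s.empty() || s.size() > 9) return -1;
  int32_t value = 0;
  for (char c : s) {
    if (c < '0' || c > '9') return -1;
    value = value * 10 + (c - '0');
  }
  return value;
}

std::optional<int32_t> ObjectToIndexModel(const Key& key) {
  if (const int32_t* smi = std::get_if<int32_t>(&key)) return *smi;
  if (const double* num = std::get_if<double>(&key)) {
    // Exactness test; the range guard also rejects NaN and keeps the cast
    // well-defined in portable C++.
    if (!(*num >= -2147483648.0 && *num <= 2147483647.0)) return std::nullopt;
    int32_t truncated = static_cast<int32_t>(*num);
    if (static_cast<double>(truncated) != *num) return std::nullopt;  // deopt
    return truncated;
  }
  int32_t index = StringToArrayIndexModel(std::get<std::string>(key));
  if (index < 0) return std::nullopt;  // deopt: not a usable index
  return index;
}

int main() {
  assert(*ObjectToIndexModel(Key{int32_t{7}}) == 7);
  assert(*ObjectToIndexModel(Key{3.0}) == 3);
  assert(!ObjectToIndexModel(Key{3.5}));
  assert(*ObjectToIndexModel(Key{std::string("42")}) == 42);
  return 0;
}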
void LoadTaggedField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
@@ -1500,7 +1606,7 @@ void LoadDoubleField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void LoadDoubleField::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register tmp = temporaries().PopFirst();
+ Register tmp = general_temporaries().PopFirst();
Register object = ToRegister(object_input());
__ AssertNotSmi(object);
__ DecompressAnyTagged(tmp, FieldOperand(object, offset()));
@@ -1524,33 +1630,22 @@ void LoadTaggedElement::GenerateCode(MaglevAssembler* masm,
Register index = ToRegister(index_input());
Register result_reg = ToRegister(result());
__ AssertNotSmi(object);
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
__ Assert(above_equal, AbortReason::kUnexpectedValue);
}
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
// Reload since CmpObjectType clobbered the scratch register.
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
}
- __ AssertSmi(index);
- // Zero out top bits of index reg (these were previously either zero already,
- // or the cage base). This technically mutates it, but since it's a Smi, that
- // doesn't matter.
- __ movl(index, index);
- static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size,
- "Folding the Smi shift into the FixedArray entry size shift "
- "only works if the shift is small");
__ DecompressAnyTagged(
- result_reg,
- FieldOperand(kScratchRegister, index,
- static_cast<ScaleFactor>(times_tagged_size -
- (kSmiTagSize + kSmiShiftSize)),
- FixedArray::kHeaderSize));
+ result_reg, FieldOperand(kScratchRegister, index, times_tagged_size,
+ FixedArray::kHeaderSize));
}
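With the index now arriving as an untagged int32 rather than a Smi, the element operand no longer folds the Smi shift into the scale factor; it is plain base + index * tagged-size + header - tag arithmetic. A worked example with illustrative constants (actual sizes depend on pointer compression and the build configuration):

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative values only.
  const uint64_t kTaggedSize = 8;     // bytes per tagged element slot
  const uint64_t kHeaderSize = 16;    // FixedArray header (map + length)
  const uint64_t kHeapObjectTag = 1;  // heap pointers carry a low tag bit

  uint64_t elements = 0x1000 + kHeapObjectTag;  // tagged pointer to the array
  uint64_t index = 3;                           // untagged int32 index

  // FieldOperand(elements, index, times_tagged_size, kHeaderSize):
  uint64_t slot = elements + index * kTaggedSize + kHeaderSize - kHeapObjectTag;
  assert(slot == 0x1000 + 16 + 3 * 8);  // 0x1028
  return 0;
}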
void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
@@ -1564,13 +1659,13 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
Register index = ToRegister(index_input());
DoubleRegister result_reg = ToDoubleRegister(result());
__ AssertNotSmi(object);
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
__ Assert(above_equal, AbortReason::kUnexpectedValue);
}
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
+ if (v8_flags.debug_code) {
__ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE,
kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
@@ -1578,19 +1673,8 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
}
- __ AssertSmi(index);
- // Zero out top bits of index reg (these were previously either zero already,
- // or the cage base). This technically mutates it, but since it's a Smi, that
- // doesn't matter.
- __ movl(index, index);
- static_assert(kSmiTagSize + kSmiShiftSize < times_8,
- "Folding the Smi shift into the FixedArray entry size shift "
- "only works if the shift is small");
- __ Movsd(result_reg,
- FieldOperand(kScratchRegister, index,
- static_cast<ScaleFactor>(times_8 -
- (kSmiTagSize + kSmiShiftSize)),
- FixedDoubleArray::kHeaderSize));
+ __ Movsd(result_reg, FieldOperand(kScratchRegister, index, times_8,
+ FixedDoubleArray::kHeaderSize));
}
void StoreTaggedFieldNoWriteBarrier::AllocateVreg(
@@ -1628,14 +1712,15 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode(
__ AssertNotSmi(object);
__ StoreTaggedField(FieldOperand(object, offset()), value);
+ ZoneLabelRef done(masm);
DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode(
- [](MaglevAssembler* masm, Label* return_label, Register value,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register value,
Register object, StoreTaggedFieldWithWriteBarrier* node) {
ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path");
__ CheckPageFlag(
value, kScratchRegister,
MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
- return_label);
+ *done);
Register slot_reg = WriteBarrierDescriptor::SlotAddressRegister();
RegList saved;
@@ -1654,15 +1739,15 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode(
__ CallRecordWriteStub(object, slot_reg, save_fp_mode);
__ PopAll(saved);
- __ jmp(return_label);
+ __ jmp(*done);
},
- value, object, this);
+ done, value, object, this);
- __ JumpIfSmi(value, &deferred_write_barrier->return_label);
+ __ JumpIfSmi(value, *done);
__ CheckPageFlag(object, kScratchRegister,
MemoryChunk::kPointersFromHereAreInterestingMask, not_zero,
&deferred_write_barrier->deferred_code_label);
- __ bind(&deferred_write_barrier->return_label);
+ __ bind(*done);
}
void StoreTaggedFieldWithWriteBarrier::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
@@ -1745,6 +1830,25 @@ void SetNamedGeneric::PrintParams(std::ostream& os,
os << "(" << name_ << ")";
}
+void StringLength::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+ UseRegister(object_input());
+ DefineAsRegister(vreg_state, this);
+}
+void StringLength::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register object = ToRegister(object_input());
+ if (v8_flags.debug_code) {
+ // Use return register as temporary.
+ Register tmp = ToRegister(result());
+ // Check if {object} is a string.
+ __ AssertNotSmi(object);
+ __ LoadMap(tmp, object);
+ __ CmpInstanceTypeRange(tmp, tmp, FIRST_STRING_TYPE, LAST_STRING_TYPE);
+ __ Check(below_equal, AbortReason::kUnexpectedValue);
+ }
+ __ movl(ToRegister(result()), FieldOperand(object, String::kLengthOffset));
+}
+
void DefineNamedOwnGeneric::AllocateVreg(
MaglevVregAllocationState* vreg_state) {
using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type;
@@ -2016,6 +2120,10 @@ void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ addl(left, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{left} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
@@ -2031,6 +2139,10 @@ void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ subl(left, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{left} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
@@ -2048,10 +2160,14 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
Register right = ToRegister(right_input());
DCHECK_EQ(result, ToRegister(left_input()));
- Register saved_left = temporaries().first();
+ Register saved_left = general_temporaries().first();
__ movl(saved_left, result);
// TODO(leszeks): peephole optimise multiplication by a constant.
__ imull(result, right);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{saved_left, result} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
// If the result is zero, check if either lhs or rhs is negative.
@@ -2082,8 +2198,8 @@ void Int32DivideWithOverflow::AllocateVreg(
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- DCHECK(temporaries().has(rax));
- DCHECK(temporaries().has(rdx));
+ DCHECK(general_temporaries().has(rax));
+ DCHECK(general_temporaries().has(rdx));
Register left = ToRegister(left_input());
Register right = ToRegister(right_input());
__ movl(rax, left);
@@ -2099,9 +2215,10 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
// Check if {right} is positive (and not zero).
__ cmpl(right, Immediate(0));
+ ZoneLabelRef done(masm);
__ JumpToDeferredIf(
less_equal,
- [](MaglevAssembler* masm, Label* return_label, Register right,
+ [](MaglevAssembler* masm, ZoneLabelRef done, Register right,
Int32DivideWithOverflow* node) {
// {right} is negative or zero.
@@ -2122,20 +2239,25 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
// Check if {left} is kMinInt and {right} is -1, in which case we'd have
// to return -kMinInt, which is not representable as Int32.
__ cmpl(rax, Immediate(kMinInt));
- __ j(not_equal, return_label);
+ __ j(not_equal, *done);
__ cmpl(right, Immediate(-1));
- __ j(not_equal, return_label);
+ __ j(not_equal, *done);
// TODO(leszeks): Better DeoptimizeReason = kOverflow, but
// eager_deopt_info is already configured as kNotInt32.
__ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
},
- right, this);
+ done, right, this);
+ __ bind(*done);
// Perform the actual integer division.
__ idivl(right);
// Check that the remainder is zero.
__ cmpl(rdx, Immediate(0));
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{rax, rdx} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
DCHECK_EQ(ToRegister(result()), rax);
}
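Int32DivideWithOverflow only succeeds when the Number result of the division is itself an int32, so a zero divisor, a zero dividend with a negative divisor (which would be -0), the kMinInt/-1 overflow, and any non-zero remainder all end in an eager deopt. A standalone model of those bailout rules (some of the checks sit outside the hunk shown here):

#include <cassert>
#include <cstdint>
#include <optional>

// Succeeds only when left / right is exactly representable as an int32 under
// JS Number semantics; every other case corresponds to an eager deopt.
std::optional<int32_t> Int32DivideModel(int32_t left, int32_t right) {
  if (right == 0) return std::nullopt;                        // Infinity / NaN
  if (left == 0 && right < 0) return std::nullopt;            // result is -0
  if (left == INT32_MIN && right == -1) return std::nullopt;  // overflows int32
  if (left % right != 0) return std::nullopt;                 // fractional
  return left / right;
}

int main() {
  assert(*Int32DivideModel(6, 3) == 2);
  assert(!Int32DivideModel(7, 3));
  assert(!Int32DivideModel(0, -5));
  assert(!Int32DivideModel(INT32_MIN, -1));
  return 0;
}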
@@ -2229,6 +2351,10 @@ void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm,
// TODO(jgruber): Properly track signed/unsigned representations and
// allocate a heap number if the result is outside smi range.
__ testl(left, Immediate((1 << 31) | (1 << 30)));
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{left} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kOverflow, this);
}
@@ -2422,9 +2548,26 @@ void CheckedSmiTag::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register reg = ToRegister(input());
__ addl(reg, reg);
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(RegList{reg} &
+ GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
__ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this);
}
+void UnsafeSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+ UseRegister(input());
+ DefineSameAsFirst(vreg_state, this);
+}
+void UnsafeSmiTag::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ Register reg = ToRegister(input());
+ __ addl(reg, reg);
+ if (v8_flags.debug_code) {
+ __ Check(no_overflow, AbortReason::kInputDoesNotFitSmi);
+ }
+}
+
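On configurations with 31-bit Smis, tagging is value << 1 with a zero tag bit, which is exactly what the `addl reg, reg` above computes; the overflow flag is the "does not fit in 31 bits" signal that CheckedSmiTag deopts on and UnsafeSmiTag merely asserts on in debug builds. A small model of that arithmetic (uses a GCC/Clang overflow builtin):

#include <cassert>
#include <cstdint>
#include <optional>

// Tag an int32 as a 31-bit Smi: value * 2, low (tag) bit zero.
std::optional<int32_t> CheckedSmiTagModel(int32_t value) {
  int32_t tagged;
  if (__builtin_add_overflow(value, value, &tagged)) {
    return std::nullopt;  // value + value wrapped: does not fit in a Smi
  }
  return tagged;
}

int main() {
  assert(*CheckedSmiTagModel(5) == 10);    // Smi encoding of 5
  assert(*CheckedSmiTagModel(-7) == -14);  // negatives work the same way
  assert(!CheckedSmiTagModel(1 << 30));    // 2^30 needs 32 bits once tagged
  return 0;
}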
void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
@@ -2499,41 +2642,53 @@ void LogicalNot::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(value());
Register return_value = ToRegister(result());
- Label not_equal_true;
- // We load the constant true to the return value and we return it if the
- // object is not equal to it. Otherwise we load the constant false.
- __ LoadRoot(return_value, RootIndex::kTrueValue);
- __ cmp_tagged(return_value, object);
- __ j(not_equal, &not_equal_true);
- __ LoadRoot(return_value, RootIndex::kFalseValue);
- if (FLAG_debug_code) {
- Label is_equal_true;
- __ jmp(&is_equal_true);
- __ bind(&not_equal_true);
- // LogicalNot expects either the constants true or false.
- // We know it is not true, so it must be false!
+
+ if (v8_flags.debug_code) {
+ // LogicalNot expects either TrueValue or FalseValue.
+ Label next;
__ CompareRoot(object, RootIndex::kFalseValue);
+ __ j(equal, &next);
+ __ CompareRoot(object, RootIndex::kTrueValue);
__ Check(equal, AbortReason::kUnexpectedValue);
- __ bind(&is_equal_true);
- } else {
- __ bind(&not_equal_true);
+ __ bind(&next);
}
+
+ Label return_false, done;
+ __ CompareRoot(object, RootIndex::kTrueValue);
+ __ j(equal, &return_false, Label::kNear);
+ __ LoadRoot(return_value, RootIndex::kTrueValue);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&return_false);
+ __ LoadRoot(return_value, RootIndex::kFalseValue);
+
+ __ bind(&done);
}
void SetPendingMessage::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(value());
+ set_temporaries_needed(1);
DefineAsRegister(vreg_state, this);
}
void SetPendingMessage::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register message = ToRegister(value());
+ Register new_message = ToRegister(value());
Register return_value = ToRegister(result());
- Isolate* isolate = masm->isolate();
- MemOperand message_op = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_pending_message(isolate), kScratchRegister);
- __ Move(return_value, message_op);
- __ movq(message_op, message);
+
+ MemOperand pending_message_operand = __ ExternalReferenceAsOperand(
+ ExternalReference::address_of_pending_message(masm->isolate()),
+ kScratchRegister);
+
+ if (new_message != return_value) {
+ __ Move(return_value, pending_message_operand);
+ __ movq(pending_message_operand, new_message);
+ } else {
+ Register scratch = general_temporaries().PopFirst();
+ __ Move(scratch, pending_message_operand);
+ __ movq(pending_message_operand, new_message);
+ __ Move(return_value, scratch);
+ }
}
void ToBooleanLogicalNot::AllocateVreg(MaglevVregAllocationState* vreg_state) {
@@ -2550,7 +2705,7 @@ void ToBooleanLogicalNot::GenerateCode(MaglevAssembler* masm,
ToBoolean(masm, object, object_is_true, object_is_false, true);
__ bind(*object_is_true);
__ LoadRoot(return_value, RootIndex::kFalseValue);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(*object_is_false);
__ LoadRoot(return_value, RootIndex::kTrueValue);
__ bind(&done);
@@ -2591,7 +2746,7 @@ void TaggedNotEqual::GenerateCode(MaglevAssembler* masm,
}
void TestInstanceOf::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf>::type;
+ using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
UseFixed(context(), kContextRegister);
UseFixed(object(), D::GetRegisterParameter(D::kLeft));
UseFixed(callable(), D::GetRegisterParameter(D::kRight));
@@ -2599,13 +2754,15 @@ void TestInstanceOf::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void TestInstanceOf::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
+ using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf_WithFeedback>::type;
#ifdef DEBUG
- using D = CallInterfaceDescriptorFor<Builtin::kInstanceOf>::type;
DCHECK_EQ(ToRegister(context()), kContextRegister);
DCHECK_EQ(ToRegister(object()), D::GetRegisterParameter(D::kLeft));
DCHECK_EQ(ToRegister(callable()), D::GetRegisterParameter(D::kRight));
#endif
- __ CallBuiltin(Builtin::kInstanceOf);
+ __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
+ __ CallBuiltin(Builtin::kInstanceOf_WithFeedback);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
@@ -2618,18 +2775,22 @@ void TestUndetectable::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(value());
Register return_value = ToRegister(result());
- RegList temps = temporaries();
- Register tmp = temps.PopFirst();
- Label done;
- __ LoadRoot(return_value, RootIndex::kFalseValue);
- // If the object is an Smi, return false.
- __ JumpIfSmi(object, &done);
- // If it is a HeapObject, load the map and check for the undetectable bit.
- __ LoadMap(tmp, object);
- __ testl(FieldOperand(tmp, Map::kBitFieldOffset),
+ Register scratch = general_temporaries().PopFirst();
+
+ Label return_false, done;
+ __ JumpIfSmi(object, &return_false, Label::kNear);
+ // For heap objects, check the map's undetectable bit.
+ __ LoadMap(scratch, object);
+ __ testl(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsUndetectableBit::kMask));
- __ j(zero, &done);
+ __ j(zero, &return_false, Label::kNear);
+
__ LoadRoot(return_value, RootIndex::kTrueValue);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&return_false);
+ __ LoadRoot(return_value, RootIndex::kFalseValue);
+
__ bind(&done);
}
@@ -2646,80 +2807,80 @@ void TestTypeOf::GenerateCode(MaglevAssembler* masm,
Label is_true, is_false, done;
switch (literal_) {
case LiteralFlag::kNumber:
- __ JumpIfSmi(object, &is_true);
+ __ JumpIfSmi(object, &is_true, Label::kNear);
__ CompareRoot(FieldOperand(object, HeapObject::kMapOffset),
RootIndex::kHeapNumberMap);
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kString:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
__ LoadMap(tmp, object);
__ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(FIRST_NONSTRING_TYPE));
- __ j(greater_equal, &is_false);
+ __ j(greater_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kSymbol:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
__ LoadMap(tmp, object);
__ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(SYMBOL_TYPE));
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kBoolean:
__ CompareRoot(object, RootIndex::kTrueValue);
- __ j(equal, &is_true);
+ __ j(equal, &is_true, Label::kNear);
__ CompareRoot(object, RootIndex::kFalseValue);
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kBigInt:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
__ LoadMap(tmp, object);
__ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(BIGINT_TYPE));
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kUndefined:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
// Check it has the undetectable bit set and it is not null.
__ LoadMap(tmp, object);
__ testl(FieldOperand(tmp, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsUndetectableBit::kMask));
- __ j(zero, &is_false);
+ __ j(zero, &is_false, Label::kNear);
__ CompareRoot(object, RootIndex::kNullValue);
- __ j(equal, &is_false);
+ __ j(equal, &is_false, Label::kNear);
break;
case LiteralFlag::kFunction:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
// Check if callable bit is set and not undetectable.
__ LoadMap(tmp, object);
__ movl(tmp, FieldOperand(tmp, Map::kBitFieldOffset));
__ andl(tmp, Immediate(Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask));
__ cmpl(tmp, Immediate(Map::Bits1::IsCallableBit::kMask));
- __ j(not_equal, &is_false);
+ __ j(not_equal, &is_false, Label::kNear);
break;
case LiteralFlag::kObject:
- __ JumpIfSmi(object, &is_false);
+ __ JumpIfSmi(object, &is_false, Label::kNear);
// If the object is null then return true.
__ CompareRoot(object, RootIndex::kNullValue);
- __ j(equal, &is_true);
+ __ j(equal, &is_true, Label::kNear);
// Check if the object is a receiver type,
__ LoadMap(tmp, object);
__ cmpw(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(FIRST_JS_RECEIVER_TYPE));
- __ j(less, &is_false);
+ __ j(less, &is_false, Label::kNear);
// ... and is not undefined (undetectable) nor callable.
__ testl(FieldOperand(tmp, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask));
- __ j(not_zero, &is_false);
+ __ j(not_zero, &is_false, Label::kNear);
break;
case LiteralFlag::kOther:
UNREACHABLE();
}
__ bind(&is_true);
__ LoadRoot(ToRegister(result()), RootIndex::kTrueValue);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(ToRegister(result()), RootIndex::kFalseValue);
__ bind(&done);
@@ -2823,6 +2984,40 @@ void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
__ Cvtlsi2sd(ToDoubleRegister(result()), ToRegister(input()));
}
+void CheckedTruncateFloat64ToInt32::AllocateVreg(
+ MaglevVregAllocationState* vreg_state) {
+ UseRegister(input());
+ DefineAsRegister(vreg_state, this);
+}
+void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm,
+ const ProcessingState& state) {
+ DoubleRegister input_reg = ToDoubleRegister(input());
+ Register result_reg = ToRegister(result());
+ DoubleRegister converted_back = kScratchDoubleReg;
+
+ // Convert the input float64 value to int32.
+ __ Cvttsd2si(result_reg, input_reg);
+ // Convert that int32 value back to float64.
+ __ Cvtlsi2sd(converted_back, result_reg);
+  // Check that the result of the float64->int32->float64 round-trip is equal
+  // to the input (i.e. that the conversion didn't truncate).
+ __ Ucomisd(input_reg, converted_back);
+ __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this);
+
+ // Check if {input} is -0.
+ Label check_done;
+ __ cmpl(result_reg, Immediate(0));
+ __ j(not_equal, &check_done);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Register high_word32_of_input = kScratchRegister;
+ __ Pextrd(high_word32_of_input, input_reg, 1);
+ __ cmpl(high_word32_of_input, Immediate(0));
+ __ EmitEagerDeoptIf(less, DeoptimizeReason::kNotInt32, this);
+
+ __ bind(&check_done);
+}
+
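The check here is the standard exactness test: truncate, convert back, and compare, with a zero result additionally needing the high word inspected to rule out -0.0. A standalone model of those semantics (plain C++, not the generated code; the range guard keeps the cast well-defined where the machine code instead relies on Cvttsd2si plus the round-trip compare):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <optional>

// Succeed only when the double is exactly an int32; -0.0 fails because
// truncating it to 0 would drop the sign.
std::optional<int32_t> CheckedTruncateModel(double input) {
  if (!(input >= -2147483648.0 && input <= 2147483647.0)) return std::nullopt;
  int32_t truncated = static_cast<int32_t>(input);
  if (static_cast<double>(truncated) != input) return std::nullopt;
  if (truncated == 0) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof bits);  // like Pextrd of the high word
    if (bits >> 63) return std::nullopt;      // sign bit set: input was -0.0
  }
  return truncated;
}

int main() {
  assert(*CheckedTruncateModel(42.0) == 42);
  assert(!CheckedTruncateModel(1.5));   // truncation would lose 0.5
  assert(!CheckedTruncateModel(-0.0));  // -0 is not an int32
  return 0;
}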
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
// Phi inputs are processed in the post-process, once loop phis' inputs'
// v-regs are allocated.
@@ -2892,7 +3087,7 @@ void Call::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
}
void Construct::AllocateVreg(MaglevVregAllocationState* vreg_state) {
- using D = ConstructStubDescriptor;
+ using D = Construct_WithFeedbackDescriptor;
UseFixed(function(), D::GetRegisterParameter(D::kTarget));
UseFixed(new_target(), D::GetRegisterParameter(D::kNewTarget));
UseFixed(context(), kContextRegister);
@@ -2903,7 +3098,7 @@ void Construct::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void Construct::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- using D = ConstructStubDescriptor;
+ using D = Construct_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(function()), D::GetRegisterParameter(D::kTarget));
DCHECK_EQ(ToRegister(new_target()), D::GetRegisterParameter(D::kNewTarget));
DCHECK_EQ(ToRegister(context()), kContextRegister);
@@ -2911,13 +3106,14 @@ void Construct::GenerateCode(MaglevAssembler* masm,
for (int i = num_args() - 1; i >= 0; --i) {
__ PushInput(arg(i));
}
+ __ Push(feedback().vector);
uint32_t arg_count = num_args();
__ Move(D::GetRegisterParameter(D::kActualArgumentsCount),
Immediate(arg_count));
+ __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
- __ CallBuiltin(Builtin::kConstruct);
-
+ __ CallBuiltin(Builtin::kConstruct_WithFeedback);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
}
@@ -2934,7 +3130,6 @@ void CallBuiltin::AllocateVreg(MaglevVregAllocationState* vreg_state) {
if (has_context) {
UseFixed(input(i), kContextRegister);
}
- DCHECK_EQ(descriptor.GetReturnCount(), 1);
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
@@ -3110,7 +3305,7 @@ void IncreaseInterruptBudget::AllocateVreg(
}
void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register scratch = temporaries().first();
+ Register scratch = general_temporaries().first();
__ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
@@ -3128,15 +3323,16 @@ void ReduceInterruptBudget::AllocateVreg(
}
void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register scratch = temporaries().first();
+ Register scratch = general_temporaries().first();
__ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset));
__ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset),
Immediate(amount()));
+ ZoneLabelRef done(masm);
__ JumpToDeferredIf(
less,
- [](MaglevAssembler* masm, Label* return_label,
+ [](MaglevAssembler* masm, ZoneLabelRef done,
ReduceInterruptBudget* node) {
{
SaveRegisterStateForCall save_register_state(
@@ -3148,9 +3344,10 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm,
save_register_state.DefineSafepointWithLazyDeopt(
node->lazy_deopt_info());
}
- __ jmp(return_label);
+ __ jmp(*done);
},
- this);
+ done, this);
+ __ bind(*done);
}
void ReduceInterruptBudget::PrintParams(
std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
@@ -3171,12 +3368,11 @@ void ThrowReferenceErrorIfHole::GenerateCode(MaglevAssembler* masm,
}
__ JumpToDeferredIf(
equal,
- [](MaglevAssembler* masm, Label* return_label,
- ThrowReferenceErrorIfHole* node) {
+ [](MaglevAssembler* masm, ThrowReferenceErrorIfHole* node) {
__ Move(kContextRegister, masm->native_context().object());
__ Push(node->name().object());
__ CallRuntime(Runtime::kThrowAccessedUninitializedVariable, 1);
- masm->DefineLazyDeoptPoint(node->lazy_deopt_info());
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
__ Abort(AbortReason::kUnexpectedReturnFromThrow);
},
this);
@@ -3196,11 +3392,10 @@ void ThrowSuperNotCalledIfHole::GenerateCode(MaglevAssembler* masm,
}
__ JumpToDeferredIf(
equal,
- [](MaglevAssembler* masm, Label* return_label,
- ThrowSuperNotCalledIfHole* node) {
+ [](MaglevAssembler* masm, ThrowSuperNotCalledIfHole* node) {
__ Move(kContextRegister, masm->native_context().object());
__ CallRuntime(Runtime::kThrowSuperNotCalled, 0);
- masm->DefineLazyDeoptPoint(node->lazy_deopt_info());
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
__ Abort(AbortReason::kUnexpectedReturnFromThrow);
},
this);
@@ -3220,11 +3415,10 @@ void ThrowSuperAlreadyCalledIfNotHole::GenerateCode(
}
__ JumpToDeferredIf(
not_equal,
- [](MaglevAssembler* masm, Label* return_label,
- ThrowSuperAlreadyCalledIfNotHole* node) {
+ [](MaglevAssembler* masm, ThrowSuperAlreadyCalledIfNotHole* node) {
__ Move(kContextRegister, masm->native_context().object());
__ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, 0);
- masm->DefineLazyDeoptPoint(node->lazy_deopt_info());
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
__ Abort(AbortReason::kUnexpectedReturnFromThrow);
},
this);
@@ -3242,13 +3436,12 @@ void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm,
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ JumpToDeferredIf(
equal,
- [](MaglevAssembler* masm, Label* return_label,
- ThrowIfNotSuperConstructor* node) {
- __ Move(kContextRegister, masm->native_context().object());
+ [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) {
__ Push(ToRegister(node->constructor()));
__ Push(ToRegister(node->function()));
+ __ Move(kContextRegister, masm->native_context().object());
__ CallRuntime(Runtime::kThrowNotSuperConstructor, 2);
- masm->DefineLazyDeoptPoint(node->lazy_deopt_info());
+ masm->DefineExceptionHandlerAndLazyDeoptPoint(node);
__ Abort(AbortReason::kUnexpectedReturnFromThrow);
},
this);
@@ -3356,7 +3549,8 @@ void JumpFromInlined::GenerateCode(MaglevAssembler* masm,
namespace {
-void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label,
+void AttemptOnStackReplacement(MaglevAssembler* masm,
+ ZoneLabelRef no_code_for_osr,
JumpLoopPrologue* node, Register scratch0,
Register scratch1, int32_t loop_depth,
FeedbackSlot feedback_slot,
@@ -3370,6 +3564,7 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label,
// See also: InterpreterAssembler::OnStackReplacement.
baseline::BaselineAssembler basm(masm);
+ __ AssertFeedbackVector(scratch0);
// Case 1).
Label deopt;
@@ -3381,11 +3576,10 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label,
// Case 2).
{
- __ AssertFeedbackVector(scratch0);
__ movb(scratch0, FieldOperand(scratch0, FeedbackVector::kOsrStateOffset));
__ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch0);
basm.JumpIfByte(baseline::Condition::kUnsignedLessThanEqual, scratch0,
- loop_depth, return_label, Label::kNear);
+ loop_depth, *no_code_for_osr, Label::kNear);
// The osr_urgency exceeds the current loop_depth, signaling an OSR
// request. Call into runtime to compile.
@@ -3413,23 +3607,29 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label,
}
}
});
+ DCHECK(!snapshot.live_registers.has(maybe_target_code));
SaveRegisterStateForCall save_register_state(masm, snapshot);
__ Move(kContextRegister, masm->native_context().object());
__ Push(Smi::FromInt(osr_offset.ToInt()));
__ CallRuntime(Runtime::kCompileOptimizedOSRFromMaglev, 1);
save_register_state.DefineSafepoint();
- __ Move(scratch0, rax);
+ __ Move(maybe_target_code, kReturnRegister0);
}
// A `0` return value means there is no OSR code available yet. Fall
// through for now, OSR code will be picked up once it exists and is
// cached on the feedback vector.
- __ testq(scratch0, scratch0);
- __ j(equal, return_label, Label::kNear);
+ __ Cmp(maybe_target_code, 0);
+ __ j(equal, *no_code_for_osr, Label::kNear);
}
__ bind(&deopt);
- if (V8_LIKELY(FLAG_turbofan)) {
+ if (V8_LIKELY(v8_flags.turbofan)) {
+ // None of the mutated input registers should be a register input into the
+ // eager deopt info.
+ DCHECK_REGLIST_EMPTY(
+ RegList{scratch0, scratch1} &
+ GetGeneralRegistersUsedAsInputs(node->eager_deopt_info()));
__ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement);
} else {
// Fall through. With TF disabled we cannot OSR and thus it doesn't make
@@ -3446,8 +3646,8 @@ void JumpLoopPrologue::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void JumpLoopPrologue::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
- Register scratch0 = temporaries().PopFirst();
- Register scratch1 = temporaries().PopFirst();
+ Register scratch0 = general_temporaries().PopFirst();
+ Register scratch1 = general_temporaries().PopFirst();
const Register osr_state = scratch1;
__ Move(scratch0, unit_->feedback().object());
@@ -3459,8 +3659,11 @@ void JumpLoopPrologue::GenerateCode(MaglevAssembler* masm,
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
FeedbackVector::kMaxOsrUrgency);
__ cmpl(osr_state, Immediate(loop_depth_));
- __ JumpToDeferredIf(above, AttemptOnStackReplacement, this, scratch0,
- scratch1, loop_depth_, feedback_slot_, osr_offset_);
+ ZoneLabelRef no_code_for_osr(masm);
+ __ JumpToDeferredIf(above, AttemptOnStackReplacement, no_code_for_osr, this,
+ scratch0, scratch1, loop_depth_, feedback_slot_,
+ osr_offset_);
+ __ bind(*no_code_for_osr);
}
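The single cmpl(osr_state, loop_depth) works because the urgency bits sit in the low bits and the maybe-has-OSR-code bit is encoded above kMaxOsrUrgency, so one unsigned compare catches both "urgency exceeds this loop's depth" and "cached OSR code may exist". A worked model with an illustrative bit layout:

#include <cassert>
#include <cstdint>

// Illustrative layout: urgency in the low bits, the "maybe has OSR code" flag
// encoded above the largest legal urgency (mirroring the static_assert above).
constexpr uint32_t kMaxOsrUrgency = 6;
constexpr uint32_t kMaybeHasOsrCodeBit = 8;  // > kMaxOsrUrgency by construction

bool ShouldTakeOsrSlowPath(uint32_t osr_state, uint32_t loop_depth) {
  return osr_state > loop_depth;  // the one unsigned compare in the fast path
}

int main() {
  assert(!ShouldTakeOsrSlowPath(/*urgency*/ 2, /*loop_depth*/ 2));
  assert(ShouldTakeOsrSlowPath(/*urgency*/ 3, /*loop_depth*/ 2));
  assert(ShouldTakeOsrSlowPath(kMaybeHasOsrCodeBit | 1, /*loop_depth*/ 6));
  return 0;
}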
void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
index a1766807f9..7616941df1 100644
--- a/deps/v8/src/maglev/maglev-ir.h
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -15,6 +15,7 @@
#include "src/common/globals.h"
#include "src/common/operation.h"
#include "src/compiler/backend/instruction.h"
+#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/interpreter/bytecode-flags.h"
@@ -117,68 +118,72 @@ class CompactInterpreterFrameState;
V(RootConstant) \
V(SmiConstant)
-#define VALUE_NODE_LIST(V) \
- V(Call) \
- V(CallBuiltin) \
- V(CallRuntime) \
- V(CallWithSpread) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(CreateEmptyArrayLiteral) \
- V(CreateArrayLiteral) \
- V(CreateShallowArrayLiteral) \
- V(CreateObjectLiteral) \
- V(CreateEmptyObjectLiteral) \
- V(CreateShallowObjectLiteral) \
- V(CreateFunctionContext) \
- V(CreateClosure) \
- V(FastCreateClosure) \
- V(CreateRegExpLiteral) \
- V(DeleteProperty) \
- V(ForInPrepare) \
- V(ForInNext) \
- V(GeneratorRestoreRegister) \
- V(GetIterator) \
- V(GetSecondReturnedValue) \
- V(GetTemplateObject) \
- V(InitialValue) \
- V(LoadTaggedField) \
- V(LoadDoubleField) \
- V(LoadTaggedElement) \
- V(LoadDoubleElement) \
- V(LoadGlobal) \
- V(LoadNamedGeneric) \
- V(LoadNamedFromSuperGeneric) \
- V(SetNamedGeneric) \
- V(DefineNamedOwnGeneric) \
- V(StoreInArrayLiteralGeneric) \
- V(StoreGlobal) \
- V(GetKeyedGeneric) \
- V(SetKeyedGeneric) \
- V(DefineKeyedOwnGeneric) \
- V(Phi) \
- V(RegisterInput) \
- V(CheckedSmiTag) \
- V(CheckedSmiUntag) \
- V(CheckedInternalizedString) \
- V(ChangeInt32ToFloat64) \
- V(Float64Box) \
- V(CheckedFloat64Unbox) \
- V(LogicalNot) \
- V(SetPendingMessage) \
- V(ToBooleanLogicalNot) \
- V(TaggedEqual) \
- V(TaggedNotEqual) \
- V(TestInstanceOf) \
- V(TestUndetectable) \
- V(TestTypeOf) \
- V(ToName) \
- V(ToNumberOrNumeric) \
- V(ToObject) \
- V(ToString) \
- CONSTANT_VALUE_NODE_LIST(V) \
- INT32_OPERATIONS_NODE_LIST(V) \
- FLOAT64_OPERATIONS_NODE_LIST(V) \
+#define VALUE_NODE_LIST(V) \
+ V(Call) \
+ V(CallBuiltin) \
+ V(CallRuntime) \
+ V(CallWithSpread) \
+ V(Construct) \
+ V(ConstructWithSpread) \
+ V(CreateEmptyArrayLiteral) \
+ V(CreateArrayLiteral) \
+ V(CreateShallowArrayLiteral) \
+ V(CreateObjectLiteral) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateShallowObjectLiteral) \
+ V(CreateFunctionContext) \
+ V(CreateClosure) \
+ V(FastCreateClosure) \
+ V(CreateRegExpLiteral) \
+ V(DeleteProperty) \
+ V(ForInPrepare) \
+ V(ForInNext) \
+ V(GeneratorRestoreRegister) \
+ V(GetIterator) \
+ V(GetSecondReturnedValue) \
+ V(GetTemplateObject) \
+ V(InitialValue) \
+ V(LoadTaggedField) \
+ V(LoadDoubleField) \
+ V(LoadTaggedElement) \
+ V(LoadDoubleElement) \
+ V(LoadGlobal) \
+ V(LoadNamedGeneric) \
+ V(LoadNamedFromSuperGeneric) \
+ V(SetNamedGeneric) \
+ V(DefineNamedOwnGeneric) \
+ V(StoreInArrayLiteralGeneric) \
+ V(StoreGlobal) \
+ V(GetKeyedGeneric) \
+ V(SetKeyedGeneric) \
+ V(DefineKeyedOwnGeneric) \
+ V(Phi) \
+ V(RegisterInput) \
+ V(CheckedSmiTag) \
+ V(UnsafeSmiTag) \
+ V(CheckedSmiUntag) \
+ V(CheckedInternalizedString) \
+ V(CheckedObjectToIndex) \
+ V(ChangeInt32ToFloat64) \
+ V(CheckedTruncateFloat64ToInt32) \
+ V(Float64Box) \
+ V(CheckedFloat64Unbox) \
+ V(LogicalNot) \
+ V(SetPendingMessage) \
+ V(StringLength) \
+ V(ToBooleanLogicalNot) \
+ V(TaggedEqual) \
+ V(TaggedNotEqual) \
+ V(TestInstanceOf) \
+ V(TestUndetectable) \
+ V(TestTypeOf) \
+ V(ToName) \
+ V(ToNumberOrNumeric) \
+ V(ToObject) \
+ V(ToString) \
+ CONSTANT_VALUE_NODE_LIST(V) \
+ INT32_OPERATIONS_NODE_LIST(V) \
+ FLOAT64_OPERATIONS_NODE_LIST(V) \
GENERIC_OPERATIONS_NODE_LIST(V)
#define GAP_MOVE_NODE_LIST(V) \
@@ -196,6 +201,7 @@ class CompactInterpreterFrameState;
V(CheckMapsWithMigration) \
V(CheckJSArrayBounds) \
V(CheckJSObjectElementsBounds) \
+ V(DebugBreak) \
V(GeneratorStore) \
V(JumpLoopPrologue) \
V(StoreTaggedFieldNoWriteBarrier) \
@@ -442,11 +448,22 @@ class BasicBlockRef {
return next_ref_ != nullptr;
}
+ int interrupt_budget_correction() const {
+ DCHECK_EQ(state_, kRefList);
+ return interrupt_budget_correction_;
+ }
+
+ void set_interrupt_budget_correction(int interrupt_budget_correction) {
+ DCHECK_EQ(state_, kRefList);
+ interrupt_budget_correction_ = interrupt_budget_correction;
+ }
+
private:
union {
BasicBlock* block_ptr_;
BasicBlockRef* next_ref_;
};
+ int interrupt_budget_correction_ = 0;
#ifdef DEBUG
enum { kBlockPointer, kRefList } state_;
#endif // DEBUG
@@ -616,6 +633,7 @@ class ValueLocation {
}
bool IsAnyRegister() const { return operand_.IsAnyRegister(); }
+ bool IsGeneralRegister() const { return operand_.IsRegister(); }
bool IsDoubleRegister() const { return operand_.IsDoubleRegister(); }
const compiler::InstructionOperand& operand() const { return operand_; }
@@ -761,8 +779,10 @@ class NodeBase : public ZoneObject {
using OpPropertiesField =
OpcodeField::Next<OpProperties, OpProperties::kSize>;
using NumTemporariesNeededField = OpPropertiesField::Next<uint8_t, 2>;
+ using NumDoubleTemporariesNeededField =
+ NumTemporariesNeededField::Next<uint8_t, 1>;
// Align input count to 32-bit.
- using UnusedField = NumTemporariesNeededField::Next<uint8_t, 3>;
+ using UnusedField = NumDoubleTemporariesNeededField::Next<uint8_t, 2>;
using InputCountField = UnusedField::Next<size_t, 17>;
static_assert(InputCountField::kShift == 32);
@@ -874,13 +894,35 @@ class NodeBase : public ZoneObject {
id_ = id;
}
+ template <typename RegisterT>
uint8_t num_temporaries_needed() const {
- return NumTemporariesNeededField::decode(bitfield_);
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ return NumTemporariesNeededField::decode(bitfield_);
+ } else {
+ return NumDoubleTemporariesNeededField::decode(bitfield_);
+ }
+ }
+
+ template <typename RegisterT>
+ RegListBase<RegisterT>& temporaries() {
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ return temporaries_;
+ } else {
+ return double_temporaries_;
+ }
}
- RegList& temporaries() { return temporaries_; }
+ RegList& general_temporaries() { return temporaries_; }
+ DoubleRegList& double_temporaries() { return double_temporaries_; }
- void assign_temporaries(RegList list) { temporaries_ = list; }
+ template <typename RegisterT>
+ void assign_temporaries(RegListBase<RegisterT> list) {
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ temporaries_ = list;
+ } else {
+ double_temporaries_ = list;
+ }
+ }
void Print(std::ostream& os, MaglevGraphLabeller*,
bool skip_targets = false) const;
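The templated accessors above pick between the general and the double temporary sets purely at compile time. A standalone model of that selection mechanism, with the member types simplified to bitmasks:

#include <cstdint>
#include <type_traits>

struct Register { int code; };
struct DoubleRegister { int code; };

struct TemporariesModel {
  uint64_t general_mask = 0;
  uint64_t double_mask = 0;

  template <typename RegisterT>
  uint64_t& temporaries() {
    if constexpr (std::is_same_v<RegisterT, Register>) {
      return general_mask;
    } else {
      static_assert(std::is_same_v<RegisterT, DoubleRegister>);
      return double_mask;
    }
  }
};

int main() {
  TemporariesModel node;
  node.temporaries<Register>() |= 1;        // claim a general-purpose register
  node.temporaries<DoubleRegister>() |= 2;  // claim a double register
  return (node.general_mask == 1 && node.double_mask == 2) ? 0 : 1;
}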
@@ -949,14 +991,23 @@ class NodeBase : public ZoneObject {
//
// Does not include any registers requested by RequireSpecificTemporary.
void set_temporaries_needed(uint8_t value) {
- DCHECK_EQ(num_temporaries_needed(), 0);
+ DCHECK_EQ(num_temporaries_needed<Register>(), 0);
bitfield_ = NumTemporariesNeededField::update(bitfield_, value);
}
+ void set_double_temporaries_needed(uint8_t value) {
+ DCHECK_EQ(num_temporaries_needed<DoubleRegister>(), 0);
+ bitfield_ = NumDoubleTemporariesNeededField::update(bitfield_, value);
+ }
+
// Require that a specific register is free (and therefore clobberable) by the
// entry into this node.
void RequireSpecificTemporary(Register reg) { temporaries_.set(reg); }
+ void RequireSpecificDoubleTemporary(DoubleRegister reg) {
+ double_temporaries_.set(reg);
+ }
+
private:
template <class Derived, typename... Args>
static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
@@ -1021,6 +1072,7 @@ class NodeBase : public ZoneObject {
uint64_t bitfield_;
NodeIdT id_ = kInvalidNodeId;
RegList temporaries_;
+ DoubleRegList double_temporaries_;
NodeBase() = delete;
NodeBase(const NodeBase&) = delete;
@@ -1147,7 +1199,7 @@ class ValueNode : public Node {
struct LiveRange {
NodeIdT start = kInvalidNodeId;
- NodeIdT end = kInvalidNodeId;
+ NodeIdT end = kInvalidNodeId; // Inclusive.
};
bool has_valid_live_range() const { return end_id_ != 0; }
@@ -1652,6 +1704,20 @@ class CheckedSmiTag : public FixedInputValueNodeT<1, CheckedSmiTag> {
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};
+// The input must be guaranteed to fit in a Smi.
+class UnsafeSmiTag : public FixedInputValueNodeT<1, UnsafeSmiTag> {
+ using Base = FixedInputValueNodeT<1, UnsafeSmiTag>;
+
+ public:
+ explicit UnsafeSmiTag(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::ConversionNode();
+
+ Input& input() { return Node::input(0); }
+
+ DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> {
using Base = FixedInputValueNodeT<1, CheckedSmiUntag>;
@@ -1746,6 +1812,22 @@ class ChangeInt32ToFloat64
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};
+class CheckedTruncateFloat64ToInt32
+ : public FixedInputValueNodeT<1, CheckedTruncateFloat64ToInt32> {
+ using Base = FixedInputValueNodeT<1, CheckedTruncateFloat64ToInt32>;
+
+ public:
+ explicit CheckedTruncateFloat64ToInt32(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
+ OpProperties::Int32() |
+ OpProperties::ConversionNode();
+
+ Input& input() { return Node::input(0); }
+
+ DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class CheckedFloat64Unbox
: public FixedInputValueNodeT<1, CheckedFloat64Unbox> {
using Base = FixedInputValueNodeT<1, CheckedFloat64Unbox>;
@@ -1824,7 +1906,8 @@ class TestInstanceOf : public FixedInputValueNodeT<3, TestInstanceOf> {
using Base = FixedInputValueNodeT<3, TestInstanceOf>;
public:
- explicit TestInstanceOf(uint64_t bitfield) : Base(bitfield) {}
+ explicit TestInstanceOf(uint64_t bitfield, compiler::FeedbackSource feedback)
+ : Base(bitfield), feedback_(feedback) {}
// The implementation currently calls runtime.
static constexpr OpProperties kProperties = OpProperties::JSCall();
@@ -1832,8 +1915,12 @@ class TestInstanceOf : public FixedInputValueNodeT<3, TestInstanceOf> {
Input& context() { return input(0); }
Input& object() { return input(1); }
Input& callable() { return input(2); }
+ compiler::FeedbackSource feedback() const { return feedback_; }
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+
+ private:
+ const compiler::FeedbackSource feedback_;
};
class TestUndetectable : public FixedInputValueNodeT<1, TestUndetectable> {
@@ -2197,9 +2284,13 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
DECL_NODE_INTERFACE()
+ compiler::HeapObjectRef object() { return object_; }
+
void DoLoadToRegister(MaglevAssembler*, OutputRegister);
Handle<Object> DoReify(LocalIsolate* isolate);
+ const compiler::HeapObjectRef& ref() const { return object_; }
+
private:
const compiler::HeapObjectRef object_;
};
@@ -2265,7 +2356,8 @@ class CreateArrayLiteral : public FixedInputValueNodeT<0, CreateArrayLiteral> {
int flags() const { return flags_; }
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties =
+ OpProperties::Call() | OpProperties::Throw() | OpProperties::LazyDeopt();
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
@@ -2325,7 +2417,8 @@ class CreateObjectLiteral
int flags() const { return flags_; }
// The implementation currently calls runtime.
- static constexpr OpProperties kProperties = OpProperties::Call();
+ static constexpr OpProperties kProperties =
+ OpProperties::Call() | OpProperties::Throw() | OpProperties::LazyDeopt();
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
@@ -2700,6 +2793,15 @@ class CheckJSObjectElementsBounds
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};
+class DebugBreak : public FixedInputNodeT<0, DebugBreak> {
+ using Base = FixedInputNodeT<0, DebugBreak>;
+
+ public:
+ explicit DebugBreak(uint64_t bitfield) : Base(bitfield) {}
+
+ DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class CheckedInternalizedString
: public FixedInputValueNodeT<1, CheckedInternalizedString> {
using Base = FixedInputValueNodeT<1, CheckedInternalizedString>;
@@ -2711,9 +2813,8 @@ class CheckedInternalizedString
CHECK_EQ(properties().value_representation(), ValueRepresentation::kTagged);
}
- static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
- OpProperties::TaggedValue() |
- OpProperties::ConversionNode();
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::TaggedValue();
static constexpr int kObjectIndex = 0;
Input& object_input() { return Node::input(kObjectIndex); }
@@ -2724,6 +2825,23 @@ class CheckedInternalizedString
const CheckType check_type_;
};
+class CheckedObjectToIndex
+ : public FixedInputValueNodeT<1, CheckedObjectToIndex> {
+ using Base = FixedInputValueNodeT<1, CheckedObjectToIndex>;
+
+ public:
+ explicit CheckedObjectToIndex(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::EagerDeopt() | OpProperties::Int32() |
+ OpProperties::DeferredCall() | OpProperties::ConversionNode();
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return Node::input(kObjectIndex); }
+
+ DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class GetTemplateObject : public FixedInputValueNodeT<1, GetTemplateObject> {
using Base = FixedInputValueNodeT<1, GetTemplateObject>;
@@ -3012,6 +3130,21 @@ class SetNamedGeneric : public FixedInputValueNodeT<3, SetNamedGeneric> {
const compiler::FeedbackSource feedback_;
};
+class StringLength : public FixedInputValueNodeT<1, StringLength> {
+ using Base = FixedInputValueNodeT<1, StringLength>;
+
+ public:
+ explicit StringLength(uint64_t bitfield) : Base(bitfield) {}
+
+ static constexpr OpProperties kProperties =
+ OpProperties::Reading() | OpProperties::Int32();
+
+ static constexpr int kObjectIndex = 0;
+ Input& object_input() { return input(kObjectIndex); }
+
+ DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class DefineNamedOwnGeneric
: public FixedInputValueNodeT<3, DefineNamedOwnGeneric> {
using Base = FixedInputValueNodeT<3, DefineNamedOwnGeneric>;
@@ -3210,6 +3343,8 @@ class Phi : public ValueNodeT<Phi> {
using Node::reduce_input_count;
using Node::set_input;
+ bool is_exception_phi() const { return input_count() == 0; }
+
DECL_NODE_INTERFACE()
void AllocateVregInPostProcess(MaglevVregAllocationState*);
@@ -3279,9 +3414,9 @@ class Construct : public ValueNodeT<Construct> {
// This ctor is used for variable input counts.
// Inputs must be initialized manually.
- Construct(uint64_t bitfield, ValueNode* function, ValueNode* new_target,
- ValueNode* context)
- : Base(bitfield) {
+ Construct(uint64_t bitfield, const compiler::FeedbackSource& feedback,
+ ValueNode* function, ValueNode* new_target, ValueNode* context)
+ : Base(bitfield), feedback_(feedback) {
set_input(kFunctionIndex, function);
set_input(kNewTargetIndex, new_target);
set_input(kContextIndex, context);
@@ -3300,8 +3435,12 @@ class Construct : public ValueNodeT<Construct> {
void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node);
}
+ compiler::FeedbackSource feedback() const { return feedback_; }
DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+
+ private:
+ const compiler::FeedbackSource feedback_;
};
class CallBuiltin : public ValueNodeT<CallBuiltin> {
@@ -3377,6 +3516,10 @@ class CallBuiltin : public ValueNodeT<CallBuiltin> {
void set_arg(int i, ValueNode* node) { set_input(i, node); }
+ int ReturnCount() const {
+ return Builtins::CallInterfaceDescriptorFor(builtin_).GetReturnCount();
+ }
+
DECL_NODE_INTERFACE()
private:
@@ -3556,7 +3699,7 @@ class ThrowReferenceErrorIfHole
: Base(bitfield), name_(name) {}
static constexpr OpProperties kProperties =
- OpProperties::LazyDeopt() | OpProperties::DeferredCall();
+ OpProperties::Throw() | OpProperties::DeferredCall();
const compiler::NameRef& name() const { return name_; }
@@ -3576,7 +3719,7 @@ class ThrowSuperNotCalledIfHole
explicit ThrowSuperNotCalledIfHole(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
- OpProperties::LazyDeopt() | OpProperties::DeferredCall();
+ OpProperties::Throw() | OpProperties::DeferredCall();
Input& value() { return Node::input(0); }
@@ -3592,7 +3735,7 @@ class ThrowSuperAlreadyCalledIfNotHole
: Base(bitfield) {}
static constexpr OpProperties kProperties =
- OpProperties::LazyDeopt() | OpProperties::DeferredCall();
+ OpProperties::Throw() | OpProperties::DeferredCall();
Input& value() { return Node::input(0); }
@@ -3607,7 +3750,7 @@ class ThrowIfNotSuperConstructor
explicit ThrowIfNotSuperConstructor(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
- OpProperties::LazyDeopt() | OpProperties::DeferredCall();
+ OpProperties::Throw() | OpProperties::DeferredCall();
Input& constructor() { return Node::input(0); }
Input& function() { return Node::input(1); }
@@ -3701,6 +3844,12 @@ class BranchControlNode : public ConditionalControlNode {
BasicBlock* if_true() const { return if_true_.block_ptr(); }
BasicBlock* if_false() const { return if_false_.block_ptr(); }
+ void set_true_interrupt_correction(int interrupt_budget_correction) {
+ if_true_.set_interrupt_budget_correction(interrupt_budget_correction);
+ }
+ void set_false_interrupt_correction(int interrupt_budget_correction) {
+ if_false_.set_interrupt_budget_correction(interrupt_budget_correction);
+ }
private:
BasicBlockRef if_true_;
diff --git a/deps/v8/src/maglev/maglev-regalloc.cc b/deps/v8/src/maglev/maglev-regalloc.cc
index 9cc1d02636..b6189dc6ce 100644
--- a/deps/v8/src/maglev/maglev-regalloc.cc
+++ b/deps/v8/src/maglev/maglev-regalloc.cc
@@ -283,7 +283,7 @@ void StraightForwardRegisterAllocator::PrintLiveRegs() const {
}
void StraightForwardRegisterAllocator::AllocateRegisters() {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_.reset(new MaglevPrintingVisitor(
compilation_info_->graph_labeller(), std::cout));
printing_visitor_->PreProcessGraph(graph_);
@@ -319,6 +319,14 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
if (block->state()->is_exception_handler()) {
// Exceptions start from a blank state of register values.
ClearRegisterValues();
+ } else if (block->state()->is_resumable_loop() &&
+ block->state()->predecessor_count() <= 1) {
+ // Loops that are only reachable through JumpLoop start from a blank
+ // state of register values.
+ // This should actually only support predecessor_count == 1, but we
+ // currently don't eliminate resumable loop headers (and subsequent code
+ // until the next resume) that end up being unreachable from JumpLoop.
+ ClearRegisterValues();
} else {
InitializeRegisterValues(block->state()->register_state());
}
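
Restated outside of V8 (the enum and parameter names below are illustrative stand-ins for the MergePointInterpreterFrameState queries used above), the block-entry decision this hunk implements is roughly:

enum class BlockEntryRegisterState {
  kBlank,                   // ClearRegisterValues()
  kMergedFromPredecessors,  // InitializeRegisterValues(register_state())
  kEmptyBlock               // InitializeRegisterValues(empty_block_register_state())
};

BlockEntryRegisterState EntryStateFor(bool has_merge_state,
                                      bool is_exception_handler,
                                      bool is_resumable_loop,
                                      int predecessor_count) {
  if (!has_merge_state) return BlockEntryRegisterState::kEmptyBlock;
  if (is_exception_handler) return BlockEntryRegisterState::kBlank;
  // Resumable loop headers reachable only via JumpLoop have no forward
  // predecessor to inherit register values from, so they also start blank.
  if (is_resumable_loop && predecessor_count <= 1) {
    return BlockEntryRegisterState::kBlank;
  }
  return BlockEntryRegisterState::kMergedFromPredecessors;
}
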
@@ -326,7 +334,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
InitializeRegisterValues(block->empty_block_register_state());
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->PreProcessBasicBlock(block);
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
@@ -391,7 +399,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
if (phi->owner() == interpreter::Register::virtual_accumulator() &&
!phi->is_dead()) {
phi->result().SetAllocated(ForceAllocate(kReturnRegister0, phi));
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(phi, ProcessingState(block_it_));
printing_visitor_->os() << "phi (exception message object) "
<< phi->result().operand() << std::endl;
@@ -411,7 +419,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
compiler::AllocatedOperand allocation =
general_registers_.AllocateRegister(phi);
phi->result().SetAllocated(allocation);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(phi, ProcessingState(block_it_));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
@@ -428,14 +436,14 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
AllocateSpillSlot(phi);
// TODO(verwaest): Will this be used at all?
phi->result().SetAllocated(phi->spill_slot());
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(phi, ProcessingState(block_it_));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << std::endl;
@@ -470,7 +478,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
if (!node->is_dead()) return;
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " freeing " << PrintNodeLabel(graph_labeller(), node) << "\n";
}
@@ -498,7 +506,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
detail::DeepForEachInput(
&deopt_info,
[&](ValueNode* node, interpreter::Register reg, InputLocation* input) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
}
@@ -513,26 +521,17 @@ void StraightForwardRegisterAllocator::UpdateUse(
void StraightForwardRegisterAllocator::UpdateUse(
const LazyDeoptInfo& deopt_info) {
- const CompactInterpreterFrameState* checkpoint_state =
- deopt_info.state.register_frame;
- int index = 0;
- // TODO(leszeks): This is missing parent recursion, fix it.
- // See also: UpdateUse(EagerDeoptInfo&).
- checkpoint_state->ForEachValue(
- deopt_info.unit, [&](ValueNode* node, interpreter::Register reg) {
- // Skip over the result location since it is irrelevant for lazy deopts
- // (unoptimized code will recreate the result).
- if (deopt_info.IsResultRegister(reg)) return;
- if (FLAG_trace_maglev_regalloc) {
+ detail::DeepForEachInput(
+ &deopt_info,
+ [&](ValueNode* node, interpreter::Register reg, InputLocation* input) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- using " << PrintNodeLabel(graph_labeller(), node) << "\n";
}
- InputLocation* input = &deopt_info.input_locations[index++];
- // We might have dropped this node without spilling it. Spill it now.
- if (!node->has_register() && !node->is_loadable()) {
- Spill(node);
- }
- input->InjectLocation(node->allocation());
+ // Lazy deopts always need spilling, and should always be loaded from
+ // their loadable slot.
+ Spill(node);
+ input->InjectLocation(node->loadable_slot());
UpdateUse(node, input);
});
}
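
The rationale for the unconditional Spill above is that lazy deopts are resolved after a call, when register contents have been clobbered, so the inputs can only be described by stack slots. A toy restatement of the resulting location policy (stand-in types, not V8's):

#include <functional>

// Stand-in value type; illustrative only.
struct ToyValue {
  bool spilled = false;
  int stack_slot = -1;
};

// Eager deopts happen before the node executes, so an input still held in a
// register can be described by that register. Lazy deopts are taken after a
// call has clobbered the registers, so every input is spilled and described
// by its loadable stack slot instead.
int LazyDeoptInputLocation(ToyValue& value,
                           const std::function<void(ToyValue&)>& spill) {
  if (!value.spilled) spill(value);  // ensure a stack slot exists
  return value.stack_slot;           // always report the loadable slot
}
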
@@ -555,7 +554,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
DCHECK(!node->Is<ConstantGapMove>());
current_node_ = node;
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "Allocating " << PrintNodeLabel(graph_labeller(), node)
<< " inputs...\n";
@@ -567,26 +566,26 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
// Allocate node output.
if (node->Is<ValueNode>()) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "Allocating result...\n";
}
AllocateNodeResult(node->Cast<ValueNode>());
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "Updating uses...\n";
}
// Update uses only after allocating the node result. This order is necessary
// to avoid emitting input-clobbering gap moves during node result allocation.
if (node->properties().can_eager_deopt()) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "Using eager deopt nodes...\n";
}
UpdateUse(*node->eager_deopt_info());
}
for (Input& input : *node) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "Using input " << PrintNodeLabel(graph_labeller(), input.node())
<< "...\n";
@@ -596,7 +595,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
// Lazy deopts are semantically after the node, so update them last.
if (node->properties().can_lazy_deopt()) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "Using lazy deopt nodes...\n";
}
UpdateUse(*node->lazy_deopt_info());
@@ -604,7 +603,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
@@ -615,8 +614,10 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
// result, which could be written into a register that was previously
// considered a temporary.
DCHECK_EQ(general_registers_.free() |
- (node->temporaries() - GetNodeResultRegister(node)),
+ (node->general_temporaries() - GetNodeResultRegister(node)),
general_registers_.free());
+ DCHECK_EQ(double_registers_.free() | node->double_temporaries(),
+ double_registers_.free());
general_registers_.clear_blocked();
double_registers_.clear_blocked();
VerifyRegisterState();
@@ -628,7 +629,8 @@ void StraightForwardRegisterAllocator::DropRegisterValueAtEnd(RegisterT reg) {
list.unblock(reg);
if (!list.free().has(reg)) {
ValueNode* node = list.GetValue(reg);
- // If the is not live after the current node, just remove its value.
+ // If the register is not live after the current node, just remove its
+ // value.
if (node->live_range().end == current_node_->id()) {
node->RemoveRegister(reg);
} else {
@@ -716,7 +718,7 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
ValueNode* node = registers.GetValue(reg);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " dropping " << reg << " value "
<< PrintNodeLabel(graph_labeller(), node) << "\n";
}
@@ -798,71 +800,6 @@ void StraightForwardRegisterAllocator::InitializeConditionalBranchTarget(
target);
}
-#ifdef DEBUG
-namespace {
-
-bool IsReachable(BasicBlock* source_block, BasicBlock* target_block,
- std::set<BasicBlock*>& visited) {
- if (source_block == target_block) return true;
- if (!visited.insert(source_block).second) return false;
-
- ControlNode* control_node = source_block->control_node();
- if (UnconditionalControlNode* unconditional =
- control_node->TryCast<UnconditionalControlNode>()) {
- return IsReachable(unconditional->target(), target_block, visited);
- }
- if (BranchControlNode* branch = control_node->TryCast<BranchControlNode>()) {
- return IsReachable(branch->if_true(), target_block, visited) ||
- IsReachable(branch->if_true(), target_block, visited);
- }
- if (Switch* switch_node = control_node->TryCast<Switch>()) {
- const BasicBlockRef* targets = switch_node->targets();
- for (int i = 0; i < switch_node->size(); i++) {
- if (IsReachable(source_block, targets[i].block_ptr(), visited)) {
- return true;
- }
- }
- if (switch_node->has_fallthrough()) {
- if (IsReachable(source_block, switch_node->fallthrough(), visited)) {
- return true;
- }
- }
- return false;
- }
- return false;
-}
-
-// Complex predicate for a JumpLoop lifetime extension DCHECK, see comments
-// in AllocateControlNode.
-bool IsValueFromGeneratorResumeThatDoesNotReachJumpLoop(
- Graph* graph, ValueNode* input_node, BasicBlock* jump_loop_block) {
- // The given node _must_ be created in the generator resume block. This is
- // always the third block -- the first is inital values, the second is the
- // test for an undefined generator, and the third is the generator resume
- // machinery.
- DCHECK_GE(graph->num_blocks(), 3);
- BasicBlock* generator_block = *(graph->begin() + 2);
- DCHECK_EQ(generator_block->control_node()->opcode(), Opcode::kSwitch);
-
- bool found_node = false;
- for (Node* node : generator_block->nodes()) {
- if (node == input_node) {
- found_node = true;
- break;
- }
- }
- DCHECK(found_node);
-
- std::set<BasicBlock*> visited;
- bool jump_loop_block_is_reachable_from_generator_block =
- IsReachable(generator_block, jump_loop_block, visited);
- DCHECK(!jump_loop_block_is_reachable_from_generator_block);
-
- return true;
-}
-} // namespace
-#endif
-
void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
BasicBlock* block) {
current_node_ = node;
@@ -872,30 +809,36 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
if (node->Is<JumpToInlined>() || node->Is<Abort>()) {
// Do nothing.
- DCHECK(node->temporaries().is_empty());
- DCHECK_EQ(node->num_temporaries_needed(), 0);
+ DCHECK(node->general_temporaries().is_empty());
+ DCHECK(node->double_temporaries().is_empty());
+ DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
+ DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK_EQ(node->properties(), OpProperties(0));
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
}
} else if (node->Is<Deopt>()) {
- // No fixed temporaries.
- DCHECK(node->temporaries().is_empty());
- DCHECK_EQ(node->num_temporaries_needed(), 0);
+ // No temporaries.
+ DCHECK(node->general_temporaries().is_empty());
+ DCHECK(node->double_temporaries().is_empty());
+ DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
+ DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK_EQ(node->properties(), OpProperties::EagerDeopt());
UpdateUse(*node->eager_deopt_info());
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
}
} else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
- // No fixed temporaries.
- DCHECK(node->temporaries().is_empty());
- DCHECK_EQ(node->num_temporaries_needed(), 0);
+ // No temporaries.
+ DCHECK(node->general_temporaries().is_empty());
+ DCHECK(node->double_temporaries().is_empty());
+ DCHECK_EQ(node->num_temporaries_needed<Register>(), 0);
+ DCHECK_EQ(node->num_temporaries_needed<DoubleRegister>(), 0);
DCHECK_EQ(node->input_count(), 0);
DCHECK(!node->properties().can_eager_deopt());
DCHECK(!node->properties().can_lazy_deopt());
@@ -915,19 +858,17 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
// extended lifetime nodes are dead.
if (auto jump_loop = node->TryCast<JumpLoop>()) {
for (Input& input : jump_loop->used_nodes()) {
- // Since the value is used by the loop, it must be live somewhere (
- // either in a register or loadable). The exception is when this value
- // is created in a generator resume, and the use of it cannot reach the
- // JumpLoop (e.g. because it returns or deopts on resume).
- DCHECK_IMPLIES(
- !input.node()->has_register() && !input.node()->is_loadable(),
- IsValueFromGeneratorResumeThatDoesNotReachJumpLoop(
- graph_, input.node(), block));
+ if (!input.node()->has_register() && !input.node()->is_loadable()) {
+ // If the value isn't loadable by the end of a loop (this can happen
+ // e.g. when a deferred throw doesn't spill it, and an exception
+ // handler drops the value), spill it now.
+ Spill(input.node());
+ }
UpdateUse(&input);
}
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
}
} else {
@@ -943,14 +884,16 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
DCHECK(!node->properties().needs_register_snapshot());
- DCHECK_EQ(general_registers_.free() | node->temporaries(),
+ DCHECK_EQ(general_registers_.free() | node->general_temporaries(),
general_registers_.free());
+ DCHECK_EQ(double_registers_.free() | node->double_temporaries(),
+ double_registers_.free());
general_registers_.clear_blocked();
double_registers_.clear_blocked();
VerifyRegisterState();
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(node, ProcessingState(block_it_));
}
@@ -984,7 +927,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
if (general_registers_.unblocked_free().has(reg)) {
phi->result().SetAllocated(ForceAllocate(reg, phi));
DCHECK_EQ(general_registers_.GetValue(reg), phi);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->Process(phi, ProcessingState(block_it_));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
@@ -1001,7 +944,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
Node* gap_move;
if (source.IsConstant()) {
DCHECK(IsConstantNode(node->opcode()));
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " constant gap move: " << target << " ← "
<< PrintNodeLabel(graph_labeller(), node) << std::endl;
@@ -1009,7 +952,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
gap_move =
Node::New<ConstantGapMove>(compilation_info_->zone(), {}, node, target);
} else {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " gap move: " << target << " ← "
<< PrintNodeLabel(graph_labeller(), node) << ":"
<< source << std::endl;
@@ -1037,7 +980,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
if (node->is_loadable()) return;
AllocateSpillSlot(node);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " spill: " << node->spill_slot() << " ← "
<< PrintNodeLabel(graph_labeller(), node) << std::endl;
@@ -1053,7 +996,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
switch (operand.extended_policy()) {
case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
// Allocated in AssignArbitraryRegisterInput.
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node())
<< " has arbitrary register\n";
@@ -1062,7 +1005,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
// Allocated in AssignAnyInput.
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node())
<< " has arbitrary location\n";
@@ -1088,7 +1031,7 @@ void StraightForwardRegisterAllocator::AssignFixedInput(Input& input) {
case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
UNREACHABLE();
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node())
<< " in forced " << input.operand() << "\n";
@@ -1120,7 +1063,7 @@ void StraightForwardRegisterAllocator::AssignArbitraryRegisterInput(
ValueNode* node = input.node();
compiler::InstructionOperand location = node->allocation();
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node()) << " in "
<< location << "\n";
@@ -1152,7 +1095,7 @@ void StraightForwardRegisterAllocator::AssignAnyInput(Input& input) {
compiler::InstructionOperand location = node->allocation();
input.InjectLocation(location);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< "- " << PrintNodeLabel(graph_labeller(), input.node())
<< " in original " << location << "\n";
@@ -1291,7 +1234,7 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters(
while (registers.used() != registers.empty()) {
RegisterT reg = registers.used().first();
ValueNode* node = registers.GetValue(reg);
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " clearing registers with "
<< PrintNodeLabel(graph_labeller(), node) << "\n";
}
@@ -1328,7 +1271,7 @@ void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
// architectures.
SpillSlots& slots = is_tagged ? tagged_ : untagged_;
MachineRepresentation representation = node->GetMachineRepresentation();
- if (!FLAG_maglev_reuse_stack_slots || slots.free_slots.empty()) {
+ if (!v8_flags.maglev_reuse_stack_slots || slots.free_slots.empty()) {
free_slot = slots.top++;
} else {
NodeIdT start = node->live_range().start;
@@ -1352,7 +1295,7 @@ template <typename RegisterT>
RegisterT StraightForwardRegisterAllocator::PickRegisterToFree(
RegListBase<RegisterT> reserved) {
RegisterFrameState<RegisterT>& registers = GetRegisterFrameState<RegisterT>();
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " need to free a register... ";
}
int furthest_use = 0;
@@ -1373,7 +1316,7 @@ RegisterT StraightForwardRegisterAllocator::PickRegisterToFree(
best = reg;
}
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " chose " << best << " with next use " << furthest_use << "\n";
}
@@ -1448,7 +1391,7 @@ template <typename RegisterT>
compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
RegisterFrameState<RegisterT>& registers, RegisterT reg, ValueNode* node) {
DCHECK(!registers.is_blocked(reg));
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " forcing " << reg << " to "
<< PrintNodeLabel(graph_labeller(), node) << "...\n";
@@ -1533,57 +1476,81 @@ compiler::AllocatedOperand RegisterFrameState<RegisterT>::AllocateRegister(
reg.code());
}
-void StraightForwardRegisterAllocator::AssignFixedTemporaries(NodeBase* node) {
- // TODO(victorgomes): Support double registers as temporaries.
- RegList fixed_temporaries = node->temporaries();
+template <typename RegisterT>
+void StraightForwardRegisterAllocator::AssignFixedTemporaries(
+ RegisterFrameState<RegisterT>& registers, NodeBase* node) {
+ RegListBase<RegisterT> fixed_temporaries = node->temporaries<RegisterT>();
// Make sure that any initially set temporaries are definitely free.
- for (Register reg : fixed_temporaries) {
- DCHECK(!general_registers_.is_blocked(reg));
- if (!general_registers_.free().has(reg)) {
- DropRegisterValue(general_registers_, reg);
- general_registers_.AddToFree(reg);
+ for (RegisterT reg : fixed_temporaries) {
+ DCHECK(!registers.is_blocked(reg));
+ if (!registers.free().has(reg)) {
+ DropRegisterValue(registers, reg);
+ registers.AddToFree(reg);
}
- general_registers_.block(reg);
+ registers.block(reg);
}
- if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->os()
- << "Fixed temporaries: " << fixed_temporaries << "\n";
+ if (v8_flags.trace_maglev_regalloc && !fixed_temporaries.is_empty()) {
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ printing_visitor_->os()
+ << "Fixed Temporaries: " << fixed_temporaries << "\n";
+ } else {
+ printing_visitor_->os()
+ << "Fixed Double Temporaries: " << fixed_temporaries << "\n";
+ }
}
}
+void StraightForwardRegisterAllocator::AssignFixedTemporaries(NodeBase* node) {
+ AssignFixedTemporaries(general_registers_, node);
+ AssignFixedTemporaries(double_registers_, node);
+}
+
+template <typename RegisterT>
void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
- NodeBase* node) {
- int num_temporaries_needed = node->num_temporaries_needed();
+ RegisterFrameState<RegisterT>& registers, NodeBase* node) {
+ int num_temporaries_needed = node->num_temporaries_needed<RegisterT>();
if (num_temporaries_needed == 0) return;
- RegList temporaries = node->temporaries();
+ DCHECK_GT(num_temporaries_needed, 0);
+ RegListBase<RegisterT> temporaries = node->temporaries<RegisterT>();
+ int remaining_temporaries_needed = num_temporaries_needed;
- // TODO(victorgomes): Support double registers as temporaries.
- for (Register reg : general_registers_.unblocked_free()) {
- general_registers_.block(reg);
+ for (RegisterT reg : registers.unblocked_free()) {
+ registers.block(reg);
DCHECK(!temporaries.has(reg));
temporaries.set(reg);
- if (--num_temporaries_needed == 0) break;
+ if (--remaining_temporaries_needed == 0) break;
}
// Free extra registers if necessary.
- for (int i = 0; i < num_temporaries_needed; ++i) {
- DCHECK(general_registers_.UnblockedFreeIsEmpty());
- Register reg = FreeUnblockedRegister<Register>();
- general_registers_.block(reg);
+ for (int i = 0; i < remaining_temporaries_needed; ++i) {
+ DCHECK(registers.UnblockedFreeIsEmpty());
+ RegisterT reg = FreeUnblockedRegister<RegisterT>();
+ registers.block(reg);
DCHECK(!temporaries.has(reg));
temporaries.set(reg);
}
- DCHECK_GE(temporaries.Count(), node->num_temporaries_needed());
+ DCHECK_GE(temporaries.Count(), num_temporaries_needed);
+
node->assign_temporaries(temporaries);
- if (FLAG_trace_maglev_regalloc) {
- printing_visitor_->os() << "Temporaries: " << temporaries << "\n";
+ if (v8_flags.trace_maglev_regalloc) {
+ if constexpr (std::is_same_v<RegisterT, Register>) {
+ printing_visitor_->os() << "Temporaries: " << temporaries << "\n";
+ } else {
+ printing_visitor_->os() << "Double Temporaries: " << temporaries << "\n";
+ }
}
}
+void StraightForwardRegisterAllocator::AssignArbitraryTemporaries(
+ NodeBase* node) {
+ AssignArbitraryTemporaries(general_registers_, node);
+ AssignArbitraryTemporaries(double_registers_, node);
+}
+
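
The templated helper above keeps the original greedy policy, just generalized over the register class. A condensed standalone sketch of that policy, using a plain bitset instead of RegListBase and a caller-supplied eviction hook standing in for FreeUnblockedRegister (all names here are illustrative):

#include <bitset>
#include <cassert>
#include <functional>

constexpr int kNumRegs = 16;  // arbitrary register count for the sketch

std::bitset<kNumRegs> AssignTemporaries(
    std::bitset<kNumRegs> unblocked_free, int num_needed,
    const std::function<int()>& evict_one_register) {
  std::bitset<kNumRegs> temporaries;
  // First grab registers that are already free and unblocked.
  for (int reg = 0; reg < kNumRegs && num_needed > 0; ++reg) {
    if (unblocked_free[reg]) {
      temporaries.set(reg);
      --num_needed;
    }
  }
  // Only if that wasn't enough, evict live values to free more registers.
  while (num_needed > 0) {
    int reg = evict_one_register();
    assert(reg >= 0 && reg < kNumRegs && !temporaries[reg]);
    temporaries.set(reg);
    --num_needed;
  }
  return temporaries;
}
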
namespace {
template <typename RegisterT>
void ClearRegisterState(RegisterFrameState<RegisterT>& registers) {
@@ -1711,7 +1678,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
return InitializeBranchTargetRegisterValues(control, target);
}
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << "Merging registers...\n";
}
@@ -1735,7 +1702,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
if (!registers.free().has(reg)) {
incoming = registers.GetValue(reg);
if (!IsLiveAtTarget(incoming, control, target)) {
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " " << reg << " - incoming node "
<< PrintNodeLabel(graph_labeller(), incoming)
<< " dead at target\n";
@@ -1747,7 +1714,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
if (incoming == node) {
// We're using the same register as the target already has. If registers
// are merged, add input information.
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
if (node) {
printing_visitor_->os()
<< " " << reg << " - incoming node same as node: "
@@ -1762,7 +1729,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
// The register is already occupied with a different node. Figure out
// where that node is allocated on the incoming branch.
merge->operand(predecessor_id) = node->allocation();
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " " << reg << " - merge: loading "
<< PrintNodeLabel(graph_labeller(), node)
<< " from " << node->allocation() << " \n";
@@ -1787,7 +1754,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
// containing conversion nodes.
// DCHECK_IMPLIES(!IsInRegister(target_state, incoming),
// incoming->properties().is_conversion());
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os()
<< " " << reg << " - can't load incoming "
<< PrintNodeLabel(graph_labeller(), node) << ", bailing out\n";
@@ -1802,7 +1769,7 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
// over the liveness of the node they are converting.
// TODO(v8:7700): Overeager DCHECK.
// DCHECK(node->properties().is_conversion());
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " " << reg << " - can't load "
<< PrintNodeLabel(graph_labeller(), node)
<< ", dropping the merge\n";
@@ -1834,14 +1801,14 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
// state.
if (node == nullptr) {
merge->operand(predecessor_id) = register_info;
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " " << reg << " - new merge: loading new "
<< PrintNodeLabel(graph_labeller(), incoming)
<< " from " << register_info << " \n";
}
} else {
merge->operand(predecessor_id) = node->allocation();
- if (FLAG_trace_maglev_regalloc) {
+ if (v8_flags.trace_maglev_regalloc) {
printing_visitor_->os() << " " << reg << " - new merge: loading "
<< PrintNodeLabel(graph_labeller(), node)
<< " from " << node->allocation() << " \n";
diff --git a/deps/v8/src/maglev/maglev-regalloc.h b/deps/v8/src/maglev/maglev-regalloc.h
index 081383dd82..4d6d4cf521 100644
--- a/deps/v8/src/maglev/maglev-regalloc.h
+++ b/deps/v8/src/maglev/maglev-regalloc.h
@@ -142,7 +142,13 @@ class StraightForwardRegisterAllocator {
void AssignArbitraryRegisterInput(Input& input);
void AssignAnyInput(Input& input);
void AssignInputs(NodeBase* node);
+ template <typename RegisterT>
+ void AssignFixedTemporaries(RegisterFrameState<RegisterT>& registers,
+ NodeBase* node);
void AssignFixedTemporaries(NodeBase* node);
+ template <typename RegisterT>
+ void AssignArbitraryTemporaries(RegisterFrameState<RegisterT>& registers,
+ NodeBase* node);
void AssignArbitraryTemporaries(NodeBase* node);
void TryAllocateToInput(Phi* phi);
diff --git a/deps/v8/src/maglev/maglev.cc b/deps/v8/src/maglev/maglev.cc
index f4e2275945..024175c840 100644
--- a/deps/v8/src/maglev/maglev.cc
+++ b/deps/v8/src/maglev/maglev.cc
@@ -13,12 +13,12 @@ namespace internal {
MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
Handle<JSFunction> function) {
- DCHECK(FLAG_maglev);
+ DCHECK(v8_flags.maglev);
std::unique_ptr<maglev::MaglevCompilationInfo> info =
maglev::MaglevCompilationInfo::New(isolate, function);
maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(),
info.get());
- return maglev::MaglevCompiler::GenerateCode(info.get());
+ return maglev::MaglevCompiler::GenerateCode(isolate, info.get());
}
} // namespace internal
diff --git a/deps/v8/src/maglev/maglev.h b/deps/v8/src/maglev/maglev.h
index e55df23b15..7207fdec5e 100644
--- a/deps/v8/src/maglev/maglev.h
+++ b/deps/v8/src/maglev/maglev.h
@@ -17,6 +17,8 @@ class JSFunction;
class Maglev : public AllStatic {
public:
+ // TODO(v8:7700): This entry point is only used for testing. Consider
+ // removing it once BenchMaglev runtime functions are no longer useful.
static MaybeHandle<CodeT> Compile(Isolate* isolate,
Handle<JSFunction> function);
};