Diffstat (limited to 'chromium/v8/src/baseline/riscv64')
-rw-r--r--  chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h | 615
-rw-r--r--  chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h | 112
2 files changed, 727 insertions(+), 0 deletions(-)
diff --git a/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
new file mode 100644
index 00000000000..e0667d3472b
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -0,0 +1,615 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/interface-descriptors.h"
+namespace v8 {
+namespace internal {
+namespace baseline {
+
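+// This port has no condition flags: the comparison helpers below (Test, Cmp*,
+// Compare*) materialize their result into kTestReg, and JumpIf() then
+// branches on kTestReg relative to zero.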
+constexpr Register kTestReg = t0;
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, add a couple of extra
+ // registers (t2 and t4) to this first scope's pool.
+ wrapped_scope_.Include(t2, t4);
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
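+// Illustrative use of the scope (a sketch of the pattern used throughout this
+// file, not additional generated code):
+//
+//   BaselineAssembler::ScratchRegisterScope temps(this);
+//   Register tmp = temps.AcquireScratch();
+//   __ Ld(tmp, operand);
+//   // tmp is handed back when `temps` goes out of scope.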
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
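+// The enumerators above are defined directly in terms of the backend's
+// condition codes (eq, ne, lt, ...), so AsMasmCondition is a plain
+// value-preserving cast rather than a remapping.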
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.is_reg() && op.rm() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // Nop
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ jmp(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), kTestReg, Operand((int64_t)0));
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ __ CallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative jump.
+ __ TailCallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ // t6 is used for function calls on RISCV64, e.g. 'jalr t6' or 'jal t6'.
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ __ And(kTestReg, value, Operand(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Sub64(kTestReg, type, Operand(instance_type));
+}
+void BaselineAssembler::CmpInstanceType(Register value,
+ InstanceType instance_type) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ // InstanceType is a 16-bit field.
+ __ Lhu(type, FieldMemOperand(value, Map::kInstanceTypeOffset));
+ __ Sub64(kTestReg, type, Operand(instance_type));
+}
+
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ li(temp, Operand(smi));
+ __ SmiUntag(temp);
+ __ Sub64(kTestReg, value, temp);
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ Ld(temp, operand);
+ __ Sub64(kTestReg, value, temp);
+}
+
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, lhs, rhs);
+ } else {
+ __ Sub64(kTestReg, lhs, rhs);
+ }
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, value, tmp);
+ } else {
+ __ Sub64(kTestReg, value, tmp);
+ }
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, tmp, value);
+ } else {
+ __ Sub64(kTestReg, tmp, value);
+ }
+}
+
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ Sub64(kTestReg, value, Operand(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ Sd(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct CountPushHelper;
+template <>
+struct CountPushHelper<> {
+ static int Count() { return 0; }
+};
+template <typename Arg, typename... Args>
+struct CountPushHelper<Arg, Args...> {
+ static int Count(Arg arg, Args... args) {
+ return 1 + CountPushHelper<Args...>::Count(args...);
+ }
+};
+template <typename... Args>
+struct CountPushHelper<interpreter::RegisterList, Args...> {
+ static int Count(interpreter::RegisterList list, Args... args) {
+ return list.register_count() + CountPushHelper<Args...>::Count(args...);
+ }
+};
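+// CountPushHelper computes how many values a Push(...) call will push: each
+// plain argument counts as one, and an interpreter::RegisterList counts as its
+// register_count(). For example (illustrative), Count(reg, smi, list) with a
+// three-element list yields 1 + 1 + 3 == 5.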
+
+template <typename... Args>
+struct PushAllHelper;
+template <typename... Args>
+void PushAll(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::Push(basm, args...);
+}
+template <typename... Args>
+void PushAllReverse(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::PushReverse(basm, args...);
+}
+
+template <>
+struct PushAllHelper<> {
+ static void Push(BaselineAssembler* basm) {}
+ static void PushReverse(BaselineAssembler* basm) {}
+};
+
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+ masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+ masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+ masm->li(kScratchReg, static_cast<int64_t>(immediate));
+ PushSingle(masm, kScratchReg);
+}
+
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+ masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+ masm->Ld(kScratchReg, operand);
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+ return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static void Push(BaselineAssembler* basm, Arg arg) {
+ PushSingle(basm->masm(), arg);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg) {
+ // A single value reverses to itself, so just push it.
+ return Push(basm, arg);
+ }
+};
+template <typename Arg1, typename Arg2, typename... Args>
+struct PushAllHelper<Arg1, Arg2, Args...> {
+ static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg1),
+ ToRegister(basm, &scope, arg2));
+ }
+ PushAll(basm, args...);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ PushAllReverse(basm, args...);
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg2),
+ ToRegister(basm, &scope, arg1));
+ }
+ }
+};
+// Currently RegisterLists are always the last argument, so we don't
+// specialize for the case where they're not. We do still specialize for the
+// aligned and unaligned cases.
+template <typename Arg>
+struct PushAllHelper<Arg, interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 1);
+ PushAll(basm, arg, list[0], list.PopLeft());
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ if (list.register_count() == 0) {
+ PushAllReverse(basm, arg);
+ } else {
+ PushAllReverse(basm, arg, list[0], list.PopLeft());
+ }
+ }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 0);
+ for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
+ PushAll(basm, list[reg_index], list[reg_index + 1]);
+ }
+ }
+ static void PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ int reg_index = list.register_count() - 1;
+ if (reg_index % 2 == 0) {
+ // The list has an odd number of registers; push the last one on its own so
+ // the remaining pushes can be paired.
+ PushAllReverse(basm, list[reg_index]);
+ reg_index--;
+ }
+ for (; reg_index >= 1; reg_index -= 2) {
+ PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ }
+ }
+};
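+// Illustrative expansion (a sketch, not extra generated code): a call like
+//   PushAll(basm, a, b, c, d);
+// pairs up as
+//   masm->Push(a, b);
+//   masm->Push(c, d);
+// while PushAllReverse handles the tail first and swaps each pair, so the
+// values end up pushed as d, c, b, a.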
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
+ T... tail) {
+ basm->masm()->Pop(reg1, reg2);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ // Count the values so the caller knows how many were pushed. No stack
+ // padding is required on this port, so the parity of the count does not
+ // change how the values are pushed.
+ int push_count = detail::CountPushHelper<T...>::Count(vals...);
+ detail::PushAll(this, vals...);
+ return push_count;
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet;
+ // use a plain load instead of:
+ // __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ Lb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ li(tmp, Operand(value));
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet.
+ __ Sd(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet.
+ __ Sd(value, FieldMemOperand(target, offset));
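+ // Emit the write barrier after the store so it sees the updated field;
+ // kRAHasNotBeenSaved indicates ra has not been spilled and must be preserved
+ // if the barrier needs to call out.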
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ RecordWriteField(target, offset, value, tmp, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ // FIXME(riscv64): pointer compression is not implemented on riscv64 yet.
+ __ Sd(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Lw(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // The interrupt budget is a 32-bit field. This port has no condition flags,
+ // so also leave the updated budget in kTestReg for callers that branch on it
+ // with JumpIf.
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Move(kTestReg, interrupt_budget);
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Lw(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // As above: the budget is a 32-bit field, and the updated value is mirrored
+ // into kTestReg so the caller's JumpIf can test it.
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Move(kTestReg, interrupt_budget);
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (SmiValuesAre31Bits()) {
+ __ Add32(lhs, lhs, Operand(rhs));
+ } else {
+ __ Add64(lhs, lhs, Operand(rhs));
+ }
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub64(reg, reg, Operand(case_value_base));
+ }
+
+ // Mostly copied from code-generator-riscv64.cc
+ ScratchRegisterScope scope(this);
+ Register temp = scope.AcquireScratch();
+ Label table;
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(int64_t(num_labels)));
+ int64_t imm64 = __ branch_long_offset(&table);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+ __ auipc(temp, Hi20); // temp = PC + (Hi20 << 12)
+ __ addi(temp, temp, Lo12); // temp = PC + imm64, i.e. the address of the table
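+ // Worked example: if imm64 == 0x12345FFF, the low 12 bits sign-extend to
+ // Lo12 == -1 and Hi20 == (0x12345FFF + 0x800) >> 12 == 0x12346, so
+ // (Hi20 << 12) + Lo12 == 0x12345FFF again; the +0x800 compensates for the
+ // sign extension of Lo12.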
+
+ int entry_size_log2 = 2;
+ Register temp2 = scope.AcquireScratch();
+ __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
+ __ Jump(temp2);
+ {
+ TurboAssembler::BlockTrampolinePoolScope block_pools(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ bind(&table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ DCHECK_EQ(num_labels * kInstrSize, __ SizeOfCodeGeneratedSince(&table));
+ __ bind(&fallthrough);
+ }
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // AddToInterruptBudget left the new budget in kTestReg; skip the interrupt
+ // path while it is still non-negative.
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add64(params_size, params_size, 1); // Include the receiver.
+ __ masm()->slli(params_size, params_size, kPointerSizeLog2);
+ __ masm()->Add64(sp, sp, params_size);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
diff --git a/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
new file mode 100644
index 00000000000..98ca62e3034
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ // Enter the frame here, since CallBuiltin will clobber ra (the return
+ // address register).
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
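+ // max_frame_size covers the register frame plus the largest outgoing call
+ // area; the out-of-line prologue presumably uses it for its stack check
+ // before PrologueFillFrame() below fills in the register frame.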
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Chunk size for the unrolled fill loop below (a tuning choice).
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ // BaselineOutOfLinePrologue already pushed one undefined.
+ register_count -= 1;
+ if (has_new_target) {
+ if (new_target_index == 0) {
+ // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
+ // pushed.
+ __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
+ } else {
+ DCHECK_LE(new_target_index, register_count);
+ int index = 1;
+ for (; index + 2 <= new_target_index; index += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ if (index == new_target_index) {
+ __ masm()->Push(kJavaScriptCallNewTargetRegister,
+ kInterpreterAccumulatorRegister);
+ } else {
+ DCHECK_EQ(index, new_target_index - 1);
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kJavaScriptCallNewTargetRegister);
+ }
+ // We pushed "index" registers, minus the one the prologue pushed, plus
+ // the two registers that included new_target.
+ register_count -= (index - 1 + 2);
+ }
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ } else {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ // Push the remainder first so the loop below runs in whole chunks of
+ // kLoopUnrollSize registers.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ masm()->Sub64(scratch, scratch, 1);
+ __ JumpIf(Condition::kGreaterThan, &loop);
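+ // Example (illustrative): with register_count == 100, the code above pushes
+ // 100 % 8 == 4 registers before the loop and then iterates 100 / 8 == 12
+ // times, pushing kLoopUnrollSize == 8 registers per iteration.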
+ }
+ __ RecordComment("]");
+}
+
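+// Debug-only consistency check: sp plus the fixed frame size plus the
+// bytecode's register frame, rounded up to two system pointers for stack
+// alignment, should land exactly on fp.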
+void BaselineCompiler::VerifyFrameSize() {
+ __ masm()->Add64(kScratchReg, sp,
+ RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size(),
+ 2 * kSystemPointerSize));
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
+ Operand(fp));
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_