author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-09-03 13:32:17 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-10-01 14:31:55 +0200
commit     21ba0c5d4bf8fba15dddd97cd693bad2358b77fd (patch)
tree       91be119f694044dfc1ff9fdc054459e925de9df0 /chromium/v8/src/baseline
parent     03c549e0392f92c02536d3f86d5e1d8dfa3435ac (diff)
BASELINE: Update Chromium to 92.0.4515.166
Change-Id: I42a050486714e9e54fc271f2a8939223a02ae364
Diffstat (limited to 'chromium/v8/src/baseline')
-rw-r--r--  chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h         |  12
-rw-r--r--  chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h          |   6
-rw-r--r--  chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h     |   4
-rw-r--r--  chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h      |   6
-rw-r--r--  chromium/v8/src/baseline/baseline-assembler-inl.h                 |   7
-rw-r--r--  chromium/v8/src/baseline/baseline-assembler.h                     |   5
-rw-r--r--  chromium/v8/src/baseline/baseline-compiler.cc                     | 889
-rw-r--r--  chromium/v8/src/baseline/baseline-compiler.h                      |  34
-rw-r--r--  chromium/v8/src/baseline/baseline-osr-inl.h                       |  38
-rw-r--r--  chromium/v8/src/baseline/baseline.cc                              |  38
-rw-r--r--  chromium/v8/src/baseline/baseline.h                               |   2
-rw-r--r--  chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h       |   8
-rw-r--r--  chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h        |   6
-rw-r--r--  chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h | 615
-rw-r--r--  chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h  | 112
-rw-r--r--  chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h         |  11
-rw-r--r--  chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h          |   6
17 files changed, 1313 insertions, 486 deletions
diff --git a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index eca2b47cc0e..bfccef90f8f 100644
--- a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -124,7 +124,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Call(temp);
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
@@ -133,7 +133,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Jump(temp);
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
@@ -151,7 +151,7 @@ void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -198,10 +198,10 @@ void BaselineAssembler::Move(MemOperand output, Register source) {
__ str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
- __ mov(output, Operand(reference));
+ __ Move32BitImmediate(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
- __ mov(output, Operand(value));
+ __ Move32BitImmediate(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ mov(output, Operand(value));
@@ -351,7 +351,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register value) {
__ str(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
index ff2b6d1a831..d7f0a606d3b 100644
--- a/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
+++ b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -19,9 +19,9 @@ void BaselineCompiler::Prologue() {
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 27b7c2b2d8d..63e90df4d62 100644
--- a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -167,7 +167,7 @@ void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -422,7 +422,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
index e567be41d24..0807c5434ac 100644
--- a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -18,9 +18,9 @@ void BaselineCompiler::Prologue() {
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
__ masm()->AssertSpAligned();
PrologueFillFrame();
diff --git a/chromium/v8/src/baseline/baseline-assembler-inl.h b/chromium/v8/src/baseline/baseline-assembler-inl.h
index 8fd54d63a2f..401062517f6 100644
--- a/chromium/v8/src/baseline/baseline-assembler-inl.h
+++ b/chromium/v8/src/baseline/baseline-assembler-inl.h
@@ -8,12 +8,13 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/js-function.h"
@@ -27,6 +28,8 @@
#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -41,10 +44,10 @@ void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
-bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
+ if (!FLAG_code_comments) return;
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
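
The hunk above centralizes the FLAG_code_comments check inside RecordComment (declared V8_INLINE in the header change below), which is why the per-call-site guards in the arm hunks earlier in this diff are dropped. A toy standalone sketch of the guard-inside-inline-wrapper pattern (flag and printf are stand-ins, not V8 code):

    #include <cstdio>

    static bool FLAG_code_comments = false;

    inline void RecordComment(const char* s) {
      if (!FLAG_code_comments) return;  // callers no longer repeat this check
      std::printf("; %s\n", s);
    }

    int main() {
      RecordComment("[ CallBuiltin");  // no-op unless the flag is set
      FLAG_code_comments = true;
      RecordComment("]");
    }
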
diff --git a/chromium/v8/src/baseline/baseline-assembler.h b/chromium/v8/src/baseline/baseline-assembler.h
index 38874d556f0..7c46cd5e2c4 100644
--- a/chromium/v8/src/baseline/baseline-assembler.h
+++ b/chromium/v8/src/baseline/baseline-assembler.h
@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
@@ -32,10 +32,9 @@ class BaselineAssembler {
inline void GetCode(Isolate* isolate, CodeDesc* desc);
inline int pc_offset() const;
- inline bool emit_debug_code() const;
inline void CodeEntry() const;
inline void ExceptionHandler() const;
- inline void RecordComment(const char* string);
+ V8_INLINE void RecordComment(const char* string);
inline void Trap();
inline void DebugBreak();
diff --git a/chromium/v8/src/baseline/baseline-compiler.cc b/chromium/v8/src/baseline/baseline-compiler.cc
index 3d599c11fd5..9c6e3f10e6d 100644
--- a/chromium/v8/src/baseline/baseline-compiler.cc
+++ b/chromium/v8/src/baseline/baseline-compiler.cc
@@ -4,8 +4,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
+#include "src/base/bits.h"
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-compiler.h"
@@ -19,7 +20,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/assembler.h"
#include "src/codegen/compiler.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/common/globals.h"
@@ -40,6 +41,8 @@
#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-compiler-arm-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -48,9 +51,9 @@ namespace v8 {
namespace internal {
namespace baseline {
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> BytecodeOffsetTableBuilder::ToBytecodeOffsetTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
Handle<ByteArray> table = isolate->factory()->NewByteArray(
static_cast<int>(bytes_.size()), AllocationType::kOld);
@@ -68,6 +71,7 @@ bool Clobbers(Register target, TaggedIndex index) { return false; }
bool Clobbers(Register target, int32_t imm) { return false; }
bool Clobbers(Register target, RootIndex index) { return false; }
bool Clobbers(Register target, interpreter::Register reg) { return false; }
+bool Clobbers(Register target, interpreter::RegisterList list) { return false; }
// We don't know what's inside machine registers or operands, so assume they
// match.
@@ -97,134 +101,151 @@ bool MachineTypeMatches(MachineType type, interpreter::Register reg) {
return type.IsTagged();
}
-template <typename... Args>
+template <typename Descriptor, typename... Args>
struct CheckArgsHelper;
-template <>
-struct CheckArgsHelper<> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i) {
- if (descriptor.AllowVarArgs()) {
- CHECK_GE(i, descriptor.GetParameterCount());
+template <typename Descriptor>
+struct CheckArgsHelper<Descriptor> {
+ static void Check(BaselineAssembler* masm, int i) {
+ if (Descriptor::AllowVarArgs()) {
+ CHECK_GE(i, Descriptor::GetParameterCount());
} else {
- CHECK_EQ(i, descriptor.GetParameterCount());
+ CHECK_EQ(i, Descriptor::GetParameterCount());
}
}
};
-template <typename Arg, typename... Args>
-struct CheckArgsHelper<Arg, Args...> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, Arg arg, Args... args) {
- if (i >= descriptor.GetParameterCount()) {
- CHECK(descriptor.AllowVarArgs());
+template <typename Descriptor, typename Arg, typename... Args>
+struct CheckArgsHelper<Descriptor, Arg, Args...> {
+ static void Check(BaselineAssembler* masm, int i, Arg arg, Args... args) {
+ if (i >= Descriptor::GetParameterCount()) {
+ CHECK(Descriptor::AllowVarArgs());
return;
}
- CHECK(MachineTypeMatches(descriptor.GetParameterType(i), arg));
- CheckArgsHelper<Args...>::Check(masm, descriptor, i + 1, args...);
+ CHECK(MachineTypeMatches(Descriptor().GetParameterType(i), arg));
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, i + 1, args...);
}
};
-template <typename... Args>
-struct CheckArgsHelper<interpreter::RegisterList, Args...> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, interpreter::RegisterList list, Args... args) {
+template <typename Descriptor, typename... Args>
+struct CheckArgsHelper<Descriptor, interpreter::RegisterList, Args...> {
+ static void Check(BaselineAssembler* masm, int i,
+ interpreter::RegisterList list, Args... args) {
for (int reg_index = 0; reg_index < list.register_count();
++reg_index, ++i) {
- if (i >= descriptor.GetParameterCount()) {
- CHECK(descriptor.AllowVarArgs());
+ if (i >= Descriptor::GetParameterCount()) {
+ CHECK(Descriptor::AllowVarArgs());
return;
}
- CHECK(
- MachineTypeMatches(descriptor.GetParameterType(i), list[reg_index]));
+ CHECK(MachineTypeMatches(Descriptor().GetParameterType(i),
+ list[reg_index]));
}
- CheckArgsHelper<Args...>::Check(masm, descriptor, i, args...);
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, i, args...);
}
};
-template <typename... Args>
-void CheckArgs(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- Args... args) {
- CheckArgsHelper<Args...>::Check(masm, descriptor, 0, args...);
+template <typename Descriptor, typename... Args>
+void CheckArgs(BaselineAssembler* masm, Args... args) {
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, 0, args...);
+}
+
+void CheckSettingDoesntClobber(Register target) {}
+template <typename Arg, typename... Args>
+void CheckSettingDoesntClobber(Register target, Arg arg, Args... args) {
+ DCHECK(!Clobbers(target, arg));
+ CheckSettingDoesntClobber(target, args...);
}
#else // DEBUG
-template <typename... Args>
+template <typename Descriptor, typename... Args>
void CheckArgs(Args... args) {}
+template <typename... Args>
+void CheckSettingDoesntClobber(Register target, Args... args) {}
+
#endif // DEBUG
-template <typename... Args>
+template <typename Descriptor, int ArgIndex, bool kIsRegister, typename... Args>
struct ArgumentSettingHelper;
-template <>
-struct ArgumentSettingHelper<> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i) {}
- static void CheckSettingDoesntClobber(Register target, int arg_index) {}
+template <typename Descriptor, int ArgIndex, bool kIsRegister>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, kIsRegister> {
+ static void Set(BaselineAssembler* masm) {
+ // Should only ever be called for the end of register arguments.
+ STATIC_ASSERT(ArgIndex == Descriptor::GetRegisterParameterCount());
+ }
};
-template <typename Arg, typename... Args>
-struct ArgumentSettingHelper<Arg, Args...> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, Arg arg, Args... args) {
- if (i < descriptor.GetRegisterParameterCount()) {
- Register target = descriptor.GetRegisterParameter(i);
- ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(target, i + 1,
- args...);
- masm->Move(target, arg);
- ArgumentSettingHelper<Args...>::Set(masm, descriptor, i + 1, args...);
- } else if (descriptor.GetStackArgumentOrder() ==
- StackArgumentOrder::kDefault) {
- masm->Push(arg, args...);
- } else {
- masm->PushReverse(arg, args...);
- }
+template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, true, Arg, Args...> {
+ static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
+ STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount());
+ Register target = Descriptor::GetRegisterParameter(ArgIndex);
+ CheckSettingDoesntClobber(target, args...);
+ masm->Move(target, arg);
+ ArgumentSettingHelper<Descriptor, ArgIndex + 1,
+ (ArgIndex + 1 <
+ Descriptor::GetRegisterParameterCount()),
+ Args...>::Set(masm, args...);
}
- static void CheckSettingDoesntClobber(Register target, int arg_index, Arg arg,
- Args... args) {
- DCHECK(!Clobbers(target, arg));
- ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(
- target, arg_index + 1, args...);
+};
+
+template <typename Descriptor, int ArgIndex>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, true,
+ interpreter::RegisterList> {
+ static void Set(BaselineAssembler* masm, interpreter::RegisterList list) {
+ STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount());
+ DCHECK_EQ(ArgIndex + list.register_count(),
+ Descriptor::GetRegisterParameterCount());
+ for (int i = 0; ArgIndex + i < Descriptor::GetRegisterParameterCount();
+ ++i) {
+ Register target = Descriptor::GetRegisterParameter(ArgIndex + i);
+ masm->Move(target, masm->RegisterFrameOperand(list[i]));
+ }
}
};
-// Specialization for interpreter::RegisterList which iterates it.
-// RegisterLists are only allowed to be the last argument.
-template <>
-struct ArgumentSettingHelper<interpreter::RegisterList> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, interpreter::RegisterList list) {
- // Either all the values are in machine registers, or they're all on the
- // stack.
- if (i < descriptor.GetRegisterParameterCount()) {
- for (int reg_index = 0; reg_index < list.register_count();
- ++reg_index, ++i) {
- Register target = descriptor.GetRegisterParameter(i);
- masm->Move(target, masm->RegisterFrameOperand(list[reg_index]));
- }
- } else if (descriptor.GetStackArgumentOrder() ==
- StackArgumentOrder::kDefault) {
- masm->Push(list);
+template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, false, Arg, Args...> {
+ static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
+ if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
+ masm->Push(arg, args...);
} else {
- masm->PushReverse(list);
+ masm->PushReverse(arg, args...);
}
}
- static void CheckSettingDoesntClobber(Register target, int arg_index,
- interpreter::RegisterList arg) {}
};
-template <typename... Args>
-void MoveArgumentsForDescriptor(BaselineAssembler* masm,
- CallInterfaceDescriptor descriptor,
- Args... args) {
- CheckArgs(masm, descriptor, args...);
- ArgumentSettingHelper<Args...>::Set(masm, descriptor, 0, args...);
+template <Builtins::Name kBuiltin, typename... Args>
+void MoveArgumentsForBuiltin(BaselineAssembler* masm, Args... args) {
+ using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
+ CheckArgs<Descriptor>(masm, args...);
+ ArgumentSettingHelper<Descriptor, 0,
+ (0 < Descriptor::GetRegisterParameterCount()),
+ Args...>::Set(masm, args...);
+ if (Descriptor::HasContextParameter()) {
+ masm->LoadContext(Descriptor::ContextRegister());
+ }
}
} // namespace detail
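
The templated helpers above are the core of this change: the builtin's call interface descriptor is now a template parameter, so parameter counts and register assignments are compile-time constants and the argument-moving recursion unrolls statically instead of testing a runtime descriptor at every step. A minimal standalone sketch of the pattern, with a toy descriptor and printf standing in for real register moves (none of these names are V8 APIs):

    #include <cstdio>

    struct ToyDescriptor {
      static constexpr int kRegisterParams = 2;  // first two args in registers
    };

    template <typename Descriptor, int ArgIndex, typename... Args>
    struct ToySetter;

    template <typename Descriptor, int ArgIndex>
    struct ToySetter<Descriptor, ArgIndex> {
      static void Set() {}  // no arguments left
    };

    template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
    struct ToySetter<Descriptor, ArgIndex, Arg, Args...> {
      static void Set(Arg arg, Args... args) {
        // Register-vs-stack decision is resolved at compile time.
        if constexpr (ArgIndex < Descriptor::kRegisterParams) {
          std::printf("move r%d <- %d\n", ArgIndex, arg);
        } else {
          std::printf("push %d\n", arg);
        }
        ToySetter<Descriptor, ArgIndex + 1, Args...>::Set(args...);
      }
    };

    int main() {
      ToySetter<ToyDescriptor, 0, int, int, int>::Set(10, 20, 30);
    }
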
+namespace {
+// Rough upper-bound estimate. Copying the data is most likely more expensive
+// than pre-allocating a large enough buffer.
+#ifdef V8_TARGET_ARCH_IA32
+const int kAverageBytecodeToInstructionRatio = 5;
+#else
+const int kAverageBytecodeToInstructionRatio = 7;
+#endif
+std::unique_ptr<AssemblerBuffer> AllocateBuffer(
+ Handle<BytecodeArray> bytecodes) {
+ int estimated_size = bytecodes->length() * kAverageBytecodeToInstructionRatio;
+ return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
+}
+} // namespace
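
As a sanity check of the heuristic above, under the constants shown (RoundUp and KB here are local stand-ins mirroring V8's base helpers): a hypothetical 1000-byte bytecode array on a non-IA32 target pre-allocates RoundUp(1000 * 7, 4 KB) = 8192 bytes.

    #include <cstdio>

    constexpr int kKB = 1024;
    constexpr int RoundUp(int x, int multiple) {
      return ((x + multiple - 1) / multiple) * multiple;
    }

    int main() {
      int bytecode_length = 1000;           // hypothetical bytecode size
      int estimated = bytecode_length * 7;  // non-IA32 ratio from above
      std::printf("%d\n", RoundUp(estimated, 4 * kKB));  // prints 8192
    }
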
+
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode)
@@ -232,20 +253,26 @@ BaselineCompiler::BaselineCompiler(
stats_(isolate->counters()->runtime_call_stats()),
shared_function_info_(shared_function_info),
bytecode_(bytecode),
- masm_(isolate, CodeObjectRequired::kNo),
+ masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
basm_(&masm_),
iterator_(bytecode_),
zone_(isolate->allocator(), ZONE_NAME),
labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
MemsetPointer(labels_, nullptr, bytecode_->length());
+
+ // Empirically determined expected size of the offset table at the 95th %ile,
+ // based on the size of the bytecode, to be:
+ //
+ // 16 + (bytecode size) / 4
+ bytecode_offset_table_builder_.Reserve(
+ base::bits::RoundUpToPowerOfTwo(16 + bytecode_->Size() / 4));
}
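
Worked through for the 95th-percentile formula above (illustrative numbers): a 400-byte bytecode array reserves RoundUpToPowerOfTwo(16 + 400 / 4) = 128 bytes. A standalone sketch of that arithmetic, with a local RoundUpToPowerOfTwo mirroring base::bits::RoundUpToPowerOfTwo:

    #include <cstdint>
    #include <cstdio>

    uint32_t RoundUpToPowerOfTwo(uint32_t v) {
      v--;
      v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
      return v + 1;
    }

    int main() {
      int bytecode_size = 400;  // hypothetical
      std::printf("%u\n", RoundUpToPowerOfTwo(16 + bytecode_size / 4));  // 128
    }
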
#define __ basm_.
void BaselineCompiler::GenerateCode() {
{
- RuntimeCallTimerScope runtimeTimer(
- stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
+ RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
for (; !iterator_.done(); iterator_.Advance()) {
PreVisitSingleBytecode();
}
@@ -257,8 +284,7 @@ void BaselineCompiler::GenerateCode() {
__ CodeEntry();
{
- RuntimeCallTimerScope runtimeTimer(
- stats_, RuntimeCallCounterId::kCompileBaselineVisit);
+ RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselineVisit);
Prologue();
AddPosition();
for (; !iterator_.done(); iterator_.Advance()) {
@@ -453,7 +479,7 @@ void BaselineCompiler::VisitSingleBytecode() {
}
void BaselineCompiler::VerifyFrame() {
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ RecordComment("[ Verify frame");
__ RecordComment(" -- Verify frame size");
VerifyFrameSize();
@@ -552,28 +578,18 @@ Label* BaselineCompiler::BuildForwardJumpLabel() {
return &threaded_label->label;
}
-template <typename... Args>
-void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
+template <Builtins::Name kBuiltin, typename... Args>
+void BaselineCompiler::CallBuiltin(Args... args) {
__ RecordComment("[ CallBuiltin");
- CallInterfaceDescriptor descriptor =
- Builtins::CallInterfaceDescriptorFor(builtin);
- detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
- if (descriptor.HasContextParameter()) {
- __ LoadContext(descriptor.ContextRegister());
- }
- __ CallBuiltin(builtin);
+ detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
+ __ CallBuiltin(kBuiltin);
__ RecordComment("]");
}
-template <typename... Args>
-void BaselineCompiler::TailCallBuiltin(Builtins::Name builtin, Args... args) {
- CallInterfaceDescriptor descriptor =
- Builtins::CallInterfaceDescriptorFor(builtin);
- detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
- if (descriptor.HasContextParameter()) {
- __ LoadContext(descriptor.ContextRegister());
- }
- __ TailCallBuiltin(builtin);
+template <Builtins::Name kBuiltin, typename... Args>
+void BaselineCompiler::TailCallBuiltin(Args... args) {
+ detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
+ __ TailCallBuiltin(kBuiltin);
}
template <typename... Args>
@@ -584,27 +600,17 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
}
// Returns into kInterpreterAccumulatorRegister
-void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Register reg,
- Label* label, Label::Distance distance) {
- Label end;
- Label::Distance end_distance = Label::kNear;
-
- Label* true_label = do_jump_if_true ? label : &end;
- Label::Distance true_distance = do_jump_if_true ? distance : end_distance;
- Label* false_label = do_jump_if_true ? &end : label;
- Label::Distance false_distance = do_jump_if_true ? end_distance : distance;
-
- BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
- Register to_boolean = scratch_scope.AcquireScratch();
- {
- SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kToBoolean, reg);
- __ Move(to_boolean, kInterpreterAccumulatorRegister);
- }
- __ JumpIfRoot(to_boolean, RootIndex::kTrueValue, true_label, true_distance);
- if (false_label != &end) __ Jump(false_label, false_distance);
-
- __ Bind(&end);
+void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Label* label,
+ Label::Distance distance) {
+ CallBuiltin<Builtins::kToBooleanForBaselineJump>(
+ kInterpreterAccumulatorRegister);
+ // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
+ // the original value into kInterpreterAccumulatorRegister, so we don't have
+ // to worry about it getting clobbered.
+ STATIC_ASSERT(kReturnRegister0 == kInterpreterAccumulatorRegister);
+ __ Cmp(kReturnRegister1, Smi::FromInt(0));
+ __ JumpIf(do_jump_if_true ? Condition::kNotEqual : Condition::kEqual, label,
+ distance);
}
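
The comment above is the key contract: the builtin returns a pair (original value, ToBoolean result as a Smi), so the old scratch register and accumulator save/restore are no longer needed and the caller just compares against zero. A standalone model of the caller's side, with plain C++ values standing in for registers (not V8 code):

    #include <cstdio>
    #include <utility>

    // Returns {original value, ToBoolean as 0/1}, like the two return registers.
    std::pair<int, int> ToBooleanForBaselineJump(int value) {
      return {value, value != 0};
    }

    void JumpIfToBoolean(bool do_jump_if_true, int value) {
      auto [original, as_bool] = ToBooleanForBaselineJump(value);
      // Single compare-against-zero, condition flipped by do_jump_if_true.
      bool taken = do_jump_if_true ? (as_bool != 0) : (as_bool == 0);
      std::printf("value=%d accumulator=%d jump=%d\n", value, original, taken ? 1 : 0);
    }

    int main() {
      JumpIfToBoolean(true, 42);
      JumpIfToBoolean(false, 0);
    }
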
void BaselineCompiler::VisitLdaZero() {
@@ -641,22 +647,21 @@ void BaselineCompiler::VisitLdaConstant() {
}
void BaselineCompiler::VisitLdaGlobal() {
- CallBuiltin(Builtins::kLoadGlobalICBaseline,
- Constant<Name>(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kLoadGlobalICBaseline>(Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitLdaGlobalInsideTypeof() {
- CallBuiltin(Builtins::kLoadGlobalICInsideTypeofBaseline,
- Constant<Name>(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kLoadGlobalICInsideTypeofBaseline>(
+ Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitStaGlobal() {
- CallBuiltin(Builtins::kStoreGlobalICBaseline,
- Constant<Name>(0), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kStoreGlobalICBaseline>(
+ Constant<Name>(0), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitPushContext() {
@@ -730,13 +735,13 @@ void BaselineCompiler::VisitLdaLookupSlot() {
}
void BaselineCompiler::VisitLdaLookupContextSlot() {
- CallBuiltin(Builtins::kLookupContextBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupContextBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupGlobalSlot() {
- CallBuiltin(Builtins::kLookupGlobalICBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupGlobalICBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
@@ -744,13 +749,13 @@ void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
}
void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
- CallBuiltin(Builtins::kLookupContextInsideTypeofBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupContextInsideTypeofBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupGlobalSlotInsideTypeof() {
- CallBuiltin(Builtins::kLookupGlobalICInsideTypeofBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupGlobalICInsideTypeofBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitStaLookupSlot() {
@@ -793,14 +798,13 @@ void BaselineCompiler::VisitMov() {
}
void BaselineCompiler::VisitLdaNamedProperty() {
- CallBuiltin(Builtins::kLoadICBaseline,
- RegisterOperand(0), // object
- Constant<Name>(1), // name
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kLoadICBaseline>(RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitLdaNamedPropertyNoFeedback() {
- CallBuiltin(Builtins::kGetProperty, RegisterOperand(0), Constant<Name>(1));
+ CallBuiltin<Builtins::kGetProperty>(RegisterOperand(0), Constant<Name>(1));
}
void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
@@ -808,19 +812,19 @@ void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kLoadSuperICBaseline,
- RegisterOperand(0), // object
- LoadWithReceiverAndVectorDescriptor::
- LookupStartObjectRegister(), // lookup start
- Constant<Name>(1), // name
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kLoadSuperICBaseline>(
+ RegisterOperand(0), // object
+ LoadWithReceiverAndVectorDescriptor::
+ LookupStartObjectRegister(), // lookup start
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitLdaKeyedProperty() {
- CallBuiltin(Builtins::kKeyedLoadICBaseline,
- RegisterOperand(0), // object
- kInterpreterAccumulatorRegister, // key
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kKeyedLoadICBaseline>(
+ RegisterOperand(0), // object
+ kInterpreterAccumulatorRegister, // key
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitLdaModuleVariable() {
@@ -878,11 +882,11 @@ void BaselineCompiler::VisitStaModuleVariable() {
}
void BaselineCompiler::VisitStaNamedProperty() {
- CallBuiltin(Builtins::kStoreICBaseline,
- RegisterOperand(0), // object
- Constant<Name>(1), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kStoreICBaseline>(
+ RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaNamedPropertyNoFeedback() {
@@ -900,19 +904,19 @@ void BaselineCompiler::VisitStaNamedOwnProperty() {
}
void BaselineCompiler::VisitStaKeyedProperty() {
- CallBuiltin(Builtins::kKeyedStoreICBaseline,
- RegisterOperand(0), // object
- RegisterOperand(1), // key
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kKeyedStoreICBaseline>(
+ RegisterOperand(0), // object
+ RegisterOperand(1), // key
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaInArrayLiteral() {
- CallBuiltin(Builtins::kStoreInArrayLiteralICBaseline,
- RegisterOperand(0), // object
- RegisterOperand(1), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kStoreInArrayLiteralICBaseline>(
+ RegisterOperand(0), // object
+ RegisterOperand(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaDataPropertyInLiteral() {
@@ -934,140 +938,149 @@ void BaselineCompiler::VisitCollectTypeProfile() {
}
void BaselineCompiler::VisitAdd() {
- CallBuiltin(Builtins::kAdd_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kAdd_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitSub() {
- CallBuiltin(Builtins::kSubtract_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kSubtract_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitMul() {
- CallBuiltin(Builtins::kMultiply_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kMultiply_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitDiv() {
- CallBuiltin(Builtins::kDivide_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kDivide_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitMod() {
- CallBuiltin(Builtins::kModulus_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kModulus_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitExp() {
- CallBuiltin(Builtins::kExponentiate_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kExponentiate_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseOr() {
- CallBuiltin(Builtins::kBitwiseOr_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseOr_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseXor() {
- CallBuiltin(Builtins::kBitwiseXor_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseXor_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseAnd() {
- CallBuiltin(Builtins::kBitwiseAnd_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseAnd_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftLeft() {
- CallBuiltin(Builtins::kShiftLeft_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kShiftLeft_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftRight() {
- CallBuiltin(Builtins::kShiftRight_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kShiftRight_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftRightLogical() {
- CallBuiltin(Builtins::kShiftRightLogical_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
-}
-
-void BaselineCompiler::BuildBinopWithConstant(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name, kInterpreterAccumulatorRegister, IntAsSmi(0),
- Index(1));
+ CallBuiltin<Builtins::kShiftRightLogical_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitAddSmi() {
- BuildBinopWithConstant(Builtins::kAdd_Baseline);
+ CallBuiltin<Builtins::kAdd_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitSubSmi() {
- BuildBinopWithConstant(Builtins::kSubtract_Baseline);
+ CallBuiltin<Builtins::kSubtract_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitMulSmi() {
- BuildBinopWithConstant(Builtins::kMultiply_Baseline);
+ CallBuiltin<Builtins::kMultiply_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitDivSmi() {
- BuildBinopWithConstant(Builtins::kDivide_Baseline);
+ CallBuiltin<Builtins::kDivide_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitModSmi() {
- BuildBinopWithConstant(Builtins::kModulus_Baseline);
+ CallBuiltin<Builtins::kModulus_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitExpSmi() {
- BuildBinopWithConstant(Builtins::kExponentiate_Baseline);
+ CallBuiltin<Builtins::kExponentiate_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseOrSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseOr_Baseline);
+ CallBuiltin<Builtins::kBitwiseOr_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseXorSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseXor_Baseline);
+ CallBuiltin<Builtins::kBitwiseXor_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseAndSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseAnd_Baseline);
+ CallBuiltin<Builtins::kBitwiseAnd_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftLeftSmi() {
- BuildBinopWithConstant(Builtins::kShiftLeft_Baseline);
+ CallBuiltin<Builtins::kShiftLeft_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightSmi() {
- BuildBinopWithConstant(Builtins::kShiftRight_Baseline);
+ CallBuiltin<Builtins::kShiftRight_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightLogicalSmi() {
- BuildBinopWithConstant(Builtins::kShiftRightLogical_Baseline);
+ CallBuiltin<Builtins::kShiftRightLogical_Baseline>(
+ kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1));
}
-void BaselineCompiler::BuildUnop(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name,
- kInterpreterAccumulatorRegister, // value
- Index(0)); // slot
+void BaselineCompiler::VisitInc() {
+ CallBuiltin<Builtins::kIncrement_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
-void BaselineCompiler::VisitInc() { BuildUnop(Builtins::kIncrement_Baseline); }
-
-void BaselineCompiler::VisitDec() { BuildUnop(Builtins::kDecrement_Baseline); }
+void BaselineCompiler::VisitDec() {
+ CallBuiltin<Builtins::kDecrement_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
+}
-void BaselineCompiler::VisitNegate() { BuildUnop(Builtins::kNegate_Baseline); }
+void BaselineCompiler::VisitNegate() {
+ CallBuiltin<Builtins::kNegate_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
+}
void BaselineCompiler::VisitBitwiseNot() {
- BuildUnop(Builtins::kBitwiseNot_Baseline);
+ CallBuiltin<Builtins::kBitwiseNot_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToBooleanLogicalNot() {
SelectBooleanConstant(kInterpreterAccumulatorRegister,
[&](Label* if_true, Label::Distance distance) {
- JumpIfToBoolean(false,
- kInterpreterAccumulatorRegister,
- if_true, distance);
+ JumpIfToBoolean(false, if_true, distance);
});
}
@@ -1081,23 +1094,23 @@ void BaselineCompiler::VisitLogicalNot() {
}
void BaselineCompiler::VisitTypeOf() {
- CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kTypeof>(kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitDeletePropertyStrict() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ Move(scratch, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
- Smi::FromEnum(LanguageMode::kStrict));
+ CallBuiltin<Builtins::kDeleteProperty>(RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kStrict));
}
void BaselineCompiler::VisitDeletePropertySloppy() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ Move(scratch, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
- Smi::FromEnum(LanguageMode::kSloppy));
+ CallBuiltin<Builtins::kDeleteProperty>(RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kSloppy));
}
void BaselineCompiler::VisitGetSuperConstructor() {
@@ -1106,87 +1119,115 @@ void BaselineCompiler::VisitGetSuperConstructor() {
__ LoadPrototype(prototype, kInterpreterAccumulatorRegister);
StoreRegister(0, prototype);
}
-template <typename... Args>
-void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot,
- uint32_t arg_count, Args... args) {
- Builtins::Name builtin;
+
+namespace {
+constexpr Builtins::Name ConvertReceiverModeToCompactBuiltin(
+ ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kAny:
- builtin = Builtins::kCall_ReceiverIsAny_Baseline;
+ return Builtins::kCall_ReceiverIsAny_Baseline_Compact;
break;
case ConvertReceiverMode::kNullOrUndefined:
- builtin = Builtins::kCall_ReceiverIsNullOrUndefined_Baseline;
+ return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline_Compact;
break;
case ConvertReceiverMode::kNotNullOrUndefined:
- builtin = Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline;
+ return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline_Compact;
break;
- default:
- UNREACHABLE();
}
- CallBuiltin(builtin,
- RegisterOperand(0), // kFunction
- arg_count, // kActualArgumentsCount
- slot, // kSlot
- args...); // Arguments
+}
+constexpr Builtins::Name ConvertReceiverModeToBuiltin(
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kAny:
+ return Builtins::kCall_ReceiverIsAny_Baseline;
+ break;
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline;
+ break;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline;
+ break;
+ }
+}
+} // namespace
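
Both mapping functions above are constexpr because their result feeds a non-type template argument (CallBuiltin<ConvertReceiverModeToBuiltin(kMode)> below), which must be a compile-time constant. A standalone illustration with toy enums:

    // Toy enums/builtins; illustrates constexpr-to-template-argument only.
    enum class Mode { kAny, kNullOrUndefined };
    enum Builtin { kCall_ReceiverIsAny, kCall_ReceiverIsNullOrUndefined };

    constexpr Builtin ToBuiltin(Mode mode) {
      return mode == Mode::kAny ? kCall_ReceiverIsAny
                                : kCall_ReceiverIsNullOrUndefined;
    }

    template <Builtin kBuiltin>
    int CallBuiltin() { return kBuiltin; }  // kBuiltin is a compile-time constant

    int main() {
      // Resolved entirely at compile time; a runtime Builtin value would
      // not compile here.
      return CallBuiltin<ToBuiltin(Mode::kAny)>();
    }
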
+
+template <ConvertReceiverMode kMode, typename... Args>
+void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
+ Args... args) {
+ uint32_t bitfield;
+ if (CallTrampoline_Baseline_CompactDescriptor::EncodeBitField(arg_count, slot,
+ &bitfield)) {
+ CallBuiltin<ConvertReceiverModeToCompactBuiltin(kMode)>(
+ RegisterOperand(0), // kFunction
+ bitfield, // kActualArgumentsCount | kSlot
+ args...); // Arguments
+ } else {
+ CallBuiltin<ConvertReceiverModeToBuiltin(kMode)>(
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ slot, // kSlot
+ args...); // Arguments
+ }
}
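
BuildCall now tries a "compact" variant first: when the argument count and feedback slot both fit, they are packed into a single 32-bit operand, saving one argument register per call site. A hedged sketch of the packing idea only; the field widths below are invented for illustration and do not match CallTrampoline_Baseline_CompactDescriptor:

    #include <cstdint>
    #include <cstdio>

    // Returns false when either value is too wide, signalling the caller
    // to fall back to the non-compact builtin.
    bool EncodeBitField(uint32_t arg_count, uint32_t slot, uint32_t* out) {
      constexpr uint32_t kArgCountBits = 8, kSlotBits = 24;  // made-up widths
      if (arg_count >= (1u << kArgCountBits) || slot >= (1u << kSlotBits))
        return false;
      *out = arg_count | (slot << kArgCountBits);
      return true;
    }

    int main() {
      uint32_t bitfield;
      if (EncodeBitField(2, 17, &bitfield))
        std::printf("compact call, bitfield=0x%x\n", bitfield);
    }
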
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args);
+ BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count,
- args);
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
+ args);
}
void BaselineCompiler::VisitCallProperty0() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(2), 0,
- RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(2), 0,
+ RegisterOperand(1));
}
void BaselineCompiler::VisitCallProperty1() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), 1,
- RegisterOperand(1), RegisterOperand(2));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(3), 1, RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallProperty2() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(4), 2,
- RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count,
- RootIndex::kUndefinedValue, args);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(3), arg_count, RootIndex::kUndefinedValue, args);
}
void BaselineCompiler::VisitCallUndefinedReceiver0() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(1), 0,
- RootIndex::kUndefinedValue);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(Index(1), 0,
+ RootIndex::kUndefinedValue);
}
void BaselineCompiler::VisitCallUndefinedReceiver1() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(2), 1,
- RootIndex::kUndefinedValue, RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1));
}
void BaselineCompiler::VisitCallUndefinedReceiver2() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), 2,
- RootIndex::kUndefinedValue, RegisterOperand(1), RegisterOperand(2));
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1),
+ RegisterOperand(2));
}
void BaselineCompiler::VisitCallNoFeedback() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kCall_ReceiverIsAny,
- RegisterOperand(0), // kFunction
- arg_count - 1, // kActualArgumentsCount
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsAny>(
+ RegisterOperand(0), // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
}
void BaselineCompiler::VisitCallWithSpread() {
@@ -1198,12 +1239,12 @@ void BaselineCompiler::VisitCallWithSpread() {
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- CallBuiltin(Builtins::kCallWithSpread_Baseline,
- RegisterOperand(0), // kFunction
- arg_count, // kActualArgumentsCount
- spread_register, // kSpread
- Index(3), // kSlot
- args);
+ CallBuiltin<Builtins::kCallWithSpread_Baseline>(
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ spread_register, // kSpread
+ Index(3), // kSlot
+ args);
}
void BaselineCompiler::VisitCallRuntime() {
@@ -1226,11 +1267,11 @@ void BaselineCompiler::VisitCallJSRuntime() {
__ LoadContext(kContextRegister);
__ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
iterator().GetNativeContextIndexOperand(0));
- CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined,
- kJavaScriptCallTargetRegister, // kFunction
- arg_count, // kActualArgumentsCount
- RootIndex::kUndefinedValue, // kReceiver
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsNullOrUndefined>(
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count, // kActualArgumentsCount
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitInvokeIntrinsic() {
@@ -1301,29 +1342,25 @@ void BaselineCompiler::VisitIntrinsicIsSmi(interpreter::RegisterList args) {
void BaselineCompiler::VisitIntrinsicCopyDataProperties(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCopyDataProperties, args);
+ CallBuiltin<Builtins::kCopyDataProperties>(args);
}
void BaselineCompiler::VisitIntrinsicCreateIterResultObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateIterResultObject, args);
+ CallBuiltin<Builtins::kCreateIterResultObject>(args);
}
void BaselineCompiler::VisitIntrinsicHasProperty(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kHasProperty, args);
-}
-
-void BaselineCompiler::VisitIntrinsicToString(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToString, args);
+ CallBuiltin<Builtins::kHasProperty>(args);
}
void BaselineCompiler::VisitIntrinsicToLength(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToLength, args);
+ CallBuiltin<Builtins::kToLength>(args);
}
void BaselineCompiler::VisitIntrinsicToObject(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToObject, args);
+ CallBuiltin<Builtins::kToObject>(args);
}
void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) {
@@ -1335,20 +1372,20 @@ void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) {
args = args.PopLeft();
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kCall_ReceiverIsAny,
- kJavaScriptCallTargetRegister, // kFunction
- arg_count - 1, // kActualArgumentsCount
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsAny>(
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
}
void BaselineCompiler::VisitIntrinsicCreateAsyncFromSyncIterator(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateAsyncFromSyncIteratorBaseline, args[0]);
+ CallBuiltin<Builtins::kCreateAsyncFromSyncIteratorBaseline>(args[0]);
}
void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateGeneratorObject, args);
+ CallBuiltin<Builtins::kCreateGeneratorObject>(args);
}
void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
@@ -1370,69 +1407,69 @@ void BaselineCompiler::VisitIntrinsicGeneratorClose(
void BaselineCompiler::VisitIntrinsicGetImportMetaObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kGetImportMetaObjectBaseline);
+ CallBuiltin<Builtins::kGetImportMetaObjectBaseline>();
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitCaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionAwaitCaught, args);
+ CallBuiltin<Builtins::kAsyncFunctionAwaitCaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitUncaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionAwaitUncaught, args);
+ CallBuiltin<Builtins::kAsyncFunctionAwaitUncaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionEnter(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionEnter, args);
+ CallBuiltin<Builtins::kAsyncFunctionEnter>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionReject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionReject, args);
+ CallBuiltin<Builtins::kAsyncFunctionReject>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionResolve(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionResolve, args);
+ CallBuiltin<Builtins::kAsyncFunctionResolve>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitCaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorAwaitCaught, args);
+ CallBuiltin<Builtins::kAsyncGeneratorAwaitCaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitUncaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorAwaitUncaught, args);
+ CallBuiltin<Builtins::kAsyncGeneratorAwaitUncaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorReject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorReject, args);
+ CallBuiltin<Builtins::kAsyncGeneratorReject>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorResolve, args);
+ CallBuiltin<Builtins::kAsyncGeneratorResolve>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorYield, args);
+ CallBuiltin<Builtins::kAsyncGeneratorYield>(args);
}
void BaselineCompiler::VisitConstruct() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kConstruct_Baseline,
- RegisterOperand(0), // kFunction
- kInterpreterAccumulatorRegister, // kNewTarget
- arg_count, // kActualArgumentsCount
- Index(3), // kSlot
- RootIndex::kUndefinedValue, // kReceiver
- args);
+ CallBuiltin<Builtins::kConstruct_Baseline>(
+ RegisterOperand(0), // kFunction
+ kInterpreterAccumulatorRegister, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitConstructWithSpread() {
@@ -1444,51 +1481,50 @@ void BaselineCompiler::VisitConstructWithSpread() {
uint32_t arg_count = args.register_count();
+ using Descriptor =
+ CallInterfaceDescriptorFor<Builtins::kConstructWithSpread_Baseline>::type;
Register new_target =
- Builtins::CallInterfaceDescriptorFor(
- Builtins::kConstructWithSpread_Baseline)
- .GetRegisterParameter(
- ConstructWithSpread_BaselineDescriptor::kNewTarget);
+ Descriptor::GetRegisterParameter(Descriptor::kNewTarget);
__ Move(new_target, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kConstructWithSpread_Baseline,
- RegisterOperand(0), // kFunction
- new_target, // kNewTarget
- arg_count, // kActualArgumentsCount
- Index(3), // kSlot
- spread_register, // kSpread
- RootIndex::kUndefinedValue, // kReceiver
- args);
-}
-
-void BaselineCompiler::BuildCompare(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name, RegisterOperand(0), // lhs
- kInterpreterAccumulatorRegister, // rhs
- Index(1)); // slot
+ CallBuiltin<Builtins::kConstructWithSpread_Baseline>(
+ RegisterOperand(0), // kFunction
+ new_target, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ spread_register, // kSpread
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitTestEqual() {
- BuildCompare(Builtins::kEqual_Baseline);
+ CallBuiltin<Builtins::kEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestEqualStrict() {
- BuildCompare(Builtins::kStrictEqual_Baseline);
+ CallBuiltin<Builtins::kStrictEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestLessThan() {
- BuildCompare(Builtins::kLessThan_Baseline);
+ CallBuiltin<Builtins::kLessThan_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestGreaterThan() {
- BuildCompare(Builtins::kGreaterThan_Baseline);
+ CallBuiltin<Builtins::kGreaterThan_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestLessThanOrEqual() {
- BuildCompare(Builtins::kLessThanOrEqual_Baseline);
+ CallBuiltin<Builtins::kLessThanOrEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestGreaterThanOrEqual() {
- BuildCompare(Builtins::kGreaterThanOrEqual_Baseline);
+ CallBuiltin<Builtins::kGreaterThanOrEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestReferenceEqual() {
@@ -1502,21 +1538,21 @@ void BaselineCompiler::VisitTestReferenceEqual() {
}
void BaselineCompiler::VisitTestInstanceOf() {
- Register callable =
- Builtins::CallInterfaceDescriptorFor(Builtins::kInstanceOf_Baseline)
- .GetRegisterParameter(Compare_BaselineDescriptor::kRight);
+ using Descriptor =
+ CallInterfaceDescriptorFor<Builtins::kInstanceOf_Baseline>::type;
+ Register callable = Descriptor::GetRegisterParameter(Descriptor::kRight);
__ Move(callable, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kInstanceOf_Baseline,
- RegisterOperand(0), // object
- callable, // callable
- Index(1)); // slot
+
+ CallBuiltin<Builtins::kInstanceOf_Baseline>(RegisterOperand(0), // object
+ callable, // callable
+ Index(1)); // slot
}
void BaselineCompiler::VisitTestIn() {
- CallBuiltin(Builtins::kKeyedHasICBaseline,
- kInterpreterAccumulatorRegister, // object
- RegisterOperand(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kKeyedHasICBaseline>(
+ kInterpreterAccumulatorRegister, // object
+ RegisterOperand(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitTestUndetectable() {
@@ -1727,36 +1763,36 @@ void BaselineCompiler::VisitTestTypeOf() {
void BaselineCompiler::VisitToName() {
SaveAccumulatorScope save_accumulator(&basm_);
- CallBuiltin(Builtins::kToName, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToName>(kInterpreterAccumulatorRegister);
StoreRegister(0, kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitToNumber() {
- CallBuiltin(Builtins::kToNumber_Baseline, kInterpreterAccumulatorRegister,
- Index(0));
+ CallBuiltin<Builtins::kToNumber_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToNumeric() {
- CallBuiltin(Builtins::kToNumeric_Baseline, kInterpreterAccumulatorRegister,
- Index(0));
+ CallBuiltin<Builtins::kToNumeric_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToObject() {
SaveAccumulatorScope save_accumulator(&basm_);
- CallBuiltin(Builtins::kToObject, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToObject>(kInterpreterAccumulatorRegister);
StoreRegister(0, kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitToString() {
- CallBuiltin(Builtins::kToString, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToString>(kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitCreateRegExpLiteral() {
- CallBuiltin(Builtins::kCreateRegExpLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<HeapObject>(0), // pattern
- FlagAsSmi(2)); // flags
+ CallBuiltin<Builtins::kCreateRegExpLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // pattern
+ FlagAsSmi(2)); // flags
}
void BaselineCompiler::VisitCreateArrayLiteral() {
@@ -1765,11 +1801,11 @@ void BaselineCompiler::VisitCreateArrayLiteral() {
interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
if (flags &
interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) {
- CallBuiltin(Builtins::kCreateShallowArrayLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<HeapObject>(0), // constant elements
- Smi::FromInt(flags_raw)); // flags
+ CallBuiltin<Builtins::kCreateShallowArrayLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // constant elements
+ Smi::FromInt(flags_raw)); // flags
} else {
CallRuntime(Runtime::kCreateArrayLiteral,
FeedbackVector(), // feedback vector
@@ -1780,13 +1816,13 @@ void BaselineCompiler::VisitCreateArrayLiteral() {
}
void BaselineCompiler::VisitCreateArrayFromIterable() {
- CallBuiltin(Builtins::kIterableToListWithSymbolLookup,
- kInterpreterAccumulatorRegister); // iterable
+ CallBuiltin<Builtins::kIterableToListWithSymbolLookup>(
+ kInterpreterAccumulatorRegister); // iterable
}
void BaselineCompiler::VisitCreateEmptyArrayLiteral() {
- CallBuiltin(Builtins::kCreateEmptyArrayLiteral, FeedbackVector(),
- IndexAsTagged(0));
+ CallBuiltin<Builtins::kCreateEmptyArrayLiteral>(FeedbackVector(),
+ IndexAsTagged(0));
}
void BaselineCompiler::VisitCreateObjectLiteral() {
@@ -1795,11 +1831,11 @@ void BaselineCompiler::VisitCreateObjectLiteral() {
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags));
if (flags &
interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::kMask) {
- CallBuiltin(Builtins::kCreateShallowObjectLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<ObjectBoilerplateDescription>(0), // boilerplate
- Smi::FromInt(flags_raw)); // flags
+ CallBuiltin<Builtins::kCreateShallowObjectLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<ObjectBoilerplateDescription>(0), // boilerplate
+ Smi::FromInt(flags_raw)); // flags
} else {
CallRuntime(Runtime::kCreateObjectLiteral,
FeedbackVector(), // feedback vector
@@ -1810,39 +1846,39 @@ void BaselineCompiler::VisitCreateObjectLiteral() {
}
void BaselineCompiler::VisitCreateEmptyObjectLiteral() {
- CallBuiltin(Builtins::kCreateEmptyLiteralObject);
+ CallBuiltin<Builtins::kCreateEmptyLiteralObject>();
}
void BaselineCompiler::VisitCloneObject() {
uint32_t flags = Flag(1);
int32_t raw_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags);
- CallBuiltin(Builtins::kCloneObjectICBaseline,
- RegisterOperand(0), // source
- Smi::FromInt(raw_flags), // flags
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kCloneObjectICBaseline>(
+ RegisterOperand(0), // source
+ Smi::FromInt(raw_flags), // flags
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitGetTemplateObject() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
- CallBuiltin(Builtins::kGetTemplateObject,
- shared_function_info_, // shared function info
- Constant<HeapObject>(0), // description
- Index(1), // slot
- FeedbackVector()); // feedback_vector
+ CallBuiltin<Builtins::kGetTemplateObject>(
+ shared_function_info_, // shared function info
+ Constant<HeapObject>(0), // description
+ Index(1), // slot
+ FeedbackVector()); // feedback_vector
}
void BaselineCompiler::VisitCreateClosure() {
Register feedback_cell =
- Builtins::CallInterfaceDescriptorFor(Builtins::kFastNewClosure)
- .GetRegisterParameter(FastNewClosureDescriptor::kFeedbackCell);
+ FastNewClosureBaselineDescriptor::GetRegisterParameter(
+ FastNewClosureBaselineDescriptor::kFeedbackCell);
LoadClosureFeedbackArray(feedback_cell);
__ LoadFixedArrayElement(feedback_cell, feedback_cell, Index(1));
uint32_t flags = Flag(2);
if (interpreter::CreateClosureFlags::FastNewClosureBit::decode(flags)) {
- CallBuiltin(Builtins::kFastNewClosure, Constant<SharedFunctionInfo>(0),
- feedback_cell);
+ CallBuiltin<Builtins::kFastNewClosureBaseline>(
+ Constant<SharedFunctionInfo>(0), feedback_cell);
} else {
Runtime::FunctionId function_id =
interpreter::CreateClosureFlags::PretenuredBit::decode(flags)
@@ -1868,7 +1904,7 @@ void BaselineCompiler::VisitCreateFunctionContext() {
if (slot_count < static_cast<uint32_t>(
ConstructorBuiltins::MaximumFunctionContextSlots())) {
DCHECK_EQ(info->scope_type(), ScopeType::FUNCTION_SCOPE);
- CallBuiltin(Builtins::kFastNewFunctionContextFunction, info, slot_count);
+ CallBuiltin<Builtins::kFastNewFunctionContextFunction>(info, slot_count);
} else {
CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
}
@@ -1880,7 +1916,7 @@ void BaselineCompiler::VisitCreateEvalContext() {
if (slot_count < static_cast<uint32_t>(
ConstructorBuiltins::MaximumFunctionContextSlots())) {
DCHECK_EQ(info->scope_type(), ScopeType::EVAL_SCOPE);
- CallBuiltin(Builtins::kFastNewFunctionContextEval, info, slot_count);
+ CallBuiltin<Builtins::kFastNewFunctionContextEval>(info, slot_count);
} else {
CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
}
@@ -1896,16 +1932,16 @@ void BaselineCompiler::VisitCreateMappedArguments() {
if (shared_function_info_->has_duplicate_parameters()) {
CallRuntime(Runtime::kNewSloppyArguments, __ FunctionOperand());
} else {
- CallBuiltin(Builtins::kFastNewSloppyArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewSloppyArguments>(__ FunctionOperand());
}
}
void BaselineCompiler::VisitCreateUnmappedArguments() {
- CallBuiltin(Builtins::kFastNewStrictArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewStrictArguments>(__ FunctionOperand());
}
void BaselineCompiler::VisitCreateRestParameter() {
- CallBuiltin(Builtins::kFastNewRestArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewRestArguments>(__ FunctionOperand());
}
void BaselineCompiler::VisitJumpLoop() {
@@ -1919,7 +1955,7 @@ void BaselineCompiler::VisitJumpLoop() {
int loop_depth = iterator().GetImmediateOperand(1);
__ CompareByte(osr_level, loop_depth);
__ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed);
- CallBuiltin(Builtins::kBaselineOnStackReplacement);
+ CallBuiltin<Builtins::kBaselineOnStackReplacement>();
__ RecordComment("]");
__ Bind(&osr_not_armed);
@@ -1972,16 +2008,14 @@ void BaselineCompiler::VisitJumpIfToBooleanFalseConstant() {
void BaselineCompiler::VisitJumpIfToBooleanTrue() {
Label dont_jump;
- JumpIfToBoolean(false, kInterpreterAccumulatorRegister, &dont_jump,
- Label::kNear);
+ JumpIfToBoolean(false, &dont_jump, Label::kNear);
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&dont_jump);
}
void BaselineCompiler::VisitJumpIfToBooleanFalse() {
Label dont_jump;
- JumpIfToBoolean(true, kInterpreterAccumulatorRegister, &dont_jump,
- Label::kNear);
+ JumpIfToBoolean(true, &dont_jump, Label::kNear);
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&dont_jump);
}
@@ -2057,13 +2091,13 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
}
void BaselineCompiler::VisitForInEnumerate() {
- CallBuiltin(Builtins::kForInEnumerate, RegisterOperand(0));
+ CallBuiltin<Builtins::kForInEnumerate>(RegisterOperand(0));
}
void BaselineCompiler::VisitForInPrepare() {
StoreRegister(0, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister,
- IndexAsTagged(1), FeedbackVector());
+ CallBuiltin<Builtins::kForInPrepare>(kInterpreterAccumulatorRegister,
+ IndexAsTagged(1), FeedbackVector());
interpreter::Register first = iterator().GetRegisterOperand(0);
interpreter::Register second(first.index() + 1);
interpreter::Register third(first.index() + 2);
@@ -2085,13 +2119,12 @@ void BaselineCompiler::VisitForInContinue() {
void BaselineCompiler::VisitForInNext() {
interpreter::Register cache_type, cache_array;
std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2);
- CallBuiltin(Builtins::kForInNext,
- Index(3), // vector slot
- RegisterOperand(0), // object
- cache_array, // cache array
- cache_type, // cache type
- RegisterOperand(1), // index
- FeedbackVector()); // feedback vector
+ CallBuiltin<Builtins::kForInNext>(Index(3), // vector slot
+ RegisterOperand(0), // object
+ cache_array, // cache array
+ cache_type, // cache type
+ RegisterOperand(1), // index
+ FeedbackVector()); // feedback vector
}
void BaselineCompiler::VisitForInStep() {
@@ -2131,8 +2164,8 @@ void BaselineCompiler::VisitReturn() {
int parameter_count_without_receiver =
parameter_count - 1; // Exclude the receiver to simplify the
// computation. We'll account for it at the end.
- TailCallBuiltin(Builtins::kBaselineLeaveFrame,
- parameter_count_without_receiver, -profiling_weight);
+ TailCallBuiltin<Builtins::kBaselineLeaveFrame>(
+ parameter_count_without_receiver, -profiling_weight);
__ RecordComment("]");
}
@@ -2235,10 +2268,11 @@ void BaselineCompiler::VisitSuspendGenerator() {
int bytecode_offset =
BytecodeArray::kHeaderSize + iterator().current_offset();
- CallBuiltin(Builtins::kSuspendGeneratorBaseline, generator_object,
- static_cast<int>(Uint(3)), // suspend_id
- bytecode_offset,
- static_cast<int>(RegisterCount(2))); // register_count
+ CallBuiltin<Builtins::kSuspendGeneratorBaseline>(
+ generator_object,
+ static_cast<int>(Uint(3)), // suspend_id
+ bytecode_offset,
+ static_cast<int>(RegisterCount(2))); // register_count
}
VisitReturn();
}
@@ -2248,26 +2282,27 @@ void BaselineCompiler::VisitResumeGenerator() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register generator_object = scratch_scope.AcquireScratch();
LoadRegister(generator_object, 0);
- CallBuiltin(Builtins::kResumeGeneratorBaseline, generator_object,
- static_cast<int>(RegisterCount(2))); // register_count
+ CallBuiltin<Builtins::kResumeGeneratorBaseline>(
+ generator_object,
+ static_cast<int>(RegisterCount(2))); // register_count
}
void BaselineCompiler::VisitGetIterator() {
- CallBuiltin(Builtins::kGetIteratorBaseline,
- RegisterOperand(0), // receiver
- IndexAsTagged(1), // load_slot
- IndexAsTagged(2)); // call_slot
+ CallBuiltin<Builtins::kGetIteratorBaseline>(RegisterOperand(0), // receiver
+ IndexAsTagged(1), // load_slot
+ IndexAsTagged(2)); // call_slot
}
void BaselineCompiler::VisitDebugger() {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kHandleDebuggerStatement);
+ CallRuntime(Runtime::kHandleDebuggerStatement);
}
void BaselineCompiler::VisitIncBlockCounter() {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
- IndexAsSmi(0)); // coverage array slot
+ CallBuiltin<Builtins::kIncBlockCounter>(
+ __ FunctionOperand(),
+ IndexAsSmi(0)); // coverage array slot
}
void BaselineCompiler::VisitAbort() {
diff --git a/chromium/v8/src/baseline/baseline-compiler.h b/chromium/v8/src/baseline/baseline-compiler.h
index dbb2f64f6c5..c86d9417e8a 100644
--- a/chromium/v8/src/baseline/baseline-compiler.h
+++ b/chromium/v8/src/baseline/baseline-compiler.h
@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
@@ -39,8 +39,10 @@ class BytecodeOffsetTableBuilder {
previous_pc_ = pc_offset;
}
- template <typename LocalIsolate>
- Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<ByteArray> ToBytecodeOffsetTable(IsolateT* isolate);
+
+ void Reserve(size_t size) { bytes_.reserve(size); }
private:
size_t previous_pc_ = 0;
@@ -121,31 +123,21 @@ class BaselineCompiler {
void SelectBooleanConstant(
Register output, std::function<void(Label*, Label::Distance)> jump_func);
- // Returns ToBoolean result into kInterpreterAccumulatorRegister.
- void JumpIfToBoolean(bool do_jump_if_true, Register reg, Label* label,
+ // Jumps based on calling ToBoolean on kInterpreterAccumulatorRegister.
+ void JumpIfToBoolean(bool do_jump_if_true, Label* label,
Label::Distance distance = Label::kFar);
// Call helpers.
- template <typename... Args>
- void CallBuiltin(Builtins::Name builtin, Args... args);
+ template <Builtins::Name kBuiltin, typename... Args>
+ void CallBuiltin(Args... args);
template <typename... Args>
void CallRuntime(Runtime::FunctionId function, Args... args);
- template <typename... Args>
- void TailCallBuiltin(Builtins::Name builtin, Args... args);
+ template <Builtins::Name kBuiltin, typename... Args>
+ void TailCallBuiltin(Args... args);
- void BuildBinop(
- Builtins::Name builtin_name, bool fast_path = false,
- bool check_overflow = false,
- std::function<void(Register, Register)> instruction = [](Register,
- Register) {});
- void BuildUnop(Builtins::Name builtin_name);
- void BuildCompare(Builtins::Name builtin_name);
- void BuildBinopWithConstant(Builtins::Name builtin_name);
-
- template <typename... Args>
- void BuildCall(ConvertReceiverMode mode, uint32_t slot, uint32_t arg_count,
- Args... args);
+ template <ConvertReceiverMode kMode, typename... Args>
+ void BuildCall(uint32_t slot, uint32_t arg_count, Args... args);
#ifdef V8_TRACE_UNOPTIMIZED
void TraceBytecode(Runtime::FunctionId function_id);
diff --git a/chromium/v8/src/baseline/baseline-osr-inl.h b/chromium/v8/src/baseline/baseline-osr-inl.h
new file mode 100644
index 00000000000..d37007f9cf6
--- /dev/null
+++ b/chromium/v8/src/baseline/baseline-osr-inl.h
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef V8_BASELINE_BASELINE_OSR_INL_H_
+#define V8_BASELINE_BASELINE_OSR_INL_H_
+
+#include "src/execution/frames.h"
+#include "src/execution/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline void OSRInterpreterFrameToBaseline(Isolate* isolate,
+ Handle<JSFunction> function,
+ UnoptimizedFrame* frame) {
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate));
+ if (Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ if (V8_LIKELY(FLAG_use_osr)) {
+ DCHECK_NOT_NULL(frame);
+ if (FLAG_trace_osr) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
+ frame->GetBytecodeOffset());
+ }
+ frame->GetBytecodeArray().set_osr_loop_nesting_level(
+ AbstractCode::kMaxLoopNestingMarker);
+ }
+ }
+}
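+
+// Illustrative call site (not part of this patch): an OSR trigger would
+// locate the current unoptimized frame and hand it to this helper, e.g.
+//
+//   JavaScriptFrameIterator it(isolate);
+//   UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+//   OSRInterpreterFrameToBaseline(
+//       isolate, handle(frame->function(), isolate), frame);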
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_BASELINE_OSR_INL_H_
diff --git a/chromium/v8/src/baseline/baseline.cc b/chromium/v8/src/baseline/baseline.cc
index b5355660f94..c7cc130c5ed 100644
--- a/chromium/v8/src/baseline/baseline.cc
+++ b/chromium/v8/src/baseline/baseline.cc
@@ -5,14 +5,16 @@
#include "src/baseline/baseline.h"
#include "src/handles/maybe-handles.h"
+#include "src/objects/shared-function-info.h"
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
+#include "src/debug/debug.h"
#include "src/heap/factory-inl.h"
#include "src/logging/counters.h"
#include "src/objects/script-inl.h"
@@ -21,10 +23,36 @@
namespace v8 {
namespace internal {
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
+ DisallowGarbageCollection no_gc;
+
+ if (!FLAG_sparkplug) return false;
+
+ // Check that short builtin calls are enabled if needed.
+ if (FLAG_sparkplug_needs_short_builtins &&
+ !isolate->is_short_builtin_calls_enabled()) {
+ return false;
+ }
+
+ // Check if we actually have bytecode.
+ if (!shared.HasBytecodeArray()) return false;
+
+ // Do not optimize when debugger needs to hook into every call.
+ if (isolate->debug()->needs_check_on_function_call()) return false;
+
+ // Functions with breakpoints have to stay interpreted.
+ if (shared.HasBreakInfo()) return false;
+
+  // Do not baseline compile if the function doesn't pass the
+  // sparkplug_filter (FLAG_sparkplug itself was already checked above).
+ if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
+
+ return true;
+}
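+
+// Illustrative only (not from this patch): a caller would typically gate
+// baseline compilation on this predicate, e.g.
+//
+//   if (CanCompileWithBaseline(isolate, *shared)) {
+//     MaybeHandle<Code> code = GenerateBaselineCode(isolate, shared);
+//   }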
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kCompileBaseline);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
baseline::BaselineCompiler compiler(
isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
@@ -48,6 +76,10 @@ void EmitReturnBaseline(MacroAssembler* masm) {
namespace v8 {
namespace internal {
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
+ return false;
+}
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
UNREACHABLE();
diff --git a/chromium/v8/src/baseline/baseline.h b/chromium/v8/src/baseline/baseline.h
index 2dba2d9674b..10a6e25e4fb 100644
--- a/chromium/v8/src/baseline/baseline.h
+++ b/chromium/v8/src/baseline/baseline.h
@@ -14,6 +14,8 @@ class Code;
class SharedFunctionInfo;
class MacroAssembler;
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared);
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
diff --git a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index 2cd34aef710..8babb4a5b7b 100644
--- a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -122,13 +122,13 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ jmp(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
@@ -147,7 +147,7 @@ void BaselineAssembler::CmpObjectType(Register object,
}
void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ movd(xmm0, eax);
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, eax);
@@ -320,7 +320,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register scratch = scratch_scope.AcquireScratch();
DCHECK(!AreAliased(scratch, target, value));
__ mov(FieldOperand(target, offset), value);
- __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+ __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
index 733c05fe185..4d09f536653 100644
--- a/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
+++ b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
@@ -18,9 +18,9 @@ namespace baseline {
void BaselineCompiler::Prologue() {
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
new file mode 100644
index 00000000000..e0667d3472b
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -0,0 +1,615 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/interface-descriptors.h"
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+constexpr Register kTestReg = t0;
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, for the first one add a
+ // couple of extra registers.
+ wrapped_scope_.Include(t2, t4);
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.is_reg() && op.rm() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // Nop
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ jmp(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), kTestReg, Operand((int64_t)0));
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+  __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ __ CallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative jump.
+ __ TailCallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+    // t6 is used for function calls on RISCV64, e.g. 'jalr t6' or 'jal t6'.
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ __ And(kTestReg, value, Operand(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Sub64(kTestReg, type, Operand(instance_type));
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+                                        InstanceType instance_type) {
+  ScratchRegisterScope temps(this);
+  Register type = temps.AcquireScratch();
+  // The instance type is a 16-bit field; load it zero-extended.
+  __ Lhu(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Sub64(kTestReg, type, Operand(instance_type));
+}
+
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+  ScratchRegisterScope temps(this);
+  Register temp = temps.AcquireScratch();
+  // Compare tagged against tagged; untagging only the immediate would
+  // compare unlike representations.
+  __ li(temp, Operand(smi));
+  __ Sub64(kTestReg, value, temp);
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ Ld(temp, operand);
+ __ Sub64(kTestReg, value, temp);
+}
+
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, lhs, rhs);
+ } else {
+ __ Sub64(kTestReg, lhs, rhs);
+ }
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, value, tmp);
+ } else {
+ __ Sub64(kTestReg, value, tmp);
+ }
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, tmp, value);
+ } else {
+ __ Sub64(kTestReg, tmp, value);
+ }
+}
+
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ Sub64(kTestReg, value, Operand(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ Sd(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct CountPushHelper;
+template <>
+struct CountPushHelper<> {
+ static int Count() { return 0; }
+};
+template <typename Arg, typename... Args>
+struct CountPushHelper<Arg, Args...> {
+ static int Count(Arg arg, Args... args) {
+ return 1 + CountPushHelper<Args...>::Count(args...);
+ }
+};
+template <typename... Args>
+struct CountPushHelper<interpreter::RegisterList, Args...> {
+ static int Count(interpreter::RegisterList list, Args... args) {
+ return list.register_count() + CountPushHelper<Args...>::Count(args...);
+ }
+};
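+
+// For example (illustrative): Count(Smi, Register, RegisterList-of-3) yields
+// 1 + 1 + 3 = 5 slots, since a RegisterList contributes one slot per register
+// and every other argument contributes exactly one.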
+
+template <typename... Args>
+struct PushAllHelper;
+template <typename... Args>
+void PushAll(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::Push(basm, args...);
+}
+template <typename... Args>
+void PushAllReverse(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::PushReverse(basm, args...);
+}
+
+template <>
+struct PushAllHelper<> {
+ static void Push(BaselineAssembler* basm) {}
+ static void PushReverse(BaselineAssembler* basm) {}
+};
+
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+ masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+ masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+  masm->li(kScratchReg, static_cast<int64_t>(immediate));
+ PushSingle(masm, kScratchReg);
+}
+
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+ masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+ masm->Ld(kScratchReg, operand);
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+ return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static void Push(BaselineAssembler* basm, Arg arg) {
+ PushSingle(basm->masm(), arg);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg) {
+    // A single value needs no reordering; forward to Push.
+ return Push(basm, arg);
+ }
+};
+template <typename Arg1, typename Arg2, typename... Args>
+struct PushAllHelper<Arg1, Arg2, Args...> {
+ static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg1),
+ ToRegister(basm, &scope, arg2));
+ }
+ PushAll(basm, args...);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ PushAllReverse(basm, args...);
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg2),
+ ToRegister(basm, &scope, arg1));
+ }
+ }
+};
+// Currently RegisterLists are always the last argument, so we don't
+// specialize for the case where they're not. We do still specialize for the
+// aligned and unaligned cases.
+template <typename Arg>
+struct PushAllHelper<Arg, interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 1);
+ PushAll(basm, arg, list[0], list.PopLeft());
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ if (list.register_count() == 0) {
+ PushAllReverse(basm, arg);
+ } else {
+ PushAllReverse(basm, arg, list[0], list.PopLeft());
+ }
+ }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 0);
+ for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
+ PushAll(basm, list[reg_index], list[reg_index + 1]);
+ }
+ }
+ static void PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ int reg_index = list.register_count() - 1;
+ if (reg_index % 2 == 0) {
+      // Push the trailing register on its own so the remainder pairs up.
+ PushAllReverse(basm, list[reg_index]);
+ reg_index--;
+ }
+ for (; reg_index >= 1; reg_index -= 2) {
+ PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ }
+ }
+};
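+
+// For example (illustrative): reverse-pushing a five-register list pushes
+// list[4] on its own, then the pairs (list[3], list[2]) and
+// (list[1], list[0]), so list[0] ends up on top of the stack.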
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
+ T... tail) {
+ basm->masm()->Pop(reg1, reg2);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  // Count the pushes so the caller knows how many slots were used. Unlike
+  // arm64 there is no alignment padding to insert, so both parities push the
+  // same way.
+  int push_count = detail::CountPushHelper<T...>::Count(vals...);
+  detail::PushAll(this, vals...);
+ return push_count;
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet, so a
+  // plain load stands in for the tagged-field accessor below.
+ // __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+  // Load a single byte, not a doubleword.
+  __ Lb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ li(tmp, Operand(value));
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet.
+ __ Sd(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet.
+ __ Sd(value, FieldMemOperand(target, offset));
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ RecordWriteField(target, offset, value, tmp, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+  // FIXME(riscv64): riscv64 doesn't implement pointer compression yet.
+ __ Sd(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+  Register interrupt_budget = scratch_scope.AcquireScratch();
+  // The interrupt budget is a 32-bit field, so use 32-bit accesses.
+  __ Lw(interrupt_budget,
+        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  // RISC-V has no condition flags; leave the new budget in kTestReg so the
+  // caller's JumpIf can branch on its sign.
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
+        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  __ Move(kTestReg, interrupt_budget);
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+  Register interrupt_budget = scratch_scope.AcquireScratch();
+  // The interrupt budget is a 32-bit field, so use 32-bit accesses.
+  __ Lw(interrupt_budget,
+        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  // As above, leave the new budget in kTestReg for the caller's JumpIf.
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
+        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  __ Move(kTestReg, interrupt_budget);
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (SmiValuesAre31Bits()) {
+ __ Add32(lhs, lhs, Operand(rhs));
+ } else {
+ __ Add64(lhs, lhs, Operand(rhs));
+ }
+}
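+
+// Why adding the raw tagged words is sound (illustrative): a Smi stores its
+// payload shifted left by the tag size, and (a << k) + (b << k) is
+// ((a + b) << k), so the sum of two tagged Smis is already correctly tagged.
+// With 31-bit Smis the payload sits in the low word, hence the 32-bit add.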
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub64(reg, reg, Operand(case_value_base));
+ }
+
+ // Mostly copied from code-generator-riscv64.cc
+ ScratchRegisterScope scope(this);
+ Register temp = scope.AcquireScratch();
+ Label table;
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(int64_t(num_labels)));
+  int64_t imm64 = __ branch_long_offset(&table);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
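+  // Worked example (illustrative): for imm64 = 0x12345, Hi20 = 0x12 and
+  // Lo12 = 0x345, so (Hi20 << 12) + Lo12 == 0x12345. For imm64 = -0x7ff,
+  // Lo12 sign-extends to -0x7ff and the +0x800 makes Hi20 = 0; the rounding
+  // term compensates for Lo12 being sign-extended from bit 11.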
+  __ auipc(temp, Hi20);       // temp = PC + (Hi20 << 12)
+  __ addi(temp, temp, Lo12);  // temp = PC + Hi20 + Lo12, i.e. &table
+
+  int entry_size_log2 = 2;    // Each table entry is one 4-byte instruction.
+  Register temp2 = scope.AcquireScratch();
+  __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
+  __ Jump(temp2);
+ {
+    // Name the scope object; an unnamed temporary would end immediately.
+    TurboAssembler::BlockTrampolinePoolScope block_pools(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ bind(&table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+ __ bind(&fallthrough);
+ }
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+  // AddToInterruptBudget left the new budget in kTestReg; branch on its sign.
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+  __ masm()->Ld(actual_params_size,
+                MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add64(params_size, params_size, 1); // Include the receiver.
+ __ masm()->slli(params_size, params_size, kPointerSizeLog2);
+ __ masm()->Add64(sp, sp, params_size);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
diff --git a/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
new file mode 100644
index 00000000000..98ca62e3034
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ // Enter the frame here, since CallBuiltin will override lr.
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ // BaselineOutOfLinePrologue already pushed one undefined.
+ register_count -= 1;
+ if (has_new_target) {
+ if (new_target_index == 0) {
+ // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
+ // pushed.
+ __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
+ } else {
+ DCHECK_LE(new_target_index, register_count);
+ int index = 1;
+ for (; index + 2 <= new_target_index; index += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ if (index == new_target_index) {
+ __ masm()->Push(kJavaScriptCallNewTargetRegister,
+ kInterpreterAccumulatorRegister);
+ } else {
+ DCHECK_EQ(index, new_target_index - 1);
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kJavaScriptCallNewTargetRegister);
+ }
+ // We pushed "index" registers, minus the one the prologue pushed, plus
+ // the two registers that included new_target.
+ register_count -= (index - 1 + 2);
+ }
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ } else {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
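+    // Worked example (illustrative): with register_count = 20, the prefix
+    // below pushes 20 % 8 = 4 registers, then the unrolled body of 8 pushes
+    // runs 20 / 8 = 2 times, filling 4 + 16 = 20 slots in total.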
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ masm()->Sub64(scratch, scratch, 1);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ __ masm()->Add64(kScratchReg, sp,
+ RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size(),
+ 2 * kSystemPointerSize));
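+  // Illustrative: a hypothetical 9-word span from fp (72 bytes) rounds up to
+  // 80 bytes, so the check also holds when the frame was padded to keep the
+  // stack pointer 16-byte aligned.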
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
+ Operand(fp));
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
diff --git a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index 202f83c7615..98ed29a9cae 100644
--- a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -7,7 +7,6 @@
#include "src/base/macros.h"
#include "src/baseline/baseline-assembler.h"
-#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
namespace v8 {
@@ -129,7 +128,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
}
@@ -140,7 +139,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
}
@@ -160,7 +159,7 @@ void BaselineAssembler::CmpObjectType(Register object,
}
void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
@@ -201,7 +200,7 @@ void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
- __ Move(output, Immediate(value));
+ __ Move(output, value);
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov_tagged(output, source);
@@ -326,7 +325,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
DCHECK_NE(target, scratch);
DCHECK_NE(value, scratch);
__ StoreTaggedField(FieldOperand(target, offset), value);
- __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+ __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
index 73b43770e56..a4d547b0671 100644
--- a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -18,9 +18,9 @@ namespace baseline {
void BaselineCompiler::Prologue() {
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}