author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2021-09-01 11:08:40 +0200
committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2021-10-01 12:16:21 +0000
commit | 03c549e0392f92c02536d3f86d5e1d8dfa3435ac (patch)
tree | fe49d170a929b34ba82cd10db1a0bd8e3760fa4b /chromium/v8/src/baseline
parent | 5d013f5804a0d91fcf6c626b2d6fb6eca5c845b0 (diff)
download | qtwebengine-chromium-03c549e0392f92c02536d3f86d5e1d8dfa3435ac.tar.gz
BASELINE: Update Chromium to 91.0.4472.160
Change-Id: I0def1f08a2412aeed79a9ab95dd50eb5c3f65f31
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/baseline')
17 files changed, 1684 insertions, 240 deletions
diff --git a/chromium/v8/src/baseline/OWNERS b/chromium/v8/src/baseline/OWNERS
index f9e17a90b1a..6b48a30d8df 100644
--- a/chromium/v8/src/baseline/OWNERS
+++ b/chromium/v8/src/baseline/OWNERS
@@ -1,4 +1,5 @@
 cbruni@chromium.org
+ishell@chromium.org
 leszeks@chromium.org
 marja@chromium.org
 pthier@chromium.org
diff --git a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
new file mode 100644
index 00000000000..eca2b47cc0e
--- /dev/null
+++ b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -0,0 +1,483 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
+#define V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+  explicit ScratchRegisterScope(BaselineAssembler* assembler)
+      : assembler_(assembler),
+        prev_scope_(assembler->scratch_register_scope_),
+        wrapped_scope_(assembler->masm()) {
+    if (!assembler_->scratch_register_scope_) {
+      // If we haven't opened a scratch scope yet, for the first one add a
+      // couple of extra registers.
+      DCHECK(wrapped_scope_.CanAcquire());
+      wrapped_scope_.Include(r8, r9);
+      wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
+    }
+    assembler_->scratch_register_scope_ = this;
+  }
+  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+  Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+  BaselineAssembler* assembler_;
+  ScratchRegisterScope* prev_scope_;
+  UseScratchRegisterScope wrapped_scope_;
+};
+
+// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+  kEqual = static_cast<uint32_t>(eq),
+  kNotEqual = static_cast<uint32_t>(ne),
+
+  kLessThan = static_cast<uint32_t>(lt),
+  kGreaterThan = static_cast<uint32_t>(gt),
+  kLessThanEqual = static_cast<uint32_t>(le),
+  kGreaterThanEqual = static_cast<uint32_t>(ge),
+
+  kUnsignedLessThan = static_cast<uint32_t>(lo),
+  kUnsignedGreaterThan = static_cast<uint32_t>(hi),
+  kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
+  kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+
+  kOverflow = static_cast<uint32_t>(vs),
+  kNoOverflow = static_cast<uint32_t>(vc),
+
+  kZero = static_cast<uint32_t>(eq),
+  kNotZero = static_cast<uint32_t>(ne),
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+  // This is important for arm, where each internal::Condition value
+  // represents an encoded bit field value.
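+  // (Each arm condition is the 4-bit cc field already shifted into bits
+  // 28-31 of the instruction word, hence the bit-for-bit check below.)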
+  STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+  return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+  return op.rn() == target || op.rm() == target;
+}
+#endif
+
+}  // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+    interpreter::Register interpreter_register) {
+  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+  // NOP on arm.
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+  __ b(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+  __ b(AsMasmCondition(cc), target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+                                   Label* target, Label::Distance) {
+  __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+                                      Label* target, Label::Distance) {
+  __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+                                  Label::Distance) {
+  __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+                                     Label::Distance) {
+  __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+  // __ CallBuiltin(static_cast<int>(builtin));
+  __ RecordCommentForOffHeapTrampoline(builtin);
+  ScratchRegisterScope temps(this);
+  Register temp = temps.AcquireScratch();
+  __ LoadEntryFromBuiltinIndex(builtin, temp);
+  __ Call(temp);
+  if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+  __ RecordCommentForOffHeapTrampoline(builtin);
+  ScratchRegisterScope temps(this);
+  Register temp = temps.AcquireScratch();
+  __ LoadEntryFromBuiltinIndex(builtin, temp);
+  __ Jump(temp);
+  if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+  __ tst(value, Operand(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+                                      InstanceType instance_type,
+                                      Register map) {
+  ScratchRegisterScope temps(this);
+  Register type = temps.AcquireScratch();
+  __ CompareObjectType(object, map, type, instance_type);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+                                        InstanceType instance_type) {
+  ScratchRegisterScope temps(this);
+  Register type = temps.AcquireScratch();
+  if (emit_debug_code()) {
+    __ AssertNotSmi(map);
+    __ CompareObjectType(map, type, type, MAP_TYPE);
+    __ Assert(eq, AbortReason::kUnexpectedValue);
+  }
+  __ CompareInstanceType(map, type, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+  __ cmp(value, Operand(smi));
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+  ScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireScratch();
+  __ ldr(tmp, operand);
+  __ cmp(value, tmp);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+  __ AssertSmi(lhs);
+  __ AssertSmi(rhs);
+  __ cmp(lhs, rhs);
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+  ScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireScratch();
+  __ ldr(tmp, operand);
+  __ cmp(value, tmp);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+  ScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireScratch();
+  __ ldr(tmp, operand);
+  __ cmp(tmp, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+  __ cmp(value, Operand(byte));
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+  Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+  __ mov(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+  __ str(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+  __ mov(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+  __ mov(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+  __ mov(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+  __ mov(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+  __ mov(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Arg arg) {
+  Register reg = scope->AcquireScratch();
+  basm->Move(reg, arg);
+  return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Register reg) {
+  return reg;
+}
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+  static int Push(BaselineAssembler* basm) { return 0; }
+  static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    BaselineAssembler::ScratchRegisterScope scope(basm);
+    basm->masm()->Push(ToRegister(basm, &scope, arg));
+    return 1;
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
+    return Push(basm, arg);
+  }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+    PushAllHelper<Arg>::Push(basm, arg);
+    return 1 + PushAllHelper<Args...>::Push(basm, args...);
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+    PushAllHelper<Arg>::Push(basm, arg);
+    return nargs + 1;
+  }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+  static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+  static void Pop(BaselineAssembler* basm, Register reg) {
+    basm->masm()->Pop(reg);
+  }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+    PopAllHelper<Register>::Pop(basm, reg);
+    PopAllHelper<T...>::Pop(basm, tail...);
+  }
+};
+
+}  // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+  detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output,
+                                               Register source, int offset) {
+  __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+                                              int offset) {
+  __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+                                           int offset) {
+  __ ldr(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+                                      int offset) {
+  __ ldrb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+                                               Smi value) {
+  ScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireScratch();
+  __ mov(tmp, Operand(value));
+  __ str(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                         int offset,
+                                                         Register value) {
+  __ str(value, FieldMemOperand(target, offset));
+  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+                                                       int offset,
+                                                       Register value) {
+  __ str(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+  ScratchRegisterScope scratch_scope(this);
+  Register feedback_cell = scratch_scope.AcquireScratch();
+  LoadFunction(feedback_cell);
+  LoadTaggedPointerField(feedback_cell, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+
+  Register interrupt_budget = scratch_scope.AcquireScratch();
+  __ ldr(interrupt_budget,
+         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  // Remember to set flags as part of the add!
+  __ add(interrupt_budget, interrupt_budget, Operand(weight), SetCC);
+  __ str(interrupt_budget,
+         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+  ScratchRegisterScope scratch_scope(this);
+  Register feedback_cell = scratch_scope.AcquireScratch();
+  LoadFunction(feedback_cell);
+  LoadTaggedPointerField(feedback_cell, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+
+  Register interrupt_budget = scratch_scope.AcquireScratch();
+  __ ldr(interrupt_budget,
+         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+  // Remember to set flags as part of the add!
+  __ add(interrupt_budget, interrupt_budget, weight, SetCC);
+  __ str(interrupt_budget,
+         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+  __ add(lhs, lhs, Operand(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+                               Label** labels, int num_labels) {
+  Label fallthrough;
+  if (case_value_base > 0) {
+    __ sub(reg, reg, Operand(case_value_base));
+  }
+
+  // Mostly copied from code-generator-arm.cc
+  ScratchRegisterScope scope(this);
+  __ cmp(reg, Operand(num_labels));
+  JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
+  // Make sure to emit the constant pool first if necessary.
+  __ CheckConstPool(true, true);
+  __ BlockConstPoolFor(num_labels);
+  int entry_size_log2 = 2;
+  __ add(pc, pc, Operand(reg, LSL, entry_size_log2), LeaveCC, lo);
+  __ b(&fallthrough);
+  for (int i = 0; i < num_labels; ++i) {
+    __ b(labels[i]);
+  }
+  __ bind(&fallthrough);
+}
+
+#undef __
+
+#define __ basm.
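+// In EmitReturn below, __ therefore expands to the local BaselineAssembler
+// wrapper rather than the raw MacroAssembler.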
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+  BaselineAssembler basm(masm);
+
+  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+  __ RecordComment("[ Update Interrupt Budget");
+  __ AddToInterruptBudget(weight);
+
+  // Use compare flags set by add
+  Label skip_interrupt_label;
+  __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+  {
+    __ masm()->SmiTag(params_size);
+    __ Push(params_size, kInterpreterAccumulatorRegister);
+
+    __ LoadContext(kContextRegister);
+    __ LoadFunction(kJSFunctionRegister);
+    __ Push(kJSFunctionRegister);
+    __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+    __ Pop(kInterpreterAccumulatorRegister, params_size);
+    __ masm()->SmiUntag(params_size);
+  }
+  __ RecordComment("]");
+
+  __ Bind(&skip_interrupt_label);
+
+  BaselineAssembler::ScratchRegisterScope temps(&basm);
+  Register actual_params_size = temps.AcquireScratch();
+  // Compute the size of the actual parameters + receiver (in bytes).
+  __ Move(actual_params_size,
+          MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+  // If actual is bigger than formal, then we should use it to free up the
+  // stack arguments.
+  Label corrected_args_count;
+  __ masm()->cmp(params_size, actual_params_size);
+  __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
+  __ masm()->mov(params_size, actual_params_size);
+  __ Bind(&corrected_args_count);
+
+  // Leave the frame (also dropping the register file).
+  __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+  // Drop receiver + arguments.
+  __ masm()->add(params_size, params_size,
+                 Operand(1));  // Include the receiver.
+  __ masm()->Drop(params_size);
+  __ masm()->Ret();
+}
+
+#undef __
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
diff --git a/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
new file mode 100644
index 00000000000..ff2b6d1a831
--- /dev/null
+++ b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -0,0 +1,94 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
+#define V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+  // Enter the frame here, since CallBuiltin will override lr.
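+  // (EnterFrame pushes lr, so the caller's return address survives the
+  // builtin call below.)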
+  __ masm()->EnterFrame(StackFrame::BASELINE);
+  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+  int max_frame_size = bytecode_->frame_size() + max_call_args_;
+  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+              kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+              max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+  PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+  __ RecordComment("[ Fill frame");
+  // Inlined register frame fill
+  interpreter::Register new_target_or_generator_register =
+      bytecode_->incoming_new_target_or_generator_register();
+  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+  int register_count = bytecode_->register_count();
+  // Magic value
+  const int kLoopUnrollSize = 8;
+  const int new_target_index = new_target_or_generator_register.index();
+  const bool has_new_target = new_target_index != kMaxInt;
+  if (has_new_target) {
+    DCHECK_LE(new_target_index, register_count);
+    for (int i = 0; i < new_target_index; i++) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
+  }
+  if (register_count < 2 * kLoopUnrollSize) {
+    // If the frame is small enough, just unroll the frame fill completely.
+    for (int i = 0; i < register_count; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+  } else {
+    // Extract the first few registers to round to the unroll size.
+    int first_registers = register_count % kLoopUnrollSize;
+    for (int i = 0; i < first_registers; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    BaselineAssembler::ScratchRegisterScope temps(&basm_);
+    Register scratch = temps.AcquireScratch();
+
+    __ Move(scratch, register_count / kLoopUnrollSize);
+    // We enter the loop unconditionally, so make sure we need to loop at
+    // least once.
+    DCHECK_GT(register_count / kLoopUnrollSize, 0);
+    Label loop;
+    __ Bind(&loop);
+    for (int i = 0; i < kLoopUnrollSize; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    __ masm()->sub(scratch, scratch, Operand(1), SetCC);
+    __ JumpIf(Condition::kGreaterThan, &loop);
+  }
+  __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+  BaselineAssembler::ScratchRegisterScope temps(&basm_);
+  Register scratch = temps.AcquireScratch();
+
+  __ masm()->add(scratch, sp,
+                 Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                         bytecode_->frame_size()));
+  __ masm()->cmp(scratch, fp);
+  __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
+}
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
diff --git a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 021df8d9cf1..27b7c2b2d8d 100644
--- a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -23,6 +23,7 @@ class BaselineAssembler::ScratchRegisterScope {
       // If we haven't opened a scratch scope yet, for the first one add a
       // couple of extra registers.
       wrapped_scope_.Include(x14, x15);
+      wrapped_scope_.Include(x19);
     }
     assembler_->scratch_register_scope_ = this;
  }
@@ -37,7 +38,7 @@ class BaselineAssembler::ScratchRegisterScope {
 };
 
 // TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint8_t {
+enum class Condition : uint32_t {
   kEqual = eq,
   kNotEqual = ne,
 
@@ -87,6 +88,10 @@ void BaselineAssembler::Bind(Label* label) { __ BindJumpTarget(label); }
 
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ Bind(label); }
+
+void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
+
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ B(target);
 }
@@ -111,23 +116,40 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
 }
 
 void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
-  ScratchRegisterScope temps(this);
-  Register temp = temps.AcquireScratch();
-  __ LoadEntryFromBuiltinIndex(builtin, temp);
-  __ Call(temp);
+  if (masm()->options().short_builtin_calls) {
+    // Generate pc-relative call.
+    __ CallBuiltin(builtin);
+  } else {
+    ScratchRegisterScope temps(this);
+    Register temp = temps.AcquireScratch();
+    __ LoadEntryFromBuiltinIndex(builtin, temp);
+    __ Call(temp);
+  }
 }
 
 void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
-  // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" (i.e.
-  // `bti j`) landing pads for the tail-called code.
-  Register temp = x17;
-
-  // Make sure we don't use this register as a temporary.
-  UseScratchRegisterScope temps(masm());
-  temps.Exclude(temp);
-
-  __ LoadEntryFromBuiltinIndex(builtin, temp);
-  __ Jump(temp);
+  if (masm()->options().short_builtin_calls) {
+    // Generate pc-relative call.
+    __ TailCallBuiltin(builtin);
+  } else {
+    // The control flow integrity (CFI) feature allows us to "sign" code entry
+    // points as a target for calls, jumps or both. Arm64 has special
+    // instructions for this purpose, so-called "landing pads" (see
+    // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and
+    // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call"
+    // landing pads for CPP builtins. In order to allow tail calling to those
+    // builtins we have to use a workaround.
+    // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump"
+    // (i.e. `bti j`) landing pads for the tail-called code.
+    Register temp = x17;
+
+    // Make sure we don't use this register as a temporary.
+    UseScratchRegisterScope temps(masm());
+    temps.Exclude(temp);
+
+    __ LoadEntryFromBuiltinIndex(builtin, temp);
+    __ Jump(temp);
+  }
 }
 
 void BaselineAssembler::Test(Register value, int mask) {
diff --git a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
index 2ce652d1a0c..e567be41d24 100644
--- a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -14,14 +14,13 @@ namespace baseline {
 #define __ basm_.
 
 void BaselineCompiler::Prologue() {
-  __ masm()->Mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
-  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
   // Enter the frame here, since CallBuiltin will override lr.
   __ masm()->EnterFrame(StackFrame::BASELINE);
+  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+
+  int max_frame_size = bytecode_->frame_size() + max_call_args_;
   CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
               kJSFunctionRegister, kJavaScriptCallArgCountRegister,
-              kInterpreterBytecodeArrayRegister,
-              kJavaScriptCallNewTargetRegister);
+              max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
 
   __ masm()->AssertSpAligned();
   PrologueFillFrame();
diff --git a/chromium/v8/src/baseline/baseline-assembler-inl.h b/chromium/v8/src/baseline/baseline-assembler-inl.h
index d949425a19c..8fd54d63a2f 100644
--- a/chromium/v8/src/baseline/baseline-assembler-inl.h
+++ b/chromium/v8/src/baseline/baseline-assembler-inl.h
@@ -7,7 +7,8 @@
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
 
 #include <type_traits>
 #include <unordered_map>
@@ -22,6 +23,10 @@
 #include "src/baseline/x64/baseline-assembler-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/baseline/arm/baseline-assembler-arm-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -62,7 +67,7 @@ void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
   __ LoadRoot(output, index);
 }
 void BaselineAssembler::LoadNativeContextSlot(Register output,
                                               uint32_t index) {
-  __ LoadNativeContextSlot(index, output);
+  __ LoadNativeContextSlot(output, index);
 }
 
 void BaselineAssembler::Move(Register output, interpreter::Register source) {
diff --git a/chromium/v8/src/baseline/baseline-assembler.h b/chromium/v8/src/baseline/baseline-assembler.h
index de6bd239116..38874d556f0 100644
--- a/chromium/v8/src/baseline/baseline-assembler.h
+++ b/chromium/v8/src/baseline/baseline-assembler.h
@@ -1,4 +1,3 @@
-
 // Copyright 2021 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
@@ -8,15 +7,17 @@
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
 
 #include "src/codegen/macro-assembler.h"
+#include "src/objects/tagged-index.h"
 
 namespace v8 {
 namespace internal {
 namespace baseline {
 
-enum class Condition : uint8_t;
+enum class Condition : uint32_t;
 
 class BaselineAssembler {
  public:
@@ -39,6 +40,13 @@ class BaselineAssembler {
   inline void DebugBreak();
 
   inline void Bind(Label* label);
+  // Binds the label without marking it as a valid jump target.
+  // This is only useful when the position is already marked as a valid jump
+  // target (i.e. at the beginning of the bytecode).
+  inline void BindWithoutJumpTarget(Label* label);
+  // Marks the current position as a valid jump target on CFI enabled
+  // architectures.
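+  // (A no-op on architectures without CFI landing pads.)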
+  inline void JumpTarget();
   inline void JumpIf(Condition cc, Label* target,
                      Label::Distance distance = Label::kFar);
   inline void Jump(Label* target, Label::Distance distance = Label::kFar);
diff --git a/chromium/v8/src/baseline/baseline-compiler.cc b/chromium/v8/src/baseline/baseline-compiler.cc
index 60be8c8386b..3d599c11fd5 100644
--- a/chromium/v8/src/baseline/baseline-compiler.cc
+++ b/chromium/v8/src/baseline/baseline-compiler.cc
@@ -4,14 +4,16 @@
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
 
 #include "src/baseline/baseline-compiler.h"
 
+#include <algorithm>
 #include <type_traits>
-#include <unordered_map>
 
 #include "src/baseline/baseline-assembler-inl.h"
+#include "src/baseline/baseline-assembler.h"
 #include "src/builtins/builtins-constructor.h"
 #include "src/builtins/builtins-descriptors.h"
 #include "src/builtins/builtins.h"
@@ -22,7 +24,6 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/common/globals.h"
 #include "src/execution/frame-constants.h"
-#include "src/interpreter/bytecode-array-accessor.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/objects/code.h"
@@ -35,6 +36,10 @@
 #include "src/baseline/x64/baseline-compiler-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/baseline/arm/baseline-compiler-arm-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -220,7 +225,6 @@ void MoveArgumentsForDescriptor(BaselineAssembler* masm,
 
 }  // namespace detail
 
-
 BaselineCompiler::BaselineCompiler(
     Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
     Handle<BytecodeArray> bytecode)
@@ -232,24 +236,13 @@ BaselineCompiler::BaselineCompiler(
       basm_(&masm_),
       iterator_(bytecode_),
       zone_(isolate->allocator(), ZONE_NAME),
-      labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())),
-      handler_offsets_(&zone_) {
+      labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
   MemsetPointer(labels_, nullptr, bytecode_->length());
 }
 
 #define __ basm_.
 
 void BaselineCompiler::GenerateCode() {
-  HandlerTable table(*bytecode_);
-  {
-    RuntimeCallTimerScope runtimeTimer(
-        stats_, RuntimeCallCounterId::kCompileBaselinePrepareHandlerOffsets);
-    for (int i = 0; i < table.NumberOfRangeEntries(); ++i) {
-      int handler_offset = table.GetRangeHandler(i);
-      handler_offsets_.insert(handler_offset);
-    }
-  }
-
   {
     RuntimeCallTimerScope runtimeTimer(
         stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
@@ -267,13 +260,15 @@ void BaselineCompiler::GenerateCode() {
     RuntimeCallTimerScope runtimeTimer(
         stats_, RuntimeCallCounterId::kCompileBaselineVisit);
     Prologue();
+    AddPosition();
     for (; !iterator_.done(); iterator_.Advance()) {
       VisitSingleBytecode();
+      AddPosition();
     }
   }
 }
 
-Handle<Code> BaselineCompiler::Build(Isolate* isolate) {
+MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
   CodeDesc desc;
   __ GetCode(isolate, &desc);
   // Allocate the bytecode offset table.
@@ -281,11 +276,11 @@ Handle<Code> BaselineCompiler::Build(Isolate* isolate) {
       bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
   return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
       .set_bytecode_offset_table(bytecode_offset_table)
-      .Build();
+      .TryBuild();
 }
 
 interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) {
-  return accessor().GetRegisterOperand(operand_index);
+  return iterator().GetRegisterOperand(operand_index);
 }
 
 void BaselineCompiler::LoadRegister(Register output, int operand_index) {
@@ -299,36 +294,36 @@ void BaselineCompiler::StoreRegister(int operand_index, Register value) {
 void BaselineCompiler::StoreRegisterPair(int operand_index, Register val0,
                                          Register val1) {
   interpreter::Register reg0, reg1;
-  std::tie(reg0, reg1) = accessor().GetRegisterPairOperand(operand_index);
+  std::tie(reg0, reg1) = iterator().GetRegisterPairOperand(operand_index);
   __ StoreRegister(reg0, val0);
   __ StoreRegister(reg1, val1);
 }
 
 template <typename Type>
 Handle<Type> BaselineCompiler::Constant(int operand_index) {
   return Handle<Type>::cast(
-      accessor().GetConstantForIndexOperand(operand_index, isolate_));
+      iterator().GetConstantForIndexOperand(operand_index, isolate_));
 }
 
 Smi BaselineCompiler::ConstantSmi(int operand_index) {
-  return accessor().GetConstantAtIndexAsSmi(operand_index);
+  return iterator().GetConstantAtIndexAsSmi(operand_index);
 }
 
 template <typename Type>
 void BaselineCompiler::LoadConstant(Register output, int operand_index) {
   __ Move(output, Constant<Type>(operand_index));
 }
 
 uint32_t BaselineCompiler::Uint(int operand_index) {
-  return accessor().GetUnsignedImmediateOperand(operand_index);
+  return iterator().GetUnsignedImmediateOperand(operand_index);
 }
 int32_t BaselineCompiler::Int(int operand_index) {
-  return accessor().GetImmediateOperand(operand_index);
+  return iterator().GetImmediateOperand(operand_index);
 }
 uint32_t BaselineCompiler::Index(int operand_index) {
-  return accessor().GetIndexOperand(operand_index);
+  return iterator().GetIndexOperand(operand_index);
 }
 uint32_t BaselineCompiler::Flag(int operand_index) {
-  return accessor().GetFlagOperand(operand_index);
+  return iterator().GetFlagOperand(operand_index);
 }
 uint32_t BaselineCompiler::RegisterCount(int operand_index) {
-  return accessor().GetRegisterCountOperand(operand_index);
+  return iterator().GetRegisterCountOperand(operand_index);
 }
 
 TaggedIndex BaselineCompiler::IndexAsTagged(int operand_index) {
   return TaggedIndex::FromIntptr(Index(operand_index));
@@ -374,41 +369,65 @@ void BaselineCompiler::SelectBooleanConstant(
 }
 
 void BaselineCompiler::AddPosition() {
-  bytecode_offset_table_builder_.AddPosition(__ pc_offset(),
-                                             accessor().current_offset());
+  bytecode_offset_table_builder_.AddPosition(__ pc_offset());
 }
 
 void BaselineCompiler::PreVisitSingleBytecode() {
-  if (accessor().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
-    EnsureLabels(accessor().GetJumpTargetOffset());
+  switch (iterator().current_bytecode()) {
+    case interpreter::Bytecode::kJumpLoop:
+      EnsureLabels(iterator().GetJumpTargetOffset());
+      break;
+
+    // TODO(leszeks): Update the max_call_args as part of the main bytecode
+    // visit loop, by patching the value passed to the prologue.
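+    // (Until then, the widest call site recorded here determines how much
+    // frame space the prologue reserves for outgoing call arguments.)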
+    case interpreter::Bytecode::kCallProperty:
+    case interpreter::Bytecode::kCallAnyReceiver:
+    case interpreter::Bytecode::kCallWithSpread:
+    case interpreter::Bytecode::kCallNoFeedback:
+    case interpreter::Bytecode::kConstruct:
+    case interpreter::Bytecode::kConstructWithSpread:
+      return UpdateMaxCallArgs(
+          iterator().GetRegisterListOperand(1).register_count());
+    case interpreter::Bytecode::kCallUndefinedReceiver:
+      return UpdateMaxCallArgs(
+          iterator().GetRegisterListOperand(1).register_count() + 1);
+    case interpreter::Bytecode::kCallProperty0:
+    case interpreter::Bytecode::kCallUndefinedReceiver0:
+      return UpdateMaxCallArgs(1);
+    case interpreter::Bytecode::kCallProperty1:
+    case interpreter::Bytecode::kCallUndefinedReceiver1:
+      return UpdateMaxCallArgs(2);
+    case interpreter::Bytecode::kCallProperty2:
+    case interpreter::Bytecode::kCallUndefinedReceiver2:
+      return UpdateMaxCallArgs(3);
+
+    default:
+      break;
   }
 }
 
 void BaselineCompiler::VisitSingleBytecode() {
-  int offset = accessor().current_offset();
+  int offset = iterator().current_offset();
   if (labels_[offset]) {
     // Bind labels for this offset that have already been linked to a
     // jump (i.e. forward jumps, excluding jump tables).
     for (auto&& label : labels_[offset]->linked) {
-      __ Bind(&label->label);
+      __ BindWithoutJumpTarget(&label->label);
     }
 #ifdef DEBUG
     labels_[offset]->linked.Clear();
 #endif
-    __ Bind(&labels_[offset]->unlinked);
+    __ BindWithoutJumpTarget(&labels_[offset]->unlinked);
   }
 
-  // Record positions of exception handlers.
-  if (handler_offsets_.find(accessor().current_offset()) !=
-      handler_offsets_.end()) {
-    AddPosition();
-    __ ExceptionHandler();
-  }
+  // Mark position as valid jump target. This is required for the deoptimizer
+  // and exception handling when CFI is enabled.
+  __ JumpTarget();
 
   if (FLAG_code_comments) {
     std::ostringstream str;
     str << "[ ";
-    accessor().PrintTo(str);
+    iterator().PrintTo(str);
     __ RecordComment(str.str().c_str());
   }
 
@@ -418,7 +437,7 @@ void BaselineCompiler::VisitSingleBytecode() {
   TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
 #endif
 
-  switch (accessor().current_bytecode()) {
+  switch (iterator().current_bytecode()) {
 #define BYTECODE_CASE(name, ...)       \
   case interpreter::Bytecode::k##name: \
     Visit##name();                     \
    break;
@@ -469,7 +488,7 @@ void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
   SaveAccumulatorScope accumulator_scope(&basm_);
   CallRuntime(function_id, bytecode_,
               Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                           accessor().current_offset()),
+                           iterator().current_offset()),
               kInterpreterAccumulatorRegister);
   __ RecordComment("]");
 }
@@ -486,22 +505,25 @@ INTRINSICS_LIST(DECLARE_VISITOR)
 
 void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
     int weight, Label* label, Label* skip_interrupt_label) {
-  __ RecordComment("[ Update Interrupt Budget");
-  __ AddToInterruptBudget(weight);
-
-  if (weight < 0) {
-    // Use compare flags set by AddToInterruptBudget
-    __ JumpIf(Condition::kGreaterThanEqual, skip_interrupt_label);
-    SaveAccumulatorScope accumulator_scope(&basm_);
-    CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
-                __ FunctionOperand());
+  if (weight != 0) {
+    __ RecordComment("[ Update Interrupt Budget");
+    __ AddToInterruptBudget(weight);
+
+    if (weight < 0) {
+      // Use compare flags set by AddToInterruptBudget
+      __ JumpIf(Condition::kGreaterThanEqual, skip_interrupt_label);
+      SaveAccumulatorScope accumulator_scope(&basm_);
+      CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode,
+                  __ FunctionOperand());
+    }
   }
   if (label) __ Jump(label);
-  __ RecordComment("]");
+  if (weight != 0) __ RecordComment("]");
 }
 
 void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() {
-  int weight = accessor().GetRelativeJumpTargetOffset();
+  int weight = iterator().GetRelativeJumpTargetOffset() -
+               iterator().current_bytecode_size_without_prefix();
   UpdateInterruptBudgetAndJumpToLabel(weight, BuildForwardJumpLabel(), nullptr);
 }
 
@@ -524,7 +546,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
 }
 
 Label* BaselineCompiler::BuildForwardJumpLabel() {
-  int target_offset = accessor().GetJumpTargetOffset();
+  int target_offset = iterator().GetJumpTargetOffset();
   ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>();
   EnsureLabels(target_offset)->linked.Add(threaded_label);
   return &threaded_label->label;
@@ -540,7 +562,6 @@ void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
     __ LoadContext(descriptor.ContextRegister());
   }
   __ CallBuiltin(builtin);
-  AddPosition();
   __ RecordComment("]");
 }
 
@@ -560,7 +581,6 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
   __ LoadContext(kContextRegister);
   int nargs = __ Push(args...);
   __ CallRuntime(function, nargs);
-  AddPosition();
 }
 
 // Returns into kInterpreterAccumulatorRegister
@@ -592,7 +612,7 @@ void BaselineCompiler::VisitLdaZero() {
 }
 
 void BaselineCompiler::VisitLdaSmi() {
-  Smi constant = Smi::FromInt(accessor().GetImmediateOperand(0));
+  Smi constant = Smi::FromInt(iterator().GetImmediateOperand(0));
   __ Move(kInterpreterAccumulatorRegister, constant);
 }
 
@@ -691,7 +711,7 @@ void BaselineCompiler::VisitStaContextSlot() {
   Register value = scratch_scope.AcquireScratch();
   __ Move(value, kInterpreterAccumulatorRegister);
   __ StoreTaggedFieldWithWriteBarrier(
-      context, Context::OffsetOfElementAt(accessor().GetIndexOperand(1)),
+      context, Context::OffsetOfElementAt(iterator().GetIndexOperand(1)),
      value);
 }
 
@@ -906,6 +926,7 @@ void BaselineCompiler::VisitStaDataPropertyInLiteral() {
 }
 
 void BaselineCompiler::VisitCollectTypeProfile() {
+  SaveAccumulatorScope accumulator_scope(&basm_);
   CallRuntime(Runtime::kCollectTypeProfile,
               IntAsSmi(0),                      // position
               kInterpreterAccumulatorRegister,  // value
@@ -1110,13 +1131,13 @@ void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot,
 }
 
 void BaselineCompiler::VisitCallAnyReceiver() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count() - 1;  // Remove receiver.
   BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args);
 }
 
 void BaselineCompiler::VisitCallProperty() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count() - 1;  // Remove receiver.
   BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count,
             args);
@@ -1138,7 +1159,7 @@ void BaselineCompiler::VisitCallProperty2() {
 }
 
 void BaselineCompiler::VisitCallUndefinedReceiver() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count();
   BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count,
             RootIndex::kUndefinedValue, args);
@@ -1160,7 +1181,7 @@ void BaselineCompiler::VisitCallUndefinedReceiver2() {
 }
 
 void BaselineCompiler::VisitCallNoFeedback() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count();
   CallBuiltin(Builtins::kCall_ReceiverIsAny,
               RegisterOperand(0),  // kFunction
@@ -1169,7 +1190,7 @@ void BaselineCompiler::VisitCallNoFeedback() {
 }
 
 void BaselineCompiler::VisitCallWithSpread() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
 
   // Do not push the spread argument
   interpreter::Register spread_register = args.last_register();
@@ -1186,24 +1207,25 @@ void BaselineCompiler::VisitCallWithSpread() {
 }
 
 void BaselineCompiler::VisitCallRuntime() {
-  CallRuntime(accessor().GetRuntimeIdOperand(0),
-              accessor().GetRegisterListOperand(1));
+  CallRuntime(iterator().GetRuntimeIdOperand(0),
+              iterator().GetRegisterListOperand(1));
 }
 
 void BaselineCompiler::VisitCallRuntimeForPair() {
-  CallRuntime(accessor().GetRuntimeIdOperand(0),
-              accessor().GetRegisterListOperand(1));
+  SaveAccumulatorScope accumulator_scope(&basm_);
+  CallRuntime(iterator().GetRuntimeIdOperand(0),
+              iterator().GetRegisterListOperand(1));
   StoreRegisterPair(3, kReturnRegister0, kReturnRegister1);
 }
 
 void BaselineCompiler::VisitCallJSRuntime() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count();
 
   // Load context for LoadNativeContextSlot.
   __ LoadContext(kContextRegister);
   __ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
-                           accessor().GetNativeContextIndexOperand(0));
+                           iterator().GetNativeContextIndexOperand(0));
   CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined,
               kJavaScriptCallTargetRegister,  // kFunction
               arg_count,                      // kActualArgumentsCount
@@ -1212,8 +1234,8 @@ void BaselineCompiler::VisitCallJSRuntime() {
 }
 
 void BaselineCompiler::VisitInvokeIntrinsic() {
-  Runtime::FunctionId intrinsic_id = accessor().GetIntrinsicIdOperand(0);
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  Runtime::FunctionId intrinsic_id = iterator().GetIntrinsicIdOperand(0);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   switch (intrinsic_id) {
 #define CASE(Name, ...)         \
   case Runtime::kInline##Name:  \
@@ -1402,7 +1424,7 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
 }
 
 void BaselineCompiler::VisitConstruct() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
   uint32_t arg_count = args.register_count();
   CallBuiltin(Builtins::kConstruct_Baseline,
               RegisterOperand(0),  // kFunction
@@ -1414,7 +1436,7 @@ void BaselineCompiler::VisitConstruct() {
 }
 
 void BaselineCompiler::VisitConstructWithSpread() {
-  interpreter::RegisterList args = accessor().GetRegisterListOperand(1);
+  interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
 
   // Do not push the spread argument
   interpreter::Register spread_register = args.last_register();
@@ -1494,23 +1516,24 @@ void BaselineCompiler::VisitTestIn() {
   CallBuiltin(Builtins::kKeyedHasICBaseline,
               kInterpreterAccumulatorRegister,  // object
               RegisterOperand(0),               // name
-              IndexAsSmi(1));                   // slot
+              IndexAsTagged(1));                // slot
 }
 
 void BaselineCompiler::VisitTestUndetectable() {
-  Label done, set_false;
-  __ JumpIfSmi(kInterpreterAccumulatorRegister, &set_false, Label::kNear);
+  Label done, is_smi, not_undetectable;
+  __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
 
   Register map_bit_field = kInterpreterAccumulatorRegister;
   __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
   __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
   __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
-  __ JumpIf(Condition::kZero, &set_false, Label::kNear);
+  __ JumpIf(Condition::kZero, &not_undetectable, Label::kNear);
 
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
   __ Jump(&done, Label::kNear);
 
-  __ Bind(&set_false);
+  __ Bind(&is_smi);
+  __ Bind(&not_undetectable);
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
   __ Bind(&done);
 }
@@ -1534,36 +1557,172 @@ void BaselineCompiler::VisitTestUndefined() {
 }
 
 void BaselineCompiler::VisitTestTypeOf() {
-  uint32_t literal_flag = Flag(0);
-
-  CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
-
-#define TYPEOF_FLAG_VALUE(type_name)                                           \
-  static_cast<                                                                 \
-      std::underlying_type<interpreter::TestTypeOfFlags::LiteralFlag>::type>(  \
-      interpreter::TestTypeOfFlags::LiteralFlag::k##type_name)
-#define TYPEOF_COMPARE(type_name)                                          \
-  SelectBooleanConstant(kInterpreterAccumulatorRegister,                   \
-                        [&](Label* is_true, Label::Distance distance) {    \
-                          __ JumpIfRoot(kInterpreterAccumulatorRegister,   \
-                                        RootIndex::k##type_name##_string,  \
-                                        is_true, distance);                \
-                        });
+  BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
 
-#define TYPEOF_CASE(type_upper, type_lower) \
-  case TYPEOF_FLAG_VALUE(type_upper):       \
-    TYPEOF_COMPARE(type_lower);             \
-    break;
+  auto literal_flag =
+      static_cast<interpreter::TestTypeOfFlags::LiteralFlag>(Flag(0));
 
+  Label done;
   switch (literal_flag) {
-    default:
-      __ Trap();
+    case interpreter::TestTypeOfFlags::LiteralFlag::kNumber: {
+      Label is_smi, is_heap_number;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+      __ CmpObjectType(kInterpreterAccumulatorRegister, HEAP_NUMBER_TYPE,
+                       scratch_scope.AcquireScratch());
+      __ JumpIf(Condition::kEqual, &is_heap_number, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&is_heap_number);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
       break;
-      TYPEOF_LITERAL_LIST(TYPEOF_CASE)
-  }
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kString: {
+      Label is_smi, bad_instance_type;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+      STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
+      __ CmpObjectType(kInterpreterAccumulatorRegister, FIRST_NONSTRING_TYPE,
+                       scratch_scope.AcquireScratch());
+      __ JumpIf(Condition::kGreaterThanEqual, &bad_instance_type,
+                Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&bad_instance_type);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol: {
+      Label is_smi, bad_instance_type;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+      __ CmpObjectType(kInterpreterAccumulatorRegister, SYMBOL_TYPE,
+                       scratch_scope.AcquireScratch());
+      __ JumpIf(Condition::kNotEqual, &bad_instance_type, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&bad_instance_type);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kBoolean: {
+      Label is_true, is_false;
+      __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue,
+                    &is_true, Label::kNear);
+      __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue,
+                    &is_false, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_true);
+      __ Bind(&is_false);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt: {
+      Label is_smi, bad_instance_type;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+      __ CmpObjectType(kInterpreterAccumulatorRegister, BIGINT_TYPE,
+                       scratch_scope.AcquireScratch());
+      __ JumpIf(Condition::kNotEqual, &bad_instance_type, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&bad_instance_type);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kUndefined: {
+      Label is_smi, is_null, not_undetectable;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
-
+      // null is undetectable, so test it explicitly, and return false.
+      __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
+                    &is_null, Label::kNear);
+
+      // All other undetectable maps are typeof undefined.
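+      // (In practice the only undetectable objects are document.all
+      // emulations, for which typeof must report "undefined".)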
+      Register map_bit_field = kInterpreterAccumulatorRegister;
+      __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
+      __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+      __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
+      __ JumpIf(Condition::kZero, &not_undetectable, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&is_null);
+      __ Bind(&not_undetectable);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kFunction: {
+      Label is_smi, not_callable, undetectable;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+      // Check if the map is callable but not undetectable.
+      Register map_bit_field = kInterpreterAccumulatorRegister;
+      __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
+      __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+      __ Test(map_bit_field, Map::Bits1::IsCallableBit::kMask);
+      __ JumpIf(Condition::kZero, &not_callable, Label::kNear);
+      __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask);
+      __ JumpIf(Condition::kNotZero, &undetectable, Label::kNear);
+
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&not_callable);
+      __ Bind(&undetectable);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kObject: {
+      Label is_smi, is_null, bad_instance_type, undetectable_or_callable;
+      __ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
+
+      // If the object is null, return true.
+      __ JumpIfRoot(kInterpreterAccumulatorRegister, RootIndex::kNullValue,
+                    &is_null, Label::kNear);
+
+      // If the object's instance type isn't within the range, return false.
+      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+      Register map = scratch_scope.AcquireScratch();
+      __ CmpObjectType(kInterpreterAccumulatorRegister, FIRST_JS_RECEIVER_TYPE,
+                       map);
+      __ JumpIf(Condition::kLessThan, &bad_instance_type, Label::kNear);
+
+      // If the map is undetectable or callable, return false.
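+      // (Callable maps answer "function" and undetectable maps answer
+      // "undefined", so neither may count as "object".)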
+      Register map_bit_field = kInterpreterAccumulatorRegister;
+      __ LoadByteField(map_bit_field, map, Map::kBitFieldOffset);
+      __ Test(map_bit_field, Map::Bits1::IsUndetectableBit::kMask |
+                                 Map::Bits1::IsCallableBit::kMask);
+      __ JumpIf(Condition::kNotZero, &undetectable_or_callable, Label::kNear);
+
+      __ Bind(&is_null);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
+      __ Jump(&done, Label::kNear);
+
+      __ Bind(&is_smi);
+      __ Bind(&bad_instance_type);
+      __ Bind(&undetectable_or_callable);
+      __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
+      break;
+    }
+    case interpreter::TestTypeOfFlags::LiteralFlag::kOther:
+    default:
+      UNREACHABLE();
+  }
+  __ Bind(&done);
 }
 
 void BaselineCompiler::VisitToName() {
@@ -1602,15 +1761,16 @@ void BaselineCompiler::VisitCreateRegExpLiteral() {
 
 void BaselineCompiler::VisitCreateArrayLiteral() {
   uint32_t flags = Flag(2);
+  int32_t flags_raw = static_cast<int32_t>(
+      interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
   if (flags &
       interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) {
     CallBuiltin(Builtins::kCreateShallowArrayLiteral,
                 FeedbackVector(),          // feedback vector
                 IndexAsTagged(1),          // slot
-                Constant<HeapObject>(0));  // constant elements
+                Constant<HeapObject>(0),   // constant elements
+                Smi::FromInt(flags_raw));  // flags
   } else {
-    int32_t flags_raw = static_cast<int32_t>(
-        interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
     CallRuntime(Runtime::kCreateArrayLiteral,
                 FeedbackVector(),  // feedback vector
                 IndexAsTagged(1),  // slot
@@ -1756,15 +1916,16 @@ void BaselineCompiler::VisitJumpLoop() {
   Register osr_level = scratch;
   __ LoadRegister(osr_level, interpreter::Register::bytecode_array());
   __ LoadByteField(osr_level, osr_level, BytecodeArray::kOsrNestingLevelOffset);
-  int loop_depth = accessor().GetImmediateOperand(1);
+  int loop_depth = iterator().GetImmediateOperand(1);
   __ CompareByte(osr_level, loop_depth);
   __ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed);
   CallBuiltin(Builtins::kBaselineOnStackReplacement);
   __ RecordComment("]");
 
   __ Bind(&osr_not_armed);
-  Label* label = &labels_[accessor().GetJumpTargetOffset()]->unlinked;
-  int weight = accessor().GetRelativeJumpTargetOffset();
+  Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
+  int weight = iterator().GetRelativeJumpTargetOffset() -
+               iterator().current_bytecode_size_without_prefix();
   // We can pass in the same label twice since it's a back edge and thus
   // already bound.
DCHECK(label->is_bound()); @@ -1879,7 +2040,7 @@ void BaselineCompiler::VisitJumpIfJSReceiver() { void BaselineCompiler::VisitSwitchOnSmiNoFeedback() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); interpreter::JumpTableTargetOffsets offsets = - accessor().GetJumpTableTargetOffsets(); + iterator().GetJumpTableTargetOffsets(); if (offsets.size() == 0) return; @@ -1903,7 +2064,7 @@ void BaselineCompiler::VisitForInPrepare() { StoreRegister(0, kInterpreterAccumulatorRegister); CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister, IndexAsTagged(1), FeedbackVector()); - interpreter::Register first = accessor().GetRegisterOperand(0); + interpreter::Register first = iterator().GetRegisterOperand(0); interpreter::Register second(first.index() + 1); interpreter::Register third(first.index() + 2); __ StoreRegister(second, kReturnRegister0); @@ -1923,7 +2084,7 @@ void BaselineCompiler::VisitForInContinue() { void BaselineCompiler::VisitForInNext() { interpreter::Register cache_type, cache_array; - std::tie(cache_type, cache_array) = accessor().GetRegisterPairOperand(2); + std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2); CallBuiltin(Builtins::kForInNext, Index(3), // vector slot RegisterOperand(0), // object @@ -1961,7 +2122,8 @@ void BaselineCompiler::VisitReThrow() { void BaselineCompiler::VisitReturn() { __ RecordComment("[ Return"); - int profiling_weight = accessor().current_offset(); + int profiling_weight = iterator().current_offset() + + iterator().current_bytecode_size_without_prefix(); int parameter_count = bytecode_->parameter_count(); // We must pop all arguments from the stack (including the receiver). This @@ -2043,7 +2205,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() { __ StoreContext(context); interpreter::JumpTableTargetOffsets offsets = - accessor().GetJumpTableTargetOffsets(); + iterator().GetJumpTableTargetOffsets(); if (0 < offsets.size()) { DCHECK_EQ(0, (*offsets.begin()).case_value); @@ -2064,73 +2226,30 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() { } void BaselineCompiler::VisitSuspendGenerator() { - DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0)); - int register_count = RegisterCount(2); - uint32_t suspend_id = Uint(3); - + DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0)); BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register generator_object = scratch_scope.AcquireScratch(); - Register parameters_and_registers_array = scratch_scope.AcquireScratch(); - Register value = scratch_scope.AcquireScratch(); - LoadRegister(generator_object, 0); - __ LoadTaggedPointerField(parameters_and_registers_array, generator_object, - JSGeneratorObject::kParametersAndRegistersOffset); - - int formal_parameter_count = - shared_function_info_->internal_formal_parameter_count(); - for (int i = 0; i < formal_parameter_count; ++i) { - __ LoadRegister(value, interpreter::Register::FromParameterIndex( - i + 1, bytecode_->parameter_count())); - __ StoreTaggedFieldWithWriteBarrier(parameters_and_registers_array, - FixedArray::OffsetOfElementAt(i), - value); - } - for (int i = 0; i < register_count; ++i) { - __ LoadRegister(value, interpreter::Register(i)); - __ StoreTaggedFieldWithWriteBarrier( - parameters_and_registers_array, - FixedArray::OffsetOfElementAt(formal_parameter_count + i), value); - } - - __ LoadContext(value); - __ StoreTaggedFieldWithWriteBarrier(generator_object, - JSGeneratorObject::kContextOffset, value); - - __ 
-                            JSGeneratorObject::kContinuationOffset,
-                            Smi::FromInt(suspend_id));
+  {
+    SaveAccumulatorScope accumulator_scope(&basm_);
 
-  __ StoreTaggedSignedField(
-      generator_object, JSGeneratorObject::kInputOrDebugPosOffset,
-      Smi::FromInt(BytecodeArray::kHeaderSize + accessor().current_offset()));
+    int bytecode_offset =
+        BytecodeArray::kHeaderSize + iterator().current_offset();
+    CallBuiltin(Builtins::kSuspendGeneratorBaseline, generator_object,
+                static_cast<int>(Uint(3)),  // suspend_id
+                bytecode_offset,
+                static_cast<int>(RegisterCount(2)));  // register_count
+  }
 
   VisitReturn();
 }
 
 void BaselineCompiler::VisitResumeGenerator() {
-  DCHECK_EQ(accessor().GetRegisterOperand(1), interpreter::Register(0));
-  int register_count = RegisterCount(2);
-
+  DCHECK_EQ(iterator().GetRegisterOperand(1), interpreter::Register(0));
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register generator_object = scratch_scope.AcquireScratch();
-  Register parameters_and_registers_array = scratch_scope.AcquireScratch();
-  Register value = scratch_scope.AcquireScratch();
-
   LoadRegister(generator_object, 0);
-  __ LoadTaggedPointerField(parameters_and_registers_array, generator_object,
-                            JSGeneratorObject::kParametersAndRegistersOffset);
-
-  int formal_parameter_count =
-      shared_function_info_->internal_formal_parameter_count();
-  for (int i = 0; i < register_count; ++i) {
-    __ LoadTaggedAnyField(
-        value, parameters_and_registers_array,
-        FixedArray::OffsetOfElementAt(formal_parameter_count + i));
-    __ StoreRegister(interpreter::Register(i), value);
-  }
-
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, generator_object,
-                        JSGeneratorObject::kInputOrDebugPosOffset);
+  CallBuiltin(Builtins::kResumeGeneratorBaseline, generator_object,
+              static_cast<int>(RegisterCount(2)));  // register_count
 }
 
 void BaselineCompiler::VisitGetIterator() {
@@ -2141,10 +2260,12 @@ void BaselineCompiler::VisitGetIterator() {
 }
 
 void BaselineCompiler::VisitDebugger() {
+  SaveAccumulatorScope accumulator_scope(&basm_);
   CallBuiltin(Builtins::kHandleDebuggerStatement);
 }
 
 void BaselineCompiler::VisitIncBlockCounter() {
+  SaveAccumulatorScope accumulator_scope(&basm_);
   CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
               IndexAsSmi(0));  // coverage array slot
 }
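Note on the SaveAccumulatorScope additions above: the debugger and block-counter builtins are called at points where the accumulator is live, so its value has to survive the call. A minimal standalone RAII analogue of the pattern (illustrative only; the real scope spills to the stack through the baseline assembler):

// Minimal RAII analogue of SaveAccumulatorScope (illustrative, not V8 code):
// spill a value on construction, restore it on destruction, so anything done
// inside the scope cannot clobber it.
#include <stack>

class SaveValueScope {
 public:
  SaveValueScope(std::stack<int>& spill_area, int& value)
      : spill_area_(spill_area), value_(value) {
    spill_area_.push(value_);  // spill across the "call"
  }
  ~SaveValueScope() {
    value_ = spill_area_.top();  // restore after the "call"
    spill_area_.pop();
  }

 private:
  std::stack<int>& spill_area_;
  int& value_;
};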
diff --git a/chromium/v8/src/baseline/baseline-compiler.h b/chromium/v8/src/baseline/baseline-compiler.h
index 2ddd8fdb16c..dbb2f64f6c5 100644
--- a/chromium/v8/src/baseline/baseline-compiler.h
+++ b/chromium/v8/src/baseline/baseline-compiler.h
@@ -7,12 +7,12 @@
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-
-#include <unordered_map>
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
 
 #include "src/base/logging.h"
 #include "src/base/threaded-list.h"
+#include "src/base/vlq.h"
 #include "src/baseline/baseline-assembler.h"
 #include "src/handles/handles.h"
 #include "src/interpreter/bytecode-array-iterator.h"
@@ -21,7 +21,6 @@
 #include "src/logging/counters.h"
 #include "src/objects/map.h"
 #include "src/objects/tagged-index.h"
-#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -32,30 +31,19 @@ namespace baseline {
 
 class BytecodeOffsetTableBuilder {
  public:
-  void AddPosition(size_t pc_offset, size_t bytecode_offset) {
-    WriteUint(pc_offset - previous_pc_);
-    WriteUint(bytecode_offset - previous_bytecode_);
+  void AddPosition(size_t pc_offset) {
+    size_t pc_diff = pc_offset - previous_pc_;
+    DCHECK_GE(pc_diff, 0);
+    DCHECK_LE(pc_diff, std::numeric_limits<uint32_t>::max());
+    base::VLQEncodeUnsigned(&bytes_, static_cast<uint32_t>(pc_diff));
     previous_pc_ = pc_offset;
-    previous_bytecode_ = bytecode_offset;
   }
 
   template <typename LocalIsolate>
   Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
 
  private:
-  void WriteUint(size_t value) {
-    bool has_next;
-    do {
-      uint8_t byte = value & ((1 << 7) - 1);
-      value >>= 7;
-      has_next = value != 0;
-      byte |= (has_next << 7);
-      bytes_.push_back(byte);
-    } while (has_next);
-  }
-
   size_t previous_pc_ = 0;
-  size_t previous_bytecode_ = 0;
   std::vector<byte> bytes_;
 };
 
@@ -66,7 +54,7 @@ class BaselineCompiler {
                    Handle<BytecodeArray> bytecode);
 
   void GenerateCode();
-  Handle<Code> Build(Isolate* isolate);
+  MaybeHandle<Code> Build(Isolate* isolate);
 
  private:
   void Prologue();
@@ -123,6 +111,10 @@ class BaselineCompiler {
 
   // Misc. helpers.
 
+  void UpdateMaxCallArgs(int max_call_args) {
+    max_call_args_ = std::max(max_call_args_, max_call_args);
+  }
+
   // Select the root boolean constant based on the jump in the given
   // `jump_func` -- the function should jump to the given label if we want to
   // select "true", otherwise it should fall through.
@@ -170,7 +162,7 @@ class BaselineCompiler {
   INTRINSICS_LIST(DECLARE_VISITOR)
 #undef DECLARE_VISITOR
 
-  const interpreter::BytecodeArrayAccessor& accessor() { return iterator_; }
+  const interpreter::BytecodeArrayIterator& iterator() { return iterator_; }
 
   Isolate* isolate_;
   RuntimeCallStats* stats_;
@@ -182,6 +174,8 @@ class BaselineCompiler {
   BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
   Zone zone_;
 
+  int max_call_args_ = 0;
+
   struct ThreadedLabel {
     Label label;
     ThreadedLabel* ptr;
@@ -201,7 +195,6 @@ class BaselineCompiler {
   }
 
   BaselineLabels** labels_;
-  ZoneSet<int> handler_offsets_;
 };
 
 }  // namespace baseline
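Note: the removed WriteUint above was an open-coded unsigned VLQ (7-bit varint) encoder; the new code delegates to the shared base::VLQEncodeUnsigned helper, and the table now records only pc deltas. A standalone sketch of the same encoding scheme with the matching decoder (plain C++, independent of V8's actual helpers):

// Standalone sketch of the unsigned VLQ scheme used by the offset table
// (equivalent to the removed WriteUint; not V8's real helper functions).
#include <cstdint>
#include <vector>

void VLQEncodeUnsigned(std::vector<uint8_t>* bytes, uint32_t value) {
  bool has_next;
  do {
    uint8_t byte = value & 0x7f;  // low 7 bits per output byte
    value >>= 7;
    has_next = value != 0;
    byte |= has_next << 7;  // high bit marks continuation
    bytes->push_back(byte);
  } while (has_next);
}

uint32_t VLQDecodeUnsigned(const uint8_t* data, int* index) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = data[(*index)++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}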
diff --git a/chromium/v8/src/baseline/baseline.cc b/chromium/v8/src/baseline/baseline.cc
index 3229c134f43..b5355660f94 100644
--- a/chromium/v8/src/baseline/baseline.cc
+++ b/chromium/v8/src/baseline/baseline.cc
@@ -4,9 +4,12 @@
 
 #include "src/baseline/baseline.h"
 
+#include "src/handles/maybe-handles.h"
+
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM
 
 #include "src/baseline/baseline-assembler-inl.h"
 #include "src/baseline/baseline-compiler.h"
@@ -18,17 +21,17 @@
 namespace v8 {
 namespace internal {
 
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
-                                  Handle<SharedFunctionInfo> shared) {
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+                                       Handle<SharedFunctionInfo> shared) {
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      RuntimeCallCounterId::kCompileBaseline);
   baseline::BaselineCompiler compiler(
       isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
   compiler.GenerateCode();
-  Handle<Code> code = compiler.Build(isolate);
-  if (FLAG_print_code) {
-    code->Print();
+  MaybeHandle<Code> code = compiler.Build(isolate);
+  if (FLAG_print_code && !code.is_null()) {
+    code.ToHandleChecked()->Print();
   }
   return code;
 }
@@ -45,8 +48,8 @@ void EmitReturnBaseline(MacroAssembler* masm) {
 namespace v8 {
 namespace internal {
 
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
-                                  Handle<SharedFunctionInfo> shared) {
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+                                       Handle<SharedFunctionInfo> shared) {
   UNREACHABLE();
 }
 
diff --git a/chromium/v8/src/baseline/baseline.h b/chromium/v8/src/baseline/baseline.h
index 071c0bdbfb4..2dba2d9674b 100644
--- a/chromium/v8/src/baseline/baseline.h
+++ b/chromium/v8/src/baseline/baseline.h
@@ -14,8 +14,8 @@ class Code;
 class SharedFunctionInfo;
 class MacroAssembler;
 
-Handle<Code> GenerateBaselineCode(Isolate* isolate,
-                                  Handle<SharedFunctionInfo> shared);
+MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
+                                       Handle<SharedFunctionInfo> shared);
 
 void EmitReturnBaseline(MacroAssembler* masm);
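Note: Build and GenerateBaselineCode now return MaybeHandle<Code>, i.e. compilation may fail and callers must test the result before use, as the FLAG_print_code guard above does. A standalone analogue of that calling convention using std::optional (illustrative types only; the failure condition here is made up):

// Standalone analogue of the MaybeHandle calling convention (illustrative).
#include <cstdio>
#include <optional>

struct CodeStub {
  void Print() const { std::puts("<code>"); }
};

// May fail; an empty optional plays the role of an empty MaybeHandle.
std::optional<CodeStub> Build(bool ok) {
  if (!ok) return std::nullopt;  // e.g. code generation bailed out
  return CodeStub{};
}

int main() {
  std::optional<CodeStub> code = Build(true);
  if (code.has_value()) code->Print();  // check before use, like is_null()
}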
diff --git a/chromium/v8/src/baseline/bytecode-offset-iterator.cc b/chromium/v8/src/baseline/bytecode-offset-iterator.cc
new file mode 100644
index 00000000000..bbedac8ef30
--- /dev/null
+++ b/chromium/v8/src/baseline/bytecode-offset-iterator.cc
@@ -0,0 +1,65 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/baseline/bytecode-offset-iterator.h"
+
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+BytecodeOffsetIterator::BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
+                                               Handle<BytecodeArray> bytecodes)
+    : mapping_table_(mapping_table),
+      data_start_address_(mapping_table_->GetDataStartAddress()),
+      data_length_(mapping_table_->length()),
+      current_index_(0),
+      bytecode_iterator_(bytecodes),
+      local_heap_(LocalHeap::Current()
+                      ? LocalHeap::Current()
+                      : Isolate::Current()->main_thread_local_heap()) {
+  local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
+  Initialize();
+}
+
+BytecodeOffsetIterator::BytecodeOffsetIterator(ByteArray mapping_table,
+                                               BytecodeArray bytecodes)
+    : data_start_address_(mapping_table.GetDataStartAddress()),
+      data_length_(mapping_table.length()),
+      current_index_(0),
+      bytecode_handle_storage_(bytecodes),
+      // In the non-handlified version, no GC is allowed. We use a "dummy"
+      // handle to pass the BytecodeArray to the BytecodeArrayIterator, which
+      // is fine since no objects will be moved.
+      bytecode_iterator_(Handle<BytecodeArray>(
+          reinterpret_cast<Address*>(&bytecode_handle_storage_))),
+      local_heap_(nullptr) {
+  no_gc.emplace();
+  Initialize();
+}
+
+BytecodeOffsetIterator::~BytecodeOffsetIterator() {
+  if (local_heap_ != nullptr) {
+    local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+  }
+}
+
+void BytecodeOffsetIterator::Initialize() {
+  // Initialize values for the prologue.
+  // The first recorded position is at the start of the first bytecode.
+  current_pc_start_offset_ = 0;
+  current_pc_end_offset_ = ReadPosition();
+  current_bytecode_offset_ = kFunctionEntryBytecodeOffset;
+}
+
+void BytecodeOffsetIterator::UpdatePointers() {
+  DisallowGarbageCollection no_gc;
+  DCHECK(!mapping_table_.is_null());
+  data_start_address_ = mapping_table_->GetDataStartAddress();
+}
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
diff --git a/chromium/v8/src/baseline/bytecode-offset-iterator.h b/chromium/v8/src/baseline/bytecode-offset-iterator.h
new file mode 100644
index 00000000000..6e78fba0614
--- /dev/null
+++ b/chromium/v8/src/baseline/bytecode-offset-iterator.h
@@ -0,0 +1,98 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
+#define V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
+
+#include "src/base/vlq.h"
+#include "src/common/globals.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects/code.h"
+#include "src/objects/fixed-array.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace baseline {
+
+class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
+ public:
+  explicit BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
+                                  Handle<BytecodeArray> bytecodes);
+  // Non-handlified version for use when no GC can happen.
+  explicit BytecodeOffsetIterator(ByteArray mapping_table,
+                                  BytecodeArray bytecodes);
+  ~BytecodeOffsetIterator();
+
+  inline void Advance() {
+    DCHECK(!done());
+    current_pc_start_offset_ = current_pc_end_offset_;
+    current_pc_end_offset_ += ReadPosition();
+    current_bytecode_offset_ = bytecode_iterator_.current_offset();
+    bytecode_iterator_.Advance();
+  }
+
+  inline void AdvanceToBytecodeOffset(int bytecode_offset) {
+    while (current_bytecode_offset() < bytecode_offset) {
+      Advance();
+    }
+    DCHECK_EQ(bytecode_offset, current_bytecode_offset());
+  }
+
+  inline void AdvanceToPCOffset(Address pc_offset) {
+    while (current_pc_end_offset() < pc_offset) {
+      Advance();
+    }
+    DCHECK_GT(pc_offset, current_pc_start_offset());
+    DCHECK_LE(pc_offset, current_pc_end_offset());
+  }
+
+  // For this iterator, done() means that it is not safe to Advance().
+  // Values are cached, so reads are always allowed.
+  inline bool done() const { return current_index_ >= data_length_; }
+
+  inline Address current_pc_start_offset() const {
+    return current_pc_start_offset_;
+  }
+
+  inline Address current_pc_end_offset() const {
+    return current_pc_end_offset_;
+  }
+
+  inline int current_bytecode_offset() const {
+    return current_bytecode_offset_;
+  }
+
+  static void UpdatePointersCallback(void* iterator) {
+    reinterpret_cast<BytecodeOffsetIterator*>(iterator)->UpdatePointers();
+  }
+
+  void UpdatePointers();
+
+ private:
+  void Initialize();
+  inline int ReadPosition() {
+    return base::VLQDecodeUnsigned(data_start_address_, &current_index_);
+  }
+
+  Handle<ByteArray> mapping_table_;
+  byte* data_start_address_;
+  int data_length_;
+  int current_index_;
+  Address current_pc_start_offset_;
+  Address current_pc_end_offset_;
+  int current_bytecode_offset_;
+  BytecodeArray bytecode_handle_storage_;
+  interpreter::BytecodeArrayIterator bytecode_iterator_;
+  LocalHeap* local_heap_;
+  base::Optional<DisallowGarbageCollection> no_gc;
+};
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
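Note: the typical use of this iterator is translating a pc offset in baseline code back to the bytecode offset it was compiled from. A usage sketch against the API declared above (the handle setup is elided and the variable names are illustrative; the iterator only moves forward, so each query needs a fresh or not-yet-advanced iterator):

// Map a pc offset in baseline code back to its bytecode offset, using only
// the API declared above (illustrative sketch, not from the V8 sources).
baseline::BytecodeOffsetIterator offset_iterator(mapping_table, bytecodes);
offset_iterator.AdvanceToPCOffset(pc_offset);
int bytecode_offset = offset_iterator.current_bytecode_offset();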
diff --git a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
new file mode 100644
index 00000000000..2cd34aef710
--- /dev/null
+++ b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -0,0 +1,445 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
+#define V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/ia32/register-ia32.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+namespace detail {
+
+static constexpr Register kScratchRegisters[] = {ecx, edx, esi, edi};
+static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
+
+}  // namespace detail
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+  explicit ScratchRegisterScope(BaselineAssembler* assembler)
+      : assembler_(assembler),
+        prev_scope_(assembler->scratch_register_scope_),
+        registers_used_(prev_scope_ == nullptr ? 0
+                                               : prev_scope_->registers_used_) {
+    assembler_->scratch_register_scope_ = this;
+  }
+  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+  Register AcquireScratch() {
+    DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
+    return detail::kScratchRegisters[registers_used_++];
+  }
+
+ private:
+  BaselineAssembler* assembler_;
+  ScratchRegisterScope* prev_scope_;
+  int registers_used_;
+};
+
+// TODO(v8:11461): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+  kEqual = equal,
+  kNotEqual = not_equal,
+
+  kLessThan = less,
+  kGreaterThan = greater,
+  kLessThanEqual = less_equal,
+  kGreaterThanEqual = greater_equal,
+
+  kUnsignedLessThan = below,
+  kUnsignedGreaterThan = above,
+  kUnsignedLessThanEqual = below_equal,
+  kUnsignedGreaterThanEqual = above_equal,
+
+  kOverflow = overflow,
+  kNoOverflow = no_overflow,
+
+  kZero = zero,
+  kNotZero = not_zero,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+  return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#define __ masm_->
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+  return op.is_reg(target);
+}
+#endif
+
+}  // namespace detail
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+    interpreter::Register interpreter_register) {
+  return MemOperand(ebp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+  return MemOperand(ebp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+  // NOP on ia32.
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+  __ jmp(target, distance);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target,
+                               Label::Distance distance) {
+  __ j(AsMasmCondition(cc), target, distance);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+                                   Label* target, Label::Distance distance) {
+  __ JumpIfRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+                                      Label* target, Label::Distance distance) {
+  __ JumpIfNotRoot(value, index, target, distance);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+                                  Label::Distance distance) {
+  __ JumpIfSmi(value, target, distance);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+                                     Label::Distance distance) {
+  __ JumpIfNotSmi(value, target, distance);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+  __ RecordCommentForOffHeapTrampoline(builtin);
+  __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
+  if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+  __ RecordCommentForOffHeapTrampoline(builtin);
+  __ jmp(__ EntryFromBuiltinIndexAsOperand(builtin));
+  if (FLAG_code_comments) __ RecordComment("]");
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+  if ((mask & 0xff) == mask) {
+    __ test_b(value, Immediate(mask));
+  } else {
+    __ test(value, Immediate(mask));
+  }
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+                                      InstanceType instance_type,
+                                      Register map) {
+  __ AssertNotSmi(object);
+  __ CmpObjectType(object, instance_type, map);
+}
+void BaselineAssembler::CmpInstanceType(Register map,
+                                        InstanceType instance_type) {
+  if (emit_debug_code()) {
+    __ movd(xmm0, eax);
+    __ AssertNotSmi(map);
+    __ CmpObjectType(map, MAP_TYPE, eax);
+    __ Assert(equal, AbortReason::kUnexpectedValue);
+    __ movd(eax, xmm0);
+  }
+  __ CmpInstanceType(map, instance_type);
+}
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+  if (smi.value() == 0) {
+    __ test(value, value);
+  } else {
+    __ cmp(value, Immediate(smi));
+  }
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+  __ cmp(value, operand);
+}
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+  __ AssertSmi(lhs);
+  __ AssertSmi(rhs);
+  __ cmp(lhs, rhs);
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+  __ cmp(value, operand);
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+  __ cmp(operand, value);
+}
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+  __ cmpb(value, Immediate(byte));
+}
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+  return __ mov(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+  __ Move(output, Immediate(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+  __ mov(output, source);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+  __ Move(output, Immediate(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+  __ Move(output, value);
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+  __ Move(output, Immediate(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+  __ mov(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+  __ mov(output, source);
+}
+
+namespace detail {
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+  masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+  masm->Push(Immediate(value.ptr()));
+}
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+  masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+  masm->Push(Immediate(immediate));
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+  masm->Push(operand);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+  return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushHelper {
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    PushSingle(basm->masm(), arg);
+    return 1;
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
+    return Push(basm, arg);
+  }
+};
+
+template <>
+struct PushHelper<interpreter::RegisterList> {
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushSingle(basm->masm(), list[reg_index]);
+    }
+    return list.register_count();
+  }
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushSingle(basm->masm(), list[reg_index]);
+    }
+    return list.register_count();
+  }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+  static int Push(BaselineAssembler* masm) { return 0; }
+  static int PushReverse(BaselineAssembler* masm) { return 0; }
+};
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* masm, Arg arg, Args... args) {
+    int nargs = PushHelper<Arg>::Push(masm, arg);
+    return nargs + PushAllHelper<Args...>::Push(masm, args...);
+  }
+  static int PushReverse(BaselineAssembler* masm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(masm, args...);
+    return nargs + PushHelper<Arg>::PushReverse(masm, arg);
+  }
+};
+
+}  // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+  ITERATE_PACK(__ Pop(registers));
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output,
+                                               Register source, int offset) {
+  __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+                                              int offset) {
+  __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+                                           int offset) {
+  __ mov(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+                                      int offset) {
+  __ mov_b(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+                                               Smi value) {
+  __ mov(FieldOperand(target, offset), Immediate(value));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                         int offset,
+                                                         Register value) {
+  BaselineAssembler::ScratchRegisterScope scratch_scope(this);
+  Register scratch = scratch_scope.AcquireScratch();
+  DCHECK(!AreAliased(scratch, target, value));
+  __ mov(FieldOperand(target, offset), value);
+  __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+                                                       int offset,
+                                                       Register value) {
+  DCHECK(!AreAliased(target, value));
+  __ mov(FieldOperand(target, offset), value);
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+  ScratchRegisterScope scratch_scope(this);
+  Register feedback_cell = scratch_scope.AcquireScratch();
+  LoadFunction(feedback_cell);
+  LoadTaggedPointerField(feedback_cell, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+         Immediate(weight));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+  ScratchRegisterScope scratch_scope(this);
+  Register feedback_cell = scratch_scope.AcquireScratch();
+  DCHECK(!AreAliased(feedback_cell, weight));
+  LoadFunction(feedback_cell);
+  LoadTaggedPointerField(feedback_cell, feedback_cell,
+                         JSFunction::kFeedbackCellOffset);
+  __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
+         weight);
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+  if (rhs.value() == 0) return;
+  __ add(lhs, Immediate(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+                               Label** labels, int num_labels) {
+  ScratchRegisterScope scope(this);
+  Register table = scope.AcquireScratch();
+  DCHECK(!AreAliased(reg, table));
+  Label fallthrough, jump_table;
+  if (case_value_base > 0) {
+    __ sub(reg, Immediate(case_value_base));
+  }
+  __ cmp(reg, Immediate(num_labels));
+  __ j(above_equal, &fallthrough);
+  __ lea(table, MemOperand(&jump_table));
+  __ jmp(Operand(table, reg, times_system_pointer_size, 0));
+  // Emit the jump table inline, under the assumption that it's not too big.
+  __ Align(kSystemPointerSize);
+  __ bind(&jump_table);
+  for (int i = 0; i < num_labels; ++i) {
+    __ dd(labels[i]);
+  }
+  __ bind(&fallthrough);
+}
+
+#undef __
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+  BaselineAssembler basm(masm);
+
+  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+  __ RecordComment("[ Update Interrupt Budget");
+  __ AddToInterruptBudget(weight);
+
+  // Use compare flags set by AddToInterruptBudget
+  Label skip_interrupt_label;
+  __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+  {
+    __ masm()->SmiTag(params_size);
+    __ Push(params_size, kInterpreterAccumulatorRegister);
+
+    __ LoadContext(kContextRegister);
+    __ Push(MemOperand(ebp, InterpreterFrameConstants::kFunctionOffset));
+    __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+    __ Pop(kInterpreterAccumulatorRegister, params_size);
+    __ masm()->SmiUntag(params_size);
+  }
+  __ RecordComment("]");
+
+  __ Bind(&skip_interrupt_label);
+
+  BaselineAssembler::ScratchRegisterScope scope(&basm);
+  Register scratch = scope.AcquireScratch();
+  DCHECK(!AreAliased(weight, params_size, scratch));
+
+  Register actual_params_size = scratch;
+  // Compute the size of the actual parameters + receiver (in bytes).
+  __ masm()->mov(actual_params_size,
+                 MemOperand(ebp, StandardFrameConstants::kArgCOffset));
+
+  // If actual is bigger than formal, then we should use it to free up the stack
+  // arguments.
+  Label corrected_args_count;
+  __ masm()->cmp(params_size, actual_params_size);
+  __ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count, Label::kNear);
+  __ masm()->mov(params_size, actual_params_size);
+  __ Bind(&corrected_args_count);
+
+  // Leave the frame (also dropping the register file).
+  __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+  // Drop receiver + arguments.
+  Register return_pc = scratch;
+  __ masm()->PopReturnAddressTo(return_pc);
+  __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
+                                 kSystemPointerSize));
+  __ masm()->PushReturnAddressFrom(return_pc);
+  __ masm()->Ret();
+}
+
+#undef __
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_IA32_BASELINE_ASSEMBLER_IA32_INL_H_
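Note: Switch above lowers a dense switch to an inline jump table: subtract the case base, bounds-check against the label count, then jump indirectly through a table of code addresses emitted right after the jump. A standalone C++ analogue using a function-pointer table (illustrative, not the generated machine code):

// Standalone analogue of the inline jump table emitted by Switch
// (illustrative; the real code emits raw label addresses with __ dd).
#include <cstdio>

void Case0() { std::puts("case 0"); }
void Case1() { std::puts("case 1"); }
void Fallthrough() { std::puts("out of range"); }

void Dispatch(unsigned value, unsigned case_value_base) {
  static void (*const kTable[])() = {Case0, Case1};
  const unsigned kNumLabels = sizeof(kTable) / sizeof(kTable[0]);
  unsigned index = value - case_value_base;  // __ sub(reg, case_value_base)
  if (index >= kNumLabels) {                 // __ j(above_equal, &fallthrough)
    Fallthrough();
    return;
  }
  kTable[index]();  // __ jmp(Operand(table, reg, times_system_pointer_size))
}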
diff --git a/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
new file mode 100644
index 00000000000..733c05fe185
--- /dev/null
+++ b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
@@ -0,0 +1,93 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
+#define V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
+
+#include "src/base/macros.h"
+#include "src/baseline/baseline-compiler.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+  int max_frame_size = bytecode_->frame_size() + max_call_args_;
+  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
+              kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+              max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+  PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+  __ RecordComment("[ Fill frame");
+  // Inlined register frame fill
+  interpreter::Register new_target_or_generator_register =
+      bytecode_->incoming_new_target_or_generator_register();
+  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+  int register_count = bytecode_->register_count();
+  // Magic value
+  const int kLoopUnrollSize = 8;
+  const int new_target_index = new_target_or_generator_register.index();
+  const bool has_new_target = new_target_index != kMaxInt;
+  if (has_new_target) {
+    DCHECK_LE(new_target_index, register_count);
+    for (int i = 0; i < new_target_index; i++) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
+  }
+  if (register_count < 2 * kLoopUnrollSize) {
+    // If the frame is small enough, just unroll the frame fill completely.
+    for (int i = 0; i < register_count; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+  } else {
+    // Extract the first few registers to round to the unroll size.
+    int first_registers = register_count % kLoopUnrollSize;
+    for (int i = 0; i < first_registers; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    BaselineAssembler::ScratchRegisterScope scope(&basm_);
+    Register scratch = scope.AcquireScratch();
+    __ Move(scratch, register_count / kLoopUnrollSize);
+    // We enter the loop unconditionally, so make sure we need to loop at least
+    // once.
+    DCHECK_GT(register_count / kLoopUnrollSize, 0);
+    Label loop;
+    __ Bind(&loop);
+    for (int i = 0; i < kLoopUnrollSize; ++i) {
+      __ Push(kInterpreterAccumulatorRegister);
+    }
+    __ masm()->dec(scratch);
+    __ JumpIf(Condition::kGreaterThan, &loop);
+  }
+  __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+  __ masm()->movd(xmm0, eax);
+  __ Move(eax, esp);
+  __ masm()->add(eax,
+                 Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                           bytecode_->frame_size()));
+  __ masm()->cmp(eax, ebp);
+  __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
+  __ masm()->movd(eax, xmm0);
+}
+
+#undef __
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
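Note: PrologueFillFrame above fills small frames with straight-line pushes and larger ones with a loop unrolled eight-fold, peeling register_count % 8 pushes first so the loop body is always exactly eight wide. The same shape in standalone C++ (illustrative; fills a vector instead of pushing stack slots):

// Standalone analogue of the unroll-by-8 frame fill (illustrative sketch).
#include <vector>

void FillUnrolled(std::vector<int>& frame, int register_count, int value) {
  const int kLoopUnrollSize = 8;
  if (register_count < 2 * kLoopUnrollSize) {
    // Small frames: fully unrolled, a straight-line run of pushes.
    for (int i = 0; i < register_count; ++i) frame.push_back(value);
    return;
  }
  // Peel the remainder first so the main loop body is always exactly 8 wide.
  int first_registers = register_count % kLoopUnrollSize;
  for (int i = 0; i < first_registers; ++i) frame.push_back(value);
  for (int n = register_count / kLoopUnrollSize; n > 0; --n) {
    for (int i = 0; i < kLoopUnrollSize; ++i) frame.push_back(value);
  }
}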
diff --git a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index 8fd564442ea..202f83c7615 100644
--- a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -5,6 +5,7 @@
 #ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
 #define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
 
+#include "src/base/macros.h"
 #include "src/baseline/baseline-assembler.h"
 #include "src/codegen/interface-descriptors.h"
 #include "src/codegen/x64/register-x64.h"
@@ -17,12 +18,11 @@ namespace detail {
 
 // Avoid using kScratchRegister(==r10) since the macro-assembler doesn't use
 // this scope and will conflict.
-static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r14, r15};
+static constexpr Register kScratchRegisters[] = {r8, r9, r11, r12, r15};
 static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
 
 }  // namespace detail
 
-// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
 class BaselineAssembler::ScratchRegisterScope {
  public:
   explicit ScratchRegisterScope(BaselineAssembler* assembler)
@@ -46,7 +46,7 @@ class BaselineAssembler::ScratchRegisterScope {
 };
 
 // TODO(v8:11461): Unify condition names in the MacroAssembler.
-enum class Condition : uint8_t {
+enum class Condition : uint32_t {
   kEqual = equal,
   kNotEqual = not_equal,
 
@@ -92,6 +92,11 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
 }
 
 void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+  // NOP on x64.
+}
 
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ jmp(target, distance);
@@ -118,15 +123,25 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
 }
 
 void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
-  __ RecordCommentForOffHeapTrampoline(builtin);
-  __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
-  if (FLAG_code_comments) __ RecordComment("]");
+  if (masm()->options().short_builtin_calls) {
+    // Generate pc-relative call.
+    __ CallBuiltin(builtin);
+  } else {
+    __ RecordCommentForOffHeapTrampoline(builtin);
+    __ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
+    if (FLAG_code_comments) __ RecordComment("]");
+  }
 }
 
 void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
-  __ RecordCommentForOffHeapTrampoline(builtin);
-  __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
-  if (FLAG_code_comments) __ RecordComment("]");
+  if (masm()->options().short_builtin_calls) {
+    // Generate pc-relative jump.
+    __ TailCallBuiltin(builtin);
+  } else {
+    __ RecordCommentForOffHeapTrampoline(builtin);
+    __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
+    if (FLAG_code_comments) __ RecordComment("]");
+  }
 }
 
 void BaselineAssembler::Test(Register value, int mask) {
diff --git a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
index e4f123e8e05..73b43770e56 100644
--- a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -16,12 +16,11 @@ namespace baseline {
 #define __ basm_.
 
 void BaselineCompiler::Prologue() {
-  __ Move(kInterpreterBytecodeArrayRegister, bytecode_);
   DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+  int max_frame_size = bytecode_->frame_size() + max_call_args_;
   CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
               kJSFunctionRegister, kJavaScriptCallArgCountRegister,
-              kInterpreterBytecodeArrayRegister,
-              kJavaScriptCallNewTargetRegister);
+              max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
 
   PrologueFillFrame();
 }
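Note: both the ia32 and x64 prologues now pass max_frame_size = bytecode_->frame_size() + max_call_args_ to the out-of-line prologue builtin: the register file plus the widest outgoing call emitted anywhere in the function, accumulated through UpdateMaxCallArgs (see baseline-compiler.h above). A standalone sketch of that bookkeeping (illustrative names; units as in the diff):

// Standalone sketch of the max_frame_size bookkeeping (illustrative).
#include <algorithm>

struct FrameSizer {
  int frame_size;         // register-file size from the bytecode
  int max_call_args = 0;  // widest argument list of any emitted call

  // Called once per emitted call site, mirroring UpdateMaxCallArgs().
  void UpdateMaxCallArgs(int call_args) {
    max_call_args = std::max(max_call_args, call_args);
  }

  // Worst-case frame the prologue must be able to reserve.
  int MaxFrameSize() const { return frame_size + max_call_args; }
};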