Diffstat (limited to 'deps/v8/src/builtins/mips64/builtins-mips64.cc')
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc  473
1 file changed, 415 insertions, 58 deletions
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 9d0156e927..ce1df3bd6a 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -300,12 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
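Aside for readers of the hunk above: the helper now recognizes three instance types for the SharedFunctionInfo's function data. A minimal standalone C++ sketch of that dispatch, using hypothetical enum and struct names rather than the real V8 types:

// Hypothetical stand-ins for the instance types the builtin branches on.
enum class InstanceType { kBytecodeArray, kInterpreterData, kBaselineData };

struct FunctionData {
  InstanceType type;
  FunctionData* bytecode_array = nullptr;  // only set for kInterpreterData
};

// Baseline data takes the is_baseline path, interpreter data is unwrapped to
// its bytecode array, and a plain bytecode array falls through unchanged.
FunctionData* GetBytecodeOrBaseline(FunctionData* sfi_data, bool* is_baseline) {
  *is_baseline = (sfi_data->type == InstanceType::kBaselineData);
  if (*is_baseline) return sfi_data;
  if (sfi_data->type == InstanceType::kInterpreterData)
    return sfi_data->bytecode_array;
  return sfi_data;  // already a bytecode array
}

The assembler version expresses the same decision with GetObjectType plus two conditional branches.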
@@ -320,12 +324,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- a1 : the JSGeneratorObject to resume
// -- ra : return address
// -----------------------------------
- __ AssertGeneratorObject(a1);
-
// Store input value into generator object.
__ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
+ // Check that a1 is still valid, RecordWrite might have clobbered it.
+ __ AssertGeneratorObject(a1);
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -388,12 +392,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, a3, a0);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -481,7 +487,7 @@ namespace {
// using JSEntryFunction = GeneratedCode<Address(
// Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
- Builtins::Name entry_trampoline) {
+ Builtin entry_trampoline) {
Label invoke, handler_entry, exit;
{
@@ -628,7 +634,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// pop the faked function when we return.
Handle<Code> trampoline_code =
- masm->isolate()->builtins()->builtin_handle(entry_trampoline);
+ masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
@@ -665,18 +671,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
} // namespace
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
- Generate_JSEntryVariant(masm, StackFrame::ENTRY,
- Builtins::kJSEntryTrampoline);
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
- Builtins::kJSConstructEntryTrampoline);
+ Builtin::kJSConstructEntryTrampoline);
}
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
Generate_JSEntryVariant(masm, StackFrame::ENTRY,
- Builtins::kRunMicrotasksTrampoline);
+ Builtin::kRunMicrotasksTrampoline);
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
@@ -774,6 +779,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register closure,
Register scratch1,
Register scratch2) {
+ DCHECK(!AreAliased(optimized_code, closure, scratch1, scratch2));
// Store code entry in the closure.
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
@@ -842,11 +848,12 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- __ Ld(a5,
+ __ Ld(scratch1,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
+ __ Lw(scratch1,
+ FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -978,14 +985,35 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ ASM_CODE_COMMENT(masm);
+ Register scratch = t2;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+}
+
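The new helper above reduces the check to a single load, AND, and branch: one combined mask answers both "is there optimized code?" and "is there a compile-optimized marker?". A hedged standalone sketch of that predicate (the bit layout below is assumed for illustration, not the real FeedbackVector encoding):

#include <cstdint>

// Assumed bit positions, for illustration only.
constexpr uint32_t kOptimizationMarkerBits = 0x7;      // low bits: marker
constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;     // assumed position
constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
    kHasOptimizedCodeBit | kOptimizationMarkerBits;

// Mirrors the Lw/And/Branch sequence: a non-zero result means the
// has_optimized_code_or_marker slow path must run.
bool NeedsProcessing(uint32_t optimization_state) {
  return (optimization_state &
          kHasOptimizedCodeOrCompileOptimizedMarkerMask) != 0;
}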
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
Register feedback_vector) {
+ ASM_CODE_COMMENT(masm);
Label maybe_has_optimized_code;
// Check if optimized code marker is available
- __ andi(t0, optimization_state,
- FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
- __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
+ }
Register optimization_marker = optimization_state;
__ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
@@ -996,10 +1024,156 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
__ Ld(optimization_marker,
FieldMemOperand(feedback_vector,
FeedbackVector::kMaybeOptimizedCodeOffset));
-
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
}
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(feedback_vector, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = no_reg;
+ {
+ UseScratchRegisterScope temps(masm);
+ optimization_state = temps.Acquire();
+ // optimization_state will be used only in |has_optimized_code_or_marker|
+    // and can be reused outside of it.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ Lw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Addu(invocation_count, invocation_count, Operand(1));
+ __ Sw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(ra, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+    // value of ra before the call to this BaselineOutOfLinePrologue builtin.
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are 8-bit fields next to each other, so we can optimize by writing a
+  // single 16-bit value. These static asserts guard that our assumption holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sh(zero_reg, FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+  // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ UseScratchRegisterScope temps(masm);
+ Register sp_minus_frame_size = temps.Acquire();
+ __ Dsubu(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ }
+
+ // Do "fast" return to the caller pc in ra.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(optimization_state);
+ // Ensure the optimization_state is not allocated again.
+ // Drop the frame created by the baseline call.
+ __ Pop(ra, fp);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+ __ Ret();
+ temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+}
+
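One subtlety in the prologue above is the folded stack check: the interrupt limit is equal to or tighter than the real stack limit (numerically higher, since the stack grows down), so comparing sp minus the frame size against the interrupt limit alone also guarantees the real limit is respected. A small model of that comparison, with plain integers standing in for addresses:

#include <cstdint>

// sp and limits are byte addresses; the stack grows towards lower addresses,
// so interrupt_limit >= real_limit holds by construction.
bool FramePrecheckPasses(uintptr_t sp, uintptr_t frame_size,
                         uintptr_t interrupt_limit) {
  uintptr_t sp_minus_frame_size = sp - frame_size;
  // The builtin branches to call_stack_guard when this is false (Uless).
  return sp_minus_frame_size >= interrupt_limit;
}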
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1014,7 +1188,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// o ra: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
+// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
@@ -1025,8 +1199,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
- kScratchReg);
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1206,7 +1381,36 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&has_optimized_code_or_marker);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
+ __ JumpCodeObject(a2);
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
@@ -1218,11 +1422,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ break_(0xCC);
}
-static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register num_args,
- Register start_address,
- Register scratch,
- Register scratch2) {
+static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
+ Register start_address,
+ Register scratch, Register scratch2) {
// Find the address of the last argument.
__ Dsubu(scratch, num_args, Operand(1));
__ dsll(scratch, scratch, kPointerSizeLog2);
@@ -1261,7 +1463,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// This function modifies a2, t0 and a4.
- Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
+ GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
@@ -1311,7 +1513,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
// Push the arguments, This function modifies t0, a4 and a5.
- Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
+ GenerateInterpreterPushArgs(masm, a0, a4, a5, t0);
// Push a slot for the receiver.
__ push(zero_reg);
@@ -1562,7 +1764,16 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(zero_reg)) {
+ __ Daddu(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1570,11 +1781,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::zero()));
-
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
-
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1588,10 +1799,18 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Daddu(v0, v0, a1);
- __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+ Generate_OSREntry(masm, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
- // And "return" to the OSR entry point of the function.
- __ Ret();
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
}
// static
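For the OSR paths above, Generate_OSREntry jumps by loading the target into ra and executing Ret, so control "returns" into the freshly compiled code at code_obj + Code::kHeaderSize - kHeapObjectTag + osr_offset. A hedged C-level picture of that address arithmetic (the constants below are placeholders, not the real V8 values):

#include <cstdint>

// Placeholder values for illustration; the builtin uses Code::kHeaderSize and
// kHeapObjectTag plus the OSR offset read from the deoptimization data.
constexpr intptr_t kCodeHeaderSize = 64;
constexpr intptr_t kHeapObjectTag = 1;

intptr_t ComputeOsrEntry(intptr_t tagged_code_obj, intptr_t osr_offset) {
  // The result is written into ra; the following Ret transfers control there.
  return tagged_code_obj + kCodeHeaderSize - kHeapObjectTag + osr_offset;
}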
@@ -3302,53 +3521,191 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the start or the end of the current bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ Push(kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Replace BytecodeOffset with the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ GetObjectType(feedback_vector, t2, t2);
+ __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ Sd(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Dsubu(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Daddu(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ // TODO(liuyu): Remove Ld as arm64 after register reallocation.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Daddu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
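Summarizing the tail of Generate_BaselineEntry above: a C helper (baseline_pc_for_bytecode_offset or baseline_pc_for_next_executed_bytecode) maps the untagged bytecode offset to an offset inside the baseline Code object, which is then added to code_obj before the jump or OSR entry. A hedged sketch of that composition, with an assumed helper signature:

#include <cstdint>

// Assumed shape of the baseline-pc helpers, for illustration; the real
// functions live behind ExternalReference and take tagged arguments.
using GetBaselinePcFn = intptr_t (*)(intptr_t code_obj,
                                     intptr_t bytecode_offset,
                                     intptr_t bytecode_array);

intptr_t ComputeBaselineResumeAddress(GetBaselinePcFn get_baseline_pc,
                                      intptr_t code_obj,
                                      intptr_t bytecode_offset,
                                      intptr_t bytecode_array) {
  // Matches: code_obj += CallCFunction(get_baseline_pc, 3, 0); the builtin then
  // adds Code::kHeaderSize - kHeapObjectTag before jumping or entering via OSR.
  return code_obj +
         get_baseline_pc(code_obj, bytecode_offset, bytecode_array);
}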
+
void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2695591.
- __ break_(0xCC);
+ Generate_BaselineEntry(masm, false);
}
void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2695591.
- __ break_(0xCC);
+ Generate_BaselineEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2800112.
- __ break_(0xCC);
+ Generate_BaselineEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
+}
+
+void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
+ MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<
+ DynamicCheckMapsWithFeedbackVectorDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
+}
+
+template <class Descriptor>
+void Builtins::Generate_DynamicCheckMapsTrampoline(
+ MacroAssembler* masm, Handle<Code> builtin_target) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
// Only save the registers that the DynamicCheckMaps builtin can clobber.
- DynamicCheckMapsDescriptor descriptor;
+ Descriptor descriptor;
RegList registers = descriptor.allocatable_registers();
// FLAG_debug_code is enabled CSA checks will call C function and so we need
// to save all CallerSaved registers too.
if (FLAG_debug_code) registers |= kJSCallerSaved;
- __ SaveRegisters(registers);
+ __ MaybeSaveRegisters(registers);
// Load the immediate arguments from the deopt exit to pass to the builtin.
- Register slot_arg =
- descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
- Register handler_arg =
- descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
+ Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
+ Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
__ Ld(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
__ Uld(slot_arg, MemOperand(handler_arg,
Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
__ Uld(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
- __ Call(BUILTIN_CODE(masm->isolate(), DynamicCheckMaps),
- RelocInfo::CODE_TARGET);
+ __ Call(builtin_target, RelocInfo::CODE_TARGET);
Label deopt, bailout;
__ Branch(&deopt, ne, v0,
Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
- __ RestoreRegisters(registers);
+ __ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
__ Ret();
@@ -3360,16 +3717,16 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, v0,
Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
}
- __ RestoreRegisters(registers);
+ __ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_eager = masm->isolate()->builtins()->builtin_handle(
+ Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
__ Jump(deopt_eager, RelocInfo::CODE_TARGET);
__ bind(&bailout);
- __ RestoreRegisters(registers);
+ __ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
- Handle<Code> deopt_bailout = masm->isolate()->builtins()->builtin_handle(
+ Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
__ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}