Diffstat (limited to 'deps/v8/src/builtins/riscv64/builtins-riscv64.cc')
-rw-r--r-- | deps/v8/src/builtins/riscv64/builtins-riscv64.cc | 85
1 file changed, 59 insertions, 26 deletions
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 03f20057e6..f79e392f48 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -1149,10 +1149,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // are 8-bit fields next to each other, so we could just optimize by writing
   // a 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ Sh(zero_reg, FieldMemOperand(bytecodeArray,
-                                  BytecodeArray::kOsrNestingLevelOffset));
+                                  BytecodeArray::kOsrLoopNestingLevelOffset));

   __ Push(argc, bytecodeArray);

@@ -1315,10 +1315,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                  BytecodeArray::kOsrNestingLevelOffset));
+                                  BytecodeArray::kOsrLoopNestingLevelOffset));

   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
@@ -3633,11 +3633,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {

 namespace {

-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
-                            bool is_osr = false) {
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+                                         bool next_bytecode,
+                                         bool is_osr = false) {
   __ Push(zero_reg, kInterpreterAccumulatorRegister);
   Label start;
   __ bind(&start);
@@ -3646,6 +3648,46 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register closure = a1;
   __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

+  // Get the Code object from the shared function info.
+  Register code_obj = a4;
+  __ LoadTaggedPointerField(
+      code_obj,
+      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedPointerField(
+      code_obj,
+      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+  // Check if we have baseline code. For OSR entry it is safe to assume we
+  // always have baseline code.
+  if (!is_osr) {
+    Label start_with_baseline;
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ GetObjectType(code_obj, scratch, scratch);
+    __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+
+    // Start with bytecode as there is no baseline code.
+    __ Pop(zero_reg, kInterpreterAccumulatorRegister);
+    Builtin builtin_id = next_bytecode
+                             ? Builtin::kInterpreterEnterAtNextBytecode
+                             : Builtin::kInterpreterEnterAtBytecode;
+    __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+            RelocInfo::CODE_TARGET);
+
+    // Start with baseline code.
+    __ bind(&start_with_baseline);
+  } else if (FLAG_debug_code) {
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ GetObjectType(code_obj, scratch, scratch);
+    __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+              Operand(BASELINE_DATA_TYPE));
+  }
+
+  // Load baseline code from baseline data.
+  __ LoadTaggedPointerField(
+      code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;
   __ LoadTaggedPointerField(
@@ -3668,17 +3710,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
       MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   feedback_vector = no_reg;

-  // Get the Code object from the shared function info.
-  Register code_obj = type;
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
-  __ LoadTaggedPointerField(
-      code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
   // Compute baseline pc for bytecode offset.
   __ Push(zero_reg, kInterpreterAccumulatorRegister);
   ExternalReference get_baseline_pc_extref;
@@ -3731,7 +3762,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
     // Sparkplug here.
     __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                    BytecodeArray::kOsrNestingLevelOffset));
+                                    BytecodeArray::kOsrLoopNestingLevelOffset));
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
   } else {
@@ -3764,17 +3795,19 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

 }  // namespace

-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false);
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+    MacroAssembler* masm) {
+  Generate_BaselineOrInterpreterEntry(masm, false);
 }

-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, true);
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+    MacroAssembler* masm) {
+  Generate_BaselineOrInterpreterEntry(masm, true);
 }

 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
     MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false, true);
+  Generate_BaselineOrInterpreterEntry(masm, false, true);
 }

 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
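
Note on the STATIC_ASSERT pair in the first two hunks: kOsrLoopNestingLevelOffset and kBytecodeAgeOffset are adjacent 8-bit fields, so the builtin resets both with a single 16-bit store (__ Sh of zero_reg), and the asserts guarantee the layout assumptions behind that store. Below is a minimal standalone C++ sketch of the same pattern, with illustrative struct and field names (not V8's actual BytecodeArray layout):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative stand-in for the two adjacent 8-bit BytecodeArray fields.
struct BytecodeHeader {
  uint8_t osr_loop_nesting_level;  // reset to 0 to disarm OSR
  uint8_t bytecode_age;            // "no age" is the all-zero bit pattern
};

// Analogue of the STATIC_ASSERTs: the age byte must sit exactly one byte
// (kCharSize) past the OSR nesting level, or the combined store is wrong.
static_assert(offsetof(BytecodeHeader, bytecode_age) ==
              offsetof(BytecodeHeader, osr_loop_nesting_level) + 1);

// Analogue of __ Sh(zero_reg, ...): one 16-bit store clears both fields.
void ResetBytecodeAgeAndOsrState(BytecodeHeader* header) {
  const uint16_t zero = 0;
  std::memcpy(&header->osr_loop_nesting_level, &zero, sizeof(zero));
}

If the layout ever changed (field order, padding, or a nonzero "no age" value), the asserts would fail at compile time instead of the combined store silently corrupting a neighboring field.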
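Note on the behavioral change: the old Generate_BaselineEntry assumed baseline code existed on the shared function info; the renamed Generate_BaselineOrInterpreterEntry first loads the function data, branches on its instance type, and falls back to the interpreter entry builtins when there is no BaselineData (OSR entry still assumes, and under FLAG_debug_code asserts, that baseline code exists). A hedged C++ restatement of that dispatch, using hypothetical stand-in types rather than V8 API:

// Hypothetical stand-in for what the SharedFunctionInfo function-data slot
// can hold.
enum class FunctionDataKind { kBytecodeArray, kBaselineData };

enum class Entry {
  kBaseline,                  // continue in compiled baseline code
  kInterpreterAtBytecode,     // Builtin::kInterpreterEnterAtBytecode
  kInterpreterAtNextBytecode  // Builtin::kInterpreterEnterAtNextBytecode
};

// Mirrors the branch structure Generate_BaselineOrInterpreterEntry emits.
Entry SelectEntry(FunctionDataKind kind, bool next_bytecode, bool is_osr) {
  if (!is_osr && kind != FunctionDataKind::kBaselineData) {
    // No baseline code: resume in the interpreter at the current or next
    // bytecode, matching the builtin_id ternary in the diff above.
    return next_bytecode ? Entry::kInterpreterAtNextBytecode
                         : Entry::kInterpreterAtBytecode;
  }
  // OSR entries assume baseline code exists; debug builds assert it
  // (AbortReason::kExpectedBaselineData).
  return Entry::kBaseline;
}

This is also why the code_obj loads moved to the top of the function: the type check has to happen before the interpreter frame is converted, so the interpreter fallback can pop and jump with the frame still untouched.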