Diffstat (limited to 'chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc')
-rw-r--r--  chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc  61
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6edb1539439..257ef1bca15 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -163,18 +163,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
__ Daddu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
@@ -465,8 +463,8 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
@@ -711,7 +709,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -724,7 +723,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -746,7 +746,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int offset = __ root_array_available() ? 76 : 88;
#endif // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS64
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
offset += 16;
}
#endif
@@ -835,9 +835,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
- case kArchStackPointerGreaterThan:
- // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Dsubu(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
break;
+ }
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
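
Note: the stack-check pseudo-instruction now does real work at this point. It subtracts the optional stack-check offset from sp and materializes the comparison into TempRegister(0) with Sltu, instead of leaving everything to the branch emitter. A minimal C++ sketch of the intended semantics (a standalone helper for illustration, not V8 API; registers replaced by plain integers):

    #include <cstdint>

    // The temp register ends up as 1 exactly when the (optionally offset)
    // stack pointer is strictly above the limit held in InputRegister(0),
    // mirroring Dsubu(lhs, sp, offset) followed by Sltu(temp0, limit, lhs).
    inline uint64_t StackPointerGreaterThan(uint64_t sp, uint64_t limit,
                                            uint64_t offset) {
      uint64_t lhs = sp - offset;   // offset is 0 when no stack-check offset applies
      return limit < lhs ? 1 : 0;   // Sltu: set-if-less-than-unsigned
    }
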
@@ -868,6 +875,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
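
Note: together with the first hunk, this moves the Smi filter out of the out-of-line stub and into the inline path, so (when the record-write mode permits it) Smi values bail out before either page-flag probe. A rough sketch of the resulting fast-path ordering, with hypothetical booleans standing in for the page-flag reads (illustrative only, not V8 code):

    #include <cstdint>

    // Whether the record-write stub call is reached. Smis have a clear low
    // tag bit and never need a write barrier; the two flags correspond to
    // the inline CheckPageFlag on the object and the one on the value in
    // OutOfLineRecordWrite::Generate.
    inline bool ReachesRecordWriteStub(uint64_t tagged_value,
                                       bool object_page_interesting,
                                       bool value_page_interesting) {
      if ((tagged_value & 1) == 0) return false;   // JumpIfSmi -> ool->exit()
      if (!object_page_interesting) return false;  // kPointersFromHereAreInterestingMask
      return value_page_interesting;               // kPointersToHereAreInterestingMask
    }
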
@@ -3817,7 +3827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
#define UNSUPPORTED_COND(opcode, condition) \
StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
@@ -3880,13 +3890,11 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
- Register lhs_register = sp;
- uint32_t offset;
- if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
- lhs_register = i.TempRegister(0);
- __ Dsubu(lhs_register, sp, offset);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
}
- __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
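
Note: on the consuming side the branch no longer recomputes the comparison; it simply tests the flag left in TempRegister(0), flipping it with xori when the condition is ls ("not greater"). The same flip appears in the AssembleArchBoolean hunk below. Roughly, in C++ terms (illustrative, not V8 code):

    // temp0 == 1 means "stack pointer (minus any offset) > limit".
    inline bool BranchTaken(unsigned temp0, bool condition_is_ls) {
      unsigned flag = condition_is_ls ? (temp0 ^ 1u) : temp0;  // xori when cc == ls
      return flag != 0;  // Branch(tlabel, ne, temp0, Operand(zero_reg))
    }
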
@@ -4232,6 +4240,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
}
return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
@@ -4342,7 +4357,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -4446,7 +4461,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
g.ToRegister(additional_pop_count),
Operand(static_cast<int64_t>(0)));