author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-01-04 14:17:57 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-01-05 10:05:06 +0000
commit     39d357e3248f80abea0159765ff39554affb40db (patch)
tree       aba0e6bfb76de0244bba0f5fdbd64b830dd6e621 /chromium/v8/src/compiler/mips
parent     87778abf5a1f89266f37d1321b92a21851d8244d (diff)
download   qtwebengine-chromium-39d357e3248f80abea0159765ff39554affb40db.tar.gz
BASELINE: Update Chromium to 55.0.2883.105
And updates ninja to 1.7.2

Change-Id: I20d43c737f82764d857ada9a55586901b18b9243
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/compiler/mips')
-rw-r--r--  chromium/v8/src/compiler/mips/code-generator-mips.cc       | 413
-rw-r--r--  chromium/v8/src/compiler/mips/instruction-codes-mips.h     |  31
-rw-r--r--  chromium/v8/src/compiler/mips/instruction-selector-mips.cc | 365
3 files changed, 599 insertions, 210 deletions
diff --git a/chromium/v8/src/compiler/mips/code-generator-mips.cc b/chromium/v8/src/compiler/mips/code-generator-mips.cc
index 5e30e341234..12ab4af771b 100644
--- a/chromium/v8/src/compiler/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/mips/code-generator-mips.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
+#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -513,21 +513,7 @@ void CodeGenerator::AssembleDeconstructFrame() {
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ addiu(sp, sp, sp_slot_delta * kPointerSize);
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -560,6 +546,38 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
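Note on the hunk above: AssembleDeconstructActivationRecord and the old
AssemblePrepareTailCall folded stack-delta bookkeeping into each tail-call site;
the new AdjustStackPointerForTailCall centralizes it as a single slot-delta
computation. A minimal sketch of that arithmetic (standalone C++, names
hypothetical; kPointerSize is 4 on mips32):

    // Positive delta: the call needs more unused slots above sp, so the
    // stack grows (Subu on MIPS, where the stack grows downward).
    // Negative delta: the stack may shrink (Addu), but only when
    // allow_shrinkage is set, i.e. in the after-gap pass.
    int StackSlotDelta(int sp_to_fp_slots, int fixed_slots_above_fp,
                       int first_unused_stack_slot) {
      int current_sp_offset = sp_to_fp_slots + fixed_slots_above_fp;
      return first_unused_stack_slot - current_sp_offset;
    }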
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -582,8 +600,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -597,14 +613,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(at);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallAddress: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
CHECK(!instr->InputAt(0)->IsImmediate());
__ Jump(i.InputRegister(0));
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -620,6 +636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(at);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallJSFunctionFromJSFunction:
@@ -631,8 +648,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -651,7 +666,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -692,8 +707,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result = AssembleDeoptimizerCall(
+ deopt_state_id, bailout_type, current_source_position_);
if (result != kSuccess) return result;
break;
}
@@ -741,15 +756,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
case kIeee754Float64Atan:
ASSEMBLE_IEEE754_UNOP(atan);
break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
case kIeee754Float64Atan2:
ASSEMBLE_IEEE754_BINOP(atan2);
break;
case kIeee754Float64Cos:
ASSEMBLE_IEEE754_UNOP(cos);
break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
case kIeee754Float64Cbrt:
ASSEMBLE_IEEE754_UNOP(cbrt);
break;
@@ -759,9 +792,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Expm1:
ASSEMBLE_IEEE754_UNOP(expm1);
break;
- case kIeee754Float64Atanh:
- ASSEMBLE_IEEE754_UNOP(atanh);
- break;
case kIeee754Float64Log:
ASSEMBLE_IEEE754_UNOP(log);
break;
@@ -774,12 +804,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Log2:
ASSEMBLE_IEEE754_UNOP(log2);
break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ break;
+ }
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
case kIeee754Float64Tan:
ASSEMBLE_IEEE754_UNOP(tan);
break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
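All the kIeee754Float64* cases funnel through ASSEMBLE_IEEE754_UNOP, which does
not inline the math: it calls out to V8's C ieee754 runtime. Roughly, and as an
assumption about the macro body rather than a quotation of it:

    // Presumed shape of ASSEMBLE_IEEE754_UNOP(name):
    //   __ PrepareCallCFunction(0, 1, kScratchReg);
    //   __ MovToFloatParameter(i.InputDoubleRegister(0));
    //   __ CallCFunction(
    //       ExternalReference::ieee754_##name##_function(isolate()), 0, 1);
    //   __ MovFromFloatResult(i.OutputDoubleRegister());

kIeee754Float64Pow, added further down, is the exception: it goes through
MathPowStub instead.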
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -795,6 +836,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsMulOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMipsMulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
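kMipsMulOvf emits nothing in this switch because the overflow check is
materialized later, in AssembleArchBranch / AssembleArchBoolean via the
MulBranchOvf macros (see further down in this diff). The test those macros
perform is equivalent to the following self-contained sketch:

    #include <cstdint>

    // A 32x32->64 multiply overflows its 32-bit result exactly when the
    // high word is not the sign extension of the low word.
    bool Int32MulOverflows(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      int32_t lo = static_cast<int32_t>(wide);
      int32_t hi = static_cast<int32_t>(wide >> 32);
      return hi != (lo >> 31);  // lo >> 31 is 0 or -1, the expected sign fill
    }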
@@ -1008,11 +1052,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsSubPreserveNanS:
- __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
- i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- break;
case kMipsMulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1079,10 +1118,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsSubPreserveNanD:
- __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
- i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ case kMipsMaddS:
+ __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMipsMaddD:
+ __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMipsMaddfS:
+ __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMipsMaddfD:
+ __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ break;
+ case kMipsMsubS:
+ __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2));
+ break;
+ case kMipsMsubD:
+ __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2));
+ break;
+ case kMipsMsubfS:
+ __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
+ break;
+ case kMipsMsubfD:
+ __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
break;
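The r2 and r6 fused forms place the addend differently, which explains the
operand counts above: madd.d takes an explicit addend register (out = fs * ft +
fr), while maddf.d accumulates into its destination (fd = fd + fs * ft), so the
selector must tie output and first input together for the r6 variants. In C, as
a semantics-only sketch (pre-R6 madd rounding behavior varies by
implementation):

    double MaddR2(double fr, double fs, double ft) { return fs * ft + fr; }
    double MaddfR6(double fd, double fs, double ft) { return fd + fs * ft; }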
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1109,6 +1175,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsAbsD:
__ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kMipsNegS:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMipsNegD:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -1153,60 +1225,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
break;
}
- case kMipsFloat64Max: {
- // (b < a) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat32Max: {
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat64Min: {
- // (a < b) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat64Max: {
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat32Max: {
- // (b < a) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat32Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat32Min: {
- // (a < b) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat64Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
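The rewritten Min/Max cases also change the NaN contract: any NaN input now
produces a canonical quiet NaN, rather than whatever the old
compare-and-conditional-move sequences happened to propagate. Reference
semantics, leaving out the signed-zero handling that the MinNaNCheck/MaxNaNCheck
macros take care of (a sketch):

    #include <cmath>
    #include <limits>

    float Float32MaxRef(float a, float b) {
      if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<float>::quiet_NaN();
      return a > b ? a : b;  // signed zeros not distinguished here
    }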
case kMipsCvtSD: {
@@ -1287,6 +1347,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister scratch = kScratchDoubleReg;
__ trunc_w_s(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
break;
}
case kMipsTruncUwD: {
@@ -1299,6 +1364,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
__ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
break;
}
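Both truncation fix-ups exploit the saturating result of the FPU truncate: an
out-of-range input yields INT32_MAX (signed) or UINT32_MAX (unsigned), and the
added instructions renormalize that marker. What the emitted sequences compute,
as a C sketch (the real code operates on raw registers, where the +1 wraparound
is well defined):

    #include <cstdint>

    int32_t NormalizeSignedTrunc(int32_t out) {
      int32_t plus1 =
          static_cast<int32_t>(static_cast<uint32_t>(out) + 1);  // addiu
      bool saturated = plus1 < out;    // slt: true only when out == INT32_MAX
      return saturated ? plus1 : out;  // Movn: INT32_MAX becomes INT32_MIN
    }

    uint32_t NormalizeUnsignedTrunc(uint32_t out) {
      return (out + 1 == 0) ? 0 : out;  // Movz: UINT32_MAX becomes 0
    }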
case kMipsFloat64ExtractLowWord32:
@@ -1313,22 +1382,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsFloat64InsertHighWord32:
__ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
break;
- case kMipsFloat64SilenceNaN: {
- FPURegister value = i.InputDoubleRegister(0);
- FPURegister result = i.OutputDoubleRegister();
- Register scratch0 = i.TempRegister(0);
- Label is_nan, not_nan;
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
- __ bind(&is_nan);
- __ LoadRoot(scratch0, Heap::kNanValueRootIndex);
- __ ldc1(result, FieldMemOperand(scratch0, HeapNumber::kValueOffset));
- __ bind(&not_nan);
+ case kMipsFloat64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- }
// ... more basic instructions ...
-
+ case kMipsSeb:
+ __ seb(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kMipsSeh:
+ __ seh(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1341,34 +1405,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlhu:
+ __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlh:
+ __ Ulh(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsSh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMipsUsh:
+ __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlw:
+ __ Ulw(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsSw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMipsUsw:
+ __ Usw(i.InputRegister(2), i.MemoryOperand());
+ break;
case kMipsLwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
+ case kMipsUlwc1: {
+ __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
+ }
case kMipsSwc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
+ case kMipsUswc1: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+ break;
+ }
case kMipsLdc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
+ case kMipsUldc1:
+ __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsSdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
+ case kMipsUsdc1:
+ __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1398,6 +1493,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kMipsByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
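kMipsByteSwap32 backs the new Word32ReverseBytes selector added later in this
diff, delegating to ByteSwapSigned with an operand size of 4. The operation
itself, for reference (a sketch):

    #include <cstdint>

    uint32_t ByteSwap32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
             ((v << 8) & 0x00FF0000u) | (v << 24);
    }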
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -1544,6 +1643,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
break;
}
+ } else if (instr->arch_opcode() == kMipsMulOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1609,7 +1722,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
+ instr->arch_opcode() == kMipsSubOvf ||
+ instr->arch_opcode() == kMipsMulOvf) {
Label flabel, tlabel;
switch (instr->arch_opcode()) {
case kMipsAddOvf:
@@ -1621,6 +1735,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), &flabel);
break;
+ case kMipsMulOvf:
+ __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
default:
UNREACHABLE();
break;
@@ -1759,10 +1877,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type,
+ SourcePosition pos) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1926,10 +2048,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
- int slot;
- if (IsMaterializableFromFrame(src_object, &slot)) {
- __ lw(dst, g.SlotToMemOperand(slot));
- } else if (IsMaterializableFromRoot(src_object, &index)) {
+ if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
__ li(dst, src_object);
@@ -1944,9 +2063,14 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
- __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ sw(zero_reg, dst);
+ } else {
+ __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ sw(at, dst);
+ }
} else {
+ DCHECK(destination->IsFPRegister());
FloatRegister dst = g.ToSingleRegister(destination);
__ Move(dst, src.ToFloat32());
}
@@ -1967,23 +2091,42 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ sdc1(src, g.ToMemOperand(destination));
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ sdc1(src, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ swc1(src, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (destination->IsFPRegister()) {
- LocationOperand* op = LocationOperand::cast(source);
- if (op->representation() == MachineRepresentation::kFloat64) {
+ if (rep == MachineRepresentation::kFloat64) {
__ ldc1(g.ToDoubleRegister(destination), src);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ } else if (rep == MachineRepresentation::kFloat32) {
__ lwc1(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
}
} else {
FPURegister temp = kScratchDoubleReg;
- __ ldc1(temp, src);
- __ sdc1(temp, g.ToMemOperand(destination));
+ if (rep == MachineRepresentation::kFloat64) {
+ __ ldc1(temp, src);
+ __ sdc1(temp, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ lwc1(temp, src);
+ __ swc1(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
} else {
UNREACHABLE();
@@ -2033,24 +2176,46 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ ldc1(src, dst);
- __ sdc1(temp, dst);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ Move(temp, src);
+ __ ldc1(src, dst);
+ __ sdc1(temp, dst);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ Move(temp, src);
+ __ lwc1(src, dst);
+ __ swc1(temp, dst);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ ldc1(temp_1, dst0); // Save destination in temp_1.
- __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ sw(temp_0, dst0);
- __ lw(temp_0, src1);
- __ sw(temp_0, dst1);
- __ sdc1(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+ __ ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ sdc1(temp_1, src0);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ lwc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ swc1(temp_1, src0);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/chromium/v8/src/compiler/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/mips/instruction-codes-mips.h
index 766a5b1ad3c..45ed041175e 100644
--- a/chromium/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/chromium/v8/src/compiler/mips/instruction-codes-mips.h
@@ -17,6 +17,7 @@ namespace compiler {
V(MipsSub) \
V(MipsSubOvf) \
V(MipsMul) \
+ V(MipsMulOvf) \
V(MipsMulHigh) \
V(MipsMulHighU) \
V(MipsDiv) \
@@ -46,7 +47,6 @@ namespace compiler {
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
- V(MipsSubPreserveNanS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
@@ -57,7 +57,6 @@ namespace compiler {
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
- V(MipsSubPreserveNanD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
@@ -65,9 +64,19 @@ namespace compiler {
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsNegS) \
+ V(MipsNegD) \
V(MipsAddPair) \
V(MipsSubPair) \
V(MipsMulPair) \
+ V(MipsMaddS) \
+ V(MipsMaddD) \
+ V(MipsMaddfS) \
+ V(MipsMaddfD) \
+ V(MipsMsubS) \
+ V(MipsMsubD) \
+ V(MipsMsubfS) \
+ V(MipsMsubfD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
@@ -96,26 +105,38 @@ namespace compiler {
V(MipsLbu) \
V(MipsSb) \
V(MipsLh) \
+ V(MipsUlh) \
V(MipsLhu) \
+ V(MipsUlhu) \
V(MipsSh) \
+ V(MipsUsh) \
V(MipsLw) \
+ V(MipsUlw) \
V(MipsSw) \
+ V(MipsUsw) \
V(MipsLwc1) \
+ V(MipsUlwc1) \
V(MipsSwc1) \
+ V(MipsUswc1) \
V(MipsLdc1) \
+ V(MipsUldc1) \
V(MipsSdc1) \
+ V(MipsUsdc1) \
V(MipsFloat64ExtractLowWord32) \
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
V(MipsFloat64SilenceNaN) \
- V(MipsFloat64Max) \
- V(MipsFloat64Min) \
V(MipsFloat32Max) \
+ V(MipsFloat64Max) \
V(MipsFloat32Min) \
+ V(MipsFloat64Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
- V(MipsStackClaim)
+ V(MipsByteSwap32) \
+ V(MipsStackClaim) \
+ V(MipsSeb) \
+ V(MipsSeh)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/chromium/v8/src/compiler/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
index c95613eba8b..0a98930b5c4 100644
--- a/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -104,7 +104,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -117,7 +124,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -152,6 +159,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsLw;
@@ -176,6 +185,10 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitStore(Node* node) {
MipsOperandGenerator g(this);
@@ -189,7 +202,7 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -231,6 +244,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kMipsSh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsSw;
@@ -392,6 +407,24 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasValue() && mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ uint32_t sar = m.right().Value();
+ uint32_t shl = mleft.right().Value();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kMipsSeh, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kMipsSeb, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsSar, node);
}
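The new VisitWord32Sar match recognizes the canonical sign-extension idiom, a
left shift followed by an arithmetic right shift of the same amount, and maps it
to the single-instruction seh (shift of 16, halfword) or seb (shift of 24,
byte). Equivalently, as a sketch (the left shift is done unsigned to keep the C
well defined):

    #include <cstdint>

    int32_t SignExtend16(int32_t x) {  // selected as seh
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 16) >> 16;
    }
    int32_t SignExtend8(int32_t x) {   // selected as seb
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
    }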
@@ -473,6 +506,13 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitWord32Ctz(Node* node) {
MipsOperandGenerator g(this);
@@ -741,45 +781,129 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
+ MipsOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ Emit(kMipsMaddS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
+ Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
+ Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
+ Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsAddS, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
+ Emit(kMipsMaddD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
+ Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
+ Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
+ Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsAddD, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
+ Float32BinopMatcher mleft(m.left().node());
+ Emit(kMipsMsubS, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
+ Float32BinopMatcher mright(m.right().node());
+ Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ }
VisitRRR(this, kMipsSubS, node);
}
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
- VisitRRR(this, kMipsSubPreserveNanS, node);
-}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
Float64BinopMatcher m(node);
- if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ // For Sub.D(Mul.D(x, y), z) select Msub.D(z, x, y).
+ Float64BinopMatcher mleft(m.left().node());
+ Emit(kMipsMsubD, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // For Sub.D(x, Mul.D(y, z)) select Msubf.D(x, y, z).
+ Float64BinopMatcher mright(m.right().node());
+ Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
}
}
VisitRRR(this, kMipsSubD, node);
}
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
- VisitRRR(this, kMipsSubPreserveNanD, node);
-}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
}
@@ -806,64 +930,28 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat32Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat64Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat32Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat64Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
@@ -930,15 +1018,19 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
-void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kMipsNegS, node);
+}
-void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kMipsNegD, node);
+}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
InstructionCode opcode) {
MipsOperandGenerator g(this);
- Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
- g.UseFixed(node->InputAt(1), f14))
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
+ g.UseFixed(node->InputAt(1), f4))
->MarkAsCall();
}
@@ -957,7 +1049,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -991,6 +1083,104 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUlw;
+ break;
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUlwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUldc1;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
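VisitUnalignedLoad picks the Ul* pseudo-instructions added earlier in this diff;
on pre-R6 cores those macro-instructions typically expand to lwl/lwr pairs,
while r6 loads handle misalignment natively. A portable statement of what an
unaligned word load must produce (a sketch):

    #include <cstdint>
    #include <cstring>

    uint32_t LoadUnalignedWord(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // compiler emits an alignment-safe sequence
      return v;
    }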
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+
+ // TODO(mips): I guess this could be done in a better way.
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUswc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUsdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsUsh;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUsw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -1015,6 +1205,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
@@ -1093,7 +1285,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1267,6 +1459,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMipsSubOvf, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMipsMulOvf, cont);
default:
break;
}
@@ -1290,7 +1485,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->frame_state());
+ g.TempImmediate(0), cont->reason(),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1307,14 +1503,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1401,6 +1597,14 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kMipsSubOvf, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kMipsMulOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMipsMulOvf, &cont);
+}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1558,19 +1762,18 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
+
return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven;
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kWord32ReverseBytes |
+ MachineOperatorBuilder::kWord64ReverseBytes;
}
// static