| author | Chris Dickinson <christopher.s.dickinson@gmail.com> | 2015-05-05 13:48:55 -0700 |
|---|---|---|
| committer | Rod Vagg <rod@vagg.org> | 2015-08-04 11:56:09 -0700 |
| commit | d58e780504bdba6c5897c48428fd984c5b5f96fe (patch) | |
| tree | 033f1568ae3f9f077aceb843b42eb1ed1739ce0f /deps/v8/src/compiler/ia32/code-generator-ia32.cc | |
| parent | 21d31c08e7d0b6865e52452750b20b05e6dca443 (diff) | |
| download | node-new-d58e780504bdba6c5897c48428fd984c5b5f96fe.tar.gz | |
deps: update v8 to 4.3.61.21
* @indutny's SealHandleScope patch (484bebc38319fc7c622478037922ad73b2edcbf9)
  has been cherry-picked onto the top of V8 to make it compile.
* There's some test breakage in contextify.
* This was merged at the request of the TC.
PR-URL: https://github.com/iojs/io.js/pull/1632
Diffstat (limited to 'deps/v8/src/compiler/ia32/code-generator-ia32.cc')
-rw-r--r-- | deps/v8/src/compiler/ia32/code-generator-ia32.cc | 128 |
1 file changed, 95 insertions(+), 33 deletions(-)
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index d20848918d..316333ba89 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -24,9 +24,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
   IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
-  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+  Operand InputOperand(size_t index, int extra = 0) {
+    return ToOperand(instr_->InputAt(index), extra);
+  }
 
-  Immediate InputImmediate(int index) {
+  Immediate InputImmediate(size_t index) {
     return ToImmediate(instr_->InputAt(index));
   }
 
@@ -75,8 +77,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
     return Immediate(-1);
   }
 
-  static int NextOffset(int* offset) {
-    int i = *offset;
+  static size_t NextOffset(size_t* offset) {
+    size_t i = *offset;
     (*offset)++;
     return i;
   }
@@ -91,7 +93,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
     return static_cast<ScaleFactor>(scale);
   }
 
-  Operand MemoryOperand(int* offset) {
+  Operand MemoryOperand(size_t* offset) {
     AddressingMode mode = AddressingModeField::decode(instr_->opcode());
     switch (mode) {
       case kMode_MR: {
@@ -154,7 +156,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
     return Operand(no_reg, 0);
   }
 
-  Operand MemoryOperand(int first_input = 0) {
+  Operand MemoryOperand(size_t first_input = 0) {
     return MemoryOperand(&first_input);
   }
 };
@@ -162,7 +164,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
 
 namespace {
 
-bool HasImmediateInput(Instruction* instr, int index) {
+bool HasImmediateInput(Instruction* instr, size_t index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
@@ -292,7 +294,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         Register reg = i.InputRegister(0);
         __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
       }
-      AddSafepointAndDeopt(instr);
+      RecordCallPosition(instr);
       break;
     }
     case kArchCallJSFunction: {
@@ -304,7 +306,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ Assert(equal, kWrongFunctionContext);
       }
       __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
-      AddSafepointAndDeopt(instr);
+      RecordCallPosition(instr);
      break;
     }
     case kArchJmp:
@@ -319,6 +321,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArchNop:
       // don't emit code for nops.
       break;
+    case kArchDeoptimize: {
+      int deopt_state_id =
+          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+      break;
+    }
     case kArchRet:
       AssembleReturn();
       break;
@@ -439,6 +447,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ ror_cl(i.OutputOperand());
       }
       break;
+    case kIA32Lzcnt:
+      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+      break;
     case kSSEFloat64Cmp:
       __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
@@ -454,6 +465,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEFloat64Div:
       __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
+    case kSSEFloat64Max:
+      __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      break;
+    case kSSEFloat64Min:
+      __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
+      break;
     case kSSEFloat64Mod: {
       // TODO(dcarney): alignment is wrong.
       __ sub(esp, Immediate(kDoubleSize));
@@ -482,22 +499,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEFloat64Sqrt:
       __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
-    case kSSEFloat64Floor: {
-      CpuFeatureScope sse_scope(masm(), SSE4_1);
-      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 v8::internal::Assembler::kRoundDown);
-      break;
-    }
-    case kSSEFloat64Ceil: {
+    case kSSEFloat64Round: {
       CpuFeatureScope sse_scope(masm(), SSE4_1);
-      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 v8::internal::Assembler::kRoundUp);
-      break;
-    }
-    case kSSEFloat64RoundTruncate: {
-      CpuFeatureScope sse_scope(masm(), SSE4_1);
-      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                 v8::internal::Assembler::kRoundToZero);
+      RoundingMode const mode =
+          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
       break;
     }
     case kSSECvtss2sd:
@@ -523,6 +529,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEUint32ToFloat64:
       __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat64ExtractLowWord32:
+      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+        __ mov(i.OutputRegister(), i.InputOperand(0));
+      } else {
+        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+      }
+      break;
+    case kSSEFloat64ExtractHighWord32:
+      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+        __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+      } else {
+        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
+      }
+      break;
+    case kSSEFloat64InsertLowWord32:
+      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+      break;
+    case kSSEFloat64InsertHighWord32:
+      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+      break;
+    case kSSEFloat64LoadLowWord32:
+      __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
     case kAVXFloat64Add: {
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -547,6 +576,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                 i.InputOperand(1));
       break;
     }
+    case kAVXFloat64Max: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Min: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
     case kIA32Movsxbl:
       __ movsx_b(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -554,7 +595,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ movzx_b(i.OutputRegister(), i.MemoryOperand());
       break;
     case kIA32Movb: {
-      int index = 0;
+      size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
         __ mov_b(operand, i.InputInt8(index));
@@ -570,7 +611,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ movzx_w(i.OutputRegister(), i.MemoryOperand());
       break;
     case kIA32Movw: {
-      int index = 0;
+      size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
         __ mov_w(operand, i.InputInt16(index));
@@ -583,7 +624,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       if (instr->HasOutput()) {
         __ mov(i.OutputRegister(), i.MemoryOperand());
       } else {
-        int index = 0;
+        size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
         if (HasImmediateInput(instr, index)) {
           __ mov(operand, i.InputImmediate(index));
@@ -596,7 +637,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       if (instr->HasOutput()) {
         __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
-        int index = 0;
+        size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
         __ movsd(operand, i.InputDoubleRegister(index));
       }
@@ -605,7 +646,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       if (instr->HasOutput()) {
         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
-        int index = 0;
+        size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
         __ movss(operand, i.InputDoubleRegister(index));
       }
@@ -699,6 +740,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
       break;
+    case kIA32StackCheck: {
+      ExternalReference const stack_limit =
+          ExternalReference::address_of_stack_limit(isolate());
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      break;
+    }
   }
 }
 
@@ -759,7 +806,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
 }
 
 
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
@@ -868,9 +915,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
 }
 
 
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
-      isolate(), deoptimization_id, Deoptimizer::LAZY);
+      isolate(), deoptimization_id, bailout_type);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
 }
 
@@ -1043,6 +1091,8 @@ void CodeGenerator::AssemblePrologue() {
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
+    // TODO(titzer): cannot address target function == local #-1
+    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
     stack_slots -= frame()->GetOsrStackSlotCount();
   }
@@ -1117,7 +1167,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      if (destination->IsRegister()) {
+      if (info()->IsOptimizing() && src.is_identical_to(info()->context())) {
+        // Loading the context from the frame is way cheaper than materializing
+        // the actual context heap object address.
+        if (destination->IsRegister()) {
+          Register dst = g.ToRegister(destination);
+          __ mov(dst, Operand(ebp, StandardFrameConstants::kContextOffset));
+        } else {
+          DCHECK(destination->IsStackSlot());
+          Operand dst = g.ToOperand(destination);
+          __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+          __ pop(dst);
+        }
+      } else if (destination->IsRegister()) {
         Register dst = g.ToRegister(destination);
         __ LoadHeapObject(dst, src);
       } else {
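The rounding hunk above folds the three per-mode opcodes (kSSEFloat64Floor, kSSEFloat64Ceil, kSSEFloat64RoundTruncate) into a single kSSEFloat64Round whose rounding mode is packed into the instruction word and recovered with MiscField::decode(instr->opcode()). The standalone C++ sketch below illustrates that encode/decode pattern; the bit layout, shift, and opcode number here are invented stand-ins for the example, not V8's actual MiscField or opcode definitions.

// Standalone sketch (not V8 source): the field layout and opcode numbers below
// are illustrative stand-ins for V8's InstructionCode/MiscField machinery.
#include <cstdint>
#include <cstdio>

// Rounding-mode selector values, matching the SSE4.1 roundsd immediate.
enum RoundingMode : uint32_t {
  kRoundToNearest = 0,
  kRoundDown = 1,
  kRoundUp = 2,
  kRoundToZero = 3,
};

// Minimal stand-in for a BitField-style "misc" field: a couple of high bits of
// the 32-bit opcode word carry per-instruction data such as the rounding mode.
struct MiscField {
  static constexpr uint32_t kShift = 22;
  static constexpr uint32_t kMask = 0x3u << kShift;
  static uint32_t encode(RoundingMode mode) {
    return static_cast<uint32_t>(mode) << kShift;
  }
  static RoundingMode decode(uint32_t opcode) {
    return static_cast<RoundingMode>((opcode & kMask) >> kShift);
  }
};

int main() {
  const uint32_t kSSEFloat64Round = 40;  // arbitrary opcode number for the sketch
  // One opcode covers all four behaviours; the selector chosen at instruction
  // selection time is unpacked again when the code generator emits roundsd.
  uint32_t opcode = kSSEFloat64Round | MiscField::encode(kRoundToZero);
  RoundingMode mode = MiscField::decode(opcode);
  std::printf("arch opcode: %u, rounding mode: %u\n",
              static_cast<unsigned>(opcode & ~MiscField::kMask),
              static_cast<unsigned>(mode));
  return 0;
}

The practical effect, as the diff shows, is that the instruction selector can request any of the four SSE4.1 rounding behaviours without growing the opcode enum, and the code generator needs only the single kSSEFloat64Round case.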