Diffstat (limited to 'deps/v8/src/compiler/instruction-selector.cc')
-rw-r--r-- | deps/v8/src/compiler/instruction-selector.cc | 1053 |
1 file changed, 1053 insertions, 0 deletions
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
new file mode 100644
index 000000000..541e0452f
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -0,0 +1,1053 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector.h"

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"

namespace v8 {
namespace internal {
namespace compiler {

InstructionSelector::InstructionSelector(InstructionSequence* sequence,
                                         SourcePositionTable* source_positions,
                                         Features features)
    : zone_(sequence->isolate()),
      sequence_(sequence),
      source_positions_(source_positions),
      features_(features),
      current_block_(NULL),
      instructions_(InstructionDeque::allocator_type(zone())),
      defined_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())),
      used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}


void InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    if (!block->IsLoopHeader()) continue;
    DCHECK_NE(0, block->PredecessorCount());
    DCHECK_NE(1, block->PredecessorCount());
    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      Node::Inputs inputs = phi->inputs();
      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
        MarkAsUsed(*k);
      }
    }
  }

  // Visit each basic block in post order.
  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
  }

  // Schedule the selected instructions.
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    size_t end = block->code_end_;
    size_t start = block->code_start_;
    sequence()->StartBlock(block);
    while (start-- > end) {
      sequence()->AddInstruction(instructions_[start], block);
    }
    sequence()->EndBlock(block);
  }
}
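

// Scheduling note: VisitBlock (below) appends each block's instructions to
// the buffer in reverse program order and records the region as
// [code_end_, code_start_), so code_start_ is the *upper* buffer index. The
// `while (start-- > end)` loop above therefore walks the region backwards
// and hands the instructions to the sequence in forward program order; e.g.
// a block stored as {jmp, n2, n1} is replayed as n1, n2, jmp.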


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b,
                                       InstructionOperand* c, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
    size_t temp_count, InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c, d};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
    size_t input_count, InstructionOperand** inputs, size_t temp_count,
    InstructionOperand** temps) {
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}


Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}


bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
         block->deferred_ == current_block_->deferred_;
}


bool InstructionSelector::CanCover(Node* user, Node* node) const {
  return node->OwnedBy(user) &&
         schedule()->block(node) == schedule()->block(user);
}
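

// CanCover guards operand fusion in the architecture-specific visitors: a
// node may only be folded into the instruction emitted for `user` if `user`
// is its sole consumer and both live in the same block; otherwise the node
// still needs an independently defined value of its own. For example, on a
// target with memory operands a selector can let an Int32Add cover a Load
// that it solely owns and match the load into the add's addressing mode.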


bool InstructionSelector::IsDefined(Node* node) const {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  return defined_[id];
}


void InstructionSelector::MarkAsDefined(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  defined_[id] = true;
}


bool InstructionSelector::IsUsed(Node* node) const {
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  return used_[id];
}


void InstructionSelector::MarkAsUsed(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  used_[id] = true;
}


bool InstructionSelector::IsDouble(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsDouble(node->id());
}


void InstructionSelector::MarkAsDouble(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsReference(node));
  sequence()->MarkAsDouble(node->id());

  // Propagate "doubleness" throughout phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsDouble(user)) continue;
    MarkAsDouble(user);
  }
}


bool InstructionSelector::IsReference(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsReference(node->id());
}


void InstructionSelector::MarkAsReference(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsDouble(node));
  sequence()->MarkAsReference(node->id());

  // Propagate "referenceness" throughout phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsReference(user)) continue;
    MarkAsReference(user);
  }
}


void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
  DCHECK_NOT_NULL(node);
  if (rep == kMachineFloat64) MarkAsDouble(node);
  if (rep == kMachineTagged) MarkAsReference(node);
}
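

// Example of the propagation above: marking a Float64Add as double also
// marks every phi that uses it, and transitively phis of those phis. The
// IsDouble()/IsReference() early-outs terminate the walk on phi cycles in
// loops, because a phi is marked before its own uses are scanned.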


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
    : output_count(0),
      descriptor(d),
      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
      fixed_and_control_args(
          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
      fixed_count(0),
      pushed_nodes(zone->NewArray<Node*>(input_count())),
      pushed_count(0) {
  if (d->ReturnCount() > 1) {
    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
  }
  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate,
                                               BasicBlock* cont_node,
                                               BasicBlock* deopt_node) {
  OperandGenerator g(this);
  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
            buffer->input_count());

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes[0] = call;
    } else {
      call->CollectProjections(buffer->descriptor->ReturnCount(),
                               buffer->output_nodes);
    }

    // Filter out the outputs that aren't live because no projection uses them.
    for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
      if (buffer->output_nodes[i] != NULL) {
        Node* output = buffer->output_nodes[i];
        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
        MarkAsRepresentation(location.representation(), output);
        buffer->outputs[buffer->output_count++] =
            g.DefineAsLocation(output, location);
      }
    }
  }

  buffer->fixed_count = 1;  // First argument is always the callee.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->fixed_and_control_args[0] =
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallAddress:
      buffer->fixed_and_control_args[0] =
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->fixed_and_control_args[0] =
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
      break;
  }

  int input_count = buffer->input_count();

  // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
  // require an explicit push instruction before the call and do not appear
  // as arguments to the call. Everything else ends up as an InstructionOperand
  // argument to the call.
  InputIter iter(call->inputs().begin());
  for (int index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK(index == iter.index());
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand* op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
      DCHECK(buffer->pushed_nodes[stack_index] == NULL);
      buffer->pushed_nodes[stack_index] = *iter;
      buffer->pushed_count++;
    } else {
      buffer->fixed_and_control_args[buffer->fixed_count] = op;
      buffer->fixed_count++;
    }
  }

  // If the call can deoptimize, we add the continuation and deoptimization
  // block labels.
  if (buffer->descriptor->CanLazilyDeoptimize()) {
    DCHECK(cont_node != NULL);
    DCHECK(deopt_node != NULL);
    buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
    buffer->fixed_and_control_args[buffer->fixed_count + 1] =
        g.Label(deopt_node);
  } else {
    DCHECK(cont_node == NULL);
    DCHECK(deopt_node == NULL);
  }

  DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count));
}
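

// Worked example (hypothetical descriptor): for a call with one tagged
// return value, a register callee, one register argument and two stack
// arguments, InitializeCallBuffer produces output_count == 1, a
// fixed_and_control_args array holding the callee and the register argument
// (fixed_count == 2), and the two stack arguments in pushed_nodes at the
// slot indices derived from their fixed-slot operands (pushed_count == 2),
// satisfying the final DCHECK with input_count == 4. The
// architecture-specific VisitCall then emits the explicit pushes for
// pushed_nodes before the call instruction itself.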


void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK_EQ(NULL, current_block_);
  current_block_ = block;
  int current_block_end = static_cast<int>(instructions_.size());

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    // Skip nodes that are unused or already defined.
    if (!IsUsed(node) || IsDefined(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  // TODO(bmeurer): We should not mutate the schedule.
  block->code_end_ = current_block_end;
  block->code_start_ = static_cast<int>(instructions_.size());

  current_block_ = NULL;
}


static inline void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
  // Branch targets should not have phis.
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    const Node* node = *i;
    CHECK_NE(IrOpcode::kPhi, node->opcode());
  }
#endif
}


void InstructionSelector::VisitControl(BasicBlock* block) {
  Node* input = block->control_input_;
  switch (block->control_) {
    case BasicBlockData::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlockData::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // SSA deconstruction requires targets of branches not to have phis.
      // Edge split form guarantees this property, but is more strict.
      CheckNoPhis(tbranch);
      CheckNoPhis(fbranch);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlockData::kReturn: {
      // If the result itself is a return, return its input.
      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlockData::kThrow:
      return VisitThrow(input);
    case BasicBlockData::kDeoptimize:
      return VisitDeoptimize(input);
    case BasicBlockData::kCall: {
      BasicBlock* deoptimization = block->SuccessorAt(0);
      BasicBlock* continuation = block->SuccessorAt(1);
      VisitCall(input, continuation, deoptimization);
      break;
    }
    case BasicBlockData::kNone: {
      // TODO(titzer): exit block doesn't have control.
      DCHECK(input == NULL);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
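

// Background for the kBranch case above: if a branch target had phis, the
// parallel moves that resolve them during SSA deconstruction would have no
// block of their own to live in. Edge split form (inserting an empty block
// on every edge from a multi-successor block to a multi-predecessor block)
// guarantees such a block always exists, which is why both branch targets
// are checked with CheckNoPhis in debug mode.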


void InstructionSelector::VisitNode(Node* node) {
  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    DCHECK(!source_position.IsInvalid());
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
    case IrOpcode::kLazyDeoptimization:
    case IrOpcode::kContinuation:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kParameter: {
      int index = OpParameter<int>(node);
      MachineType rep = linkage()
                            ->GetIncomingDescriptor()
                            ->GetInputLocation(index)
                            .representation();
      MarkAsRepresentation(rep, node);
      return VisitParameter(node);
    }
    case IrOpcode::kPhi:
      return VisitPhi(node);
    case IrOpcode::kProjection:
      return VisitProjection(node);
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
    case IrOpcode::kNumberConstant:
      // TODO(turbofan): only mark non-smis as references.
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kCall:
      return VisitCall(node, NULL, NULL);
    case IrOpcode::kFrameState:
    case IrOpcode::kStateValues:
      return;
    case IrOpcode::kLoad: {
      MachineType load_rep = OpParameter<MachineType>(node);
      MarkAsRepresentation(load_rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32AddWithOverflow:
      return VisitInt32AddWithOverflow(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32SubWithOverflow:
      return VisitInt32SubWithOverflow(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32UDiv:
      return VisitInt32UDiv(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32UMod:
      return VisitInt32UMod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64UDiv:
      return VisitInt64UDiv(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64UMod:
      return VisitInt64UMod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kConvertInt32ToInt64:
      return VisitConvertInt32ToInt64(node);
    case IrOpcode::kConvertInt64ToInt32:
      return VisitConvertInt64ToInt32(node);
    case IrOpcode::kChangeInt32ToFloat64:
      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
    case IrOpcode::kChangeUint32ToFloat64:
      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
    case IrOpcode::kChangeFloat64ToInt32:
      return VisitChangeFloat64ToInt32(node);
    case IrOpcode::kChangeFloat64ToUint32:
      return VisitChangeFloat64ToUint32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
  }
}
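

// The comma expressions above, e.g. `MarkAsDouble(node), VisitConstant(node)`,
// first tag the node's virtual register with its representation and then
// dispatch to the visitor; the register allocator later uses the double tag
// to pick a floating-point register, and the reference tag similarly
// identifies tagged values that the GC must track.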


#if V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord32Test(m.left().node(), &cont);
  }
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord64Test(m.left().node(), &cont);
  }
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32AddWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32AddWithOverflow(node, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32SubWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32SubWithOverflow(node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(node, &cont);
}

#endif  // V8_TURBOFAN_BACKEND
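

// A FlagsContinuation captures what should happen to the condition computed
// by a compare: the visitors above materialize a boolean into the node's own
// register, while VisitBranch (below) reuses the same machinery to consume
// the flags directly with a conditional jump. Note the Is(0) shortcuts:
// e.g. Word32Equal(x, 0) skips the compare entirely and forwards to
// VisitWord32Test(x, &cont), which a backend can usually emit as a single
// flag-setting test instruction.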


// 32-bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND


// 32-bit targets and unsupported architectures need dummy implementations of
// selected 64-bit ops.
#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND


void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
                                              OpParameter<int>(node))));
}


void InstructionSelector::VisitPhi(Node* node) {
  // TODO(bmeurer): Emit a PhiInstruction here.
  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
    MarkAsUsed(*i);
  }
}


void InstructionSelector::VisitProjection(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32SubWithOverflow:
      if (OpParameter<int32_t>(node) == 0) {
        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      } else {
        DCHECK_EQ(1, OpParameter<int32_t>(node));
        MarkAsUsed(value);
      }
      break;
    default:
      break;
  }
}


void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}


void InstructionSelector::VisitGoto(BasicBlock* target) {
  if (IsNextInAssemblyOrder(target)) {
    // fall through to the next block.
    Emit(kArchNop, NULL)->MarkAsControl();
  } else {
    // jump to the next block.
    OperandGenerator g(this);
    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
  }
}
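

// Example: if the target block immediately follows the current one in RPO
// assembly order (and has the same deferred status), the goto above becomes
// a fall-through: only a kArchNop marked as control is emitted as a
// placeholder, while any other target gets a real kArchJmp to its label.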


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<int32_t>(value) == 1) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32AddWithOverflow(node, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32SubWithOverflow(node, &cont);
              default:
                break;
            }
          }
        }
        break;
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitWord32Test(value, &cont);
}
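

// Example of the combining above for `if (x == 0)`: the branch input is
// Word32Equal(x, 0), so the while loop strips the compare and negates the
// continuation (from "branch if x != 0" to "branch if x == 0"). Since x is
// not itself a comparison, selection falls through to
// VisitWord32Test(x, &cont), and the backend can emit one flag-setting test
// plus a conditional jump instead of materializing a boolean first.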


void InstructionSelector::VisitReturn(Node* value) {
  OperandGenerator g(this);
  if (value != NULL) {
    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
  } else {
    Emit(kArchRet, NULL);
  }
}


void InstructionSelector::VisitThrow(Node* value) {
  UNIMPLEMENTED();  // TODO(titzer)
}


static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kHeapConstant:
      return g->UseImmediate(input);
    default:
      return g->Use(input);
  }
}


void InstructionSelector::VisitDeoptimize(Node* deopt) {
  DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
  Node* state = deopt->InputAt(0);
  DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
  BailoutId ast_id = OpParameter<BailoutId>(state);

  // Add the inputs.
  Node* parameters = state->InputAt(0);
  int parameters_count = OpParameter<int>(parameters);

  Node* locals = state->InputAt(1);
  int locals_count = OpParameter<int>(locals);

  Node* stack = state->InputAt(2);
  int stack_count = OpParameter<int>(stack);

  OperandGenerator g(this);
  std::vector<InstructionOperand*> inputs;
  inputs.reserve(parameters_count + locals_count + stack_count);
  for (int i = 0; i < parameters_count; i++) {
    inputs.push_back(UseOrImmediate(&g, parameters->InputAt(i)));
  }
  for (int i = 0; i < locals_count; i++) {
    inputs.push_back(UseOrImmediate(&g, locals->InputAt(i)));
  }
  for (int i = 0; i < stack_count; i++) {
    inputs.push_back(UseOrImmediate(&g, stack->InputAt(i)));
  }

  FrameStateDescriptor* descriptor = new (instruction_zone())
      FrameStateDescriptor(ast_id, parameters_count, locals_count, stack_count);

  DCHECK_EQ(descriptor->size(), inputs.size());

  int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
  Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), 0, NULL,
       inputs.size(), &inputs.front(), 0, NULL);
}


#if !V8_TURBOFAN_BACKEND

#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {}

#endif  // !V8_TURBOFAN_BACKEND

}  // namespace compiler
}  // namespace internal
}  // namespace v8