path: root/deps/v8/src/compiler
Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 848
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 86
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 943
-rw-r--r--  deps/v8/src/compiler/arm/linkage-arm.cc | 67
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 854
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 103
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 667
-rw-r--r--  deps/v8/src/compiler/arm64/linkage-arm64.cc | 68
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 2055
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.h | 428
-rw-r--r--  deps/v8/src/compiler/change-lowering.cc | 260
-rw-r--r--  deps/v8/src/compiler/change-lowering.h | 79
-rw-r--r--  deps/v8/src/compiler/code-generator-impl.h | 132
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 381
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 146
-rw-r--r--  deps/v8/src/compiler/common-node-cache.h | 51
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 284
-rw-r--r--  deps/v8/src/compiler/control-builders.cc | 144
-rw-r--r--  deps/v8/src/compiler/control-builders.h | 144
-rw-r--r--  deps/v8/src/compiler/frame.h | 104
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc | 136
-rw-r--r--  deps/v8/src/compiler/gap-resolver.h | 46
-rw-r--r--  deps/v8/src/compiler/generic-algorithm-inl.h | 48
-rw-r--r--  deps/v8/src/compiler/generic-algorithm.h | 136
-rw-r--r--  deps/v8/src/compiler/generic-graph.h | 53
-rw-r--r--  deps/v8/src/compiler/generic-node-inl.h | 245
-rw-r--r--  deps/v8/src/compiler/generic-node.h | 271
-rw-r--r--  deps/v8/src/compiler/graph-builder.cc | 241
-rw-r--r--  deps/v8/src/compiler/graph-builder.h | 226
-rw-r--r--  deps/v8/src/compiler/graph-inl.h | 37
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 94
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 77
-rw-r--r--  deps/v8/src/compiler/graph-replay.cc | 81
-rw-r--r--  deps/v8/src/compiler/graph-replay.h | 44
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 265
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.h | 29
-rw-r--r--  deps/v8/src/compiler/graph.cc | 54
-rw-r--r--  deps/v8/src/compiler/graph.h | 97
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 956
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 88
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 560
-rw-r--r--  deps/v8/src/compiler/ia32/linkage-ia32.cc | 63
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 117
-rw-r--r--  deps/v8/src/compiler/instruction-selector-impl.h | 371
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 1053
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 212
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 483
-rw-r--r--  deps/v8/src/compiler/instruction.h | 871
-rw-r--r--  deps/v8/src/compiler/ir-operations.txt | 0
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 157
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 37
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 550
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.h | 83
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 174
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 107
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 214
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 604
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 67
-rw-r--r--  deps/v8/src/compiler/linkage-impl.h | 206
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 149
-rw-r--r--  deps/v8/src/compiler/linkage.h | 193
-rw-r--r--  deps/v8/src/compiler/lowering-builder.cc | 45
-rw-r--r--  deps/v8/src/compiler/lowering-builder.h | 38
-rw-r--r--  deps/v8/src/compiler/machine-node-factory.h | 381
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 343
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.h | 52
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 168
-rw-r--r--  deps/v8/src/compiler/machine-type.h | 36
-rw-r--r--  deps/v8/src/compiler/node-aux-data-inl.h | 43
-rw-r--r--  deps/v8/src/compiler/node-aux-data.h | 38
-rw-r--r--  deps/v8/src/compiler/node-cache.cc | 120
-rw-r--r--  deps/v8/src/compiler/node-cache.h | 53
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 133
-rw-r--r--  deps/v8/src/compiler/node-properties-inl.h | 165
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 57
-rw-r--r--  deps/v8/src/compiler/node.cc | 55
-rw-r--r--  deps/v8/src/compiler/node.h | 95
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 297
-rw-r--r--  deps/v8/src/compiler/operator-properties-inl.h | 191
-rw-r--r--  deps/v8/src/compiler/operator-properties.h | 49
-rw-r--r--  deps/v8/src/compiler/operator.h | 280
-rw-r--r--  deps/v8/src/compiler/phi-reducer.h | 42
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 341
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 68
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 158
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 129
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 2232
-rw-r--r--  deps/v8/src/compiler/register-allocator.h | 548
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 411
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 92
-rw-r--r--  deps/v8/src/compiler/schedule.h | 335
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 1048
-rw-r--r--  deps/v8/src/compiler/scheduler.h | 84
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 1014
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 71
-rw-r--r--  deps/v8/src/compiler/simplified-node-factory.h | 128
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 189
-rw-r--r--  deps/v8/src/compiler/source-position.cc | 55
-rw-r--r--  deps/v8/src/compiler/source-position.h | 99
-rw-r--r--  deps/v8/src/compiler/structured-machine-assembler.cc | 664
-rw-r--r--  deps/v8/src/compiler/structured-machine-assembler.h | 311
-rw-r--r--  deps/v8/src/compiler/typer.cc | 842
-rw-r--r--  deps/v8/src/compiler/typer.h | 57
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 245
-rw-r--r--  deps/v8/src/compiler/verifier.h | 28
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 1001
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 108
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 722
-rw-r--r--  deps/v8/src/compiler/x64/linkage-x64.cc | 83
109 files changed, 31083 insertions, 0 deletions
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
new file mode 100644
index 000000000..90eb7cd4d
--- /dev/null
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -0,0 +1,848 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+ ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ SBit OutputSBit() const {
+ switch (instr_->flags_mode()) {
+ case kFlags_branch:
+ case kFlags_set:
+ return SetCC;
+ case kFlags_none:
+ return LeaveCC;
+ }
+ UNREACHABLE();
+ return LeaveCC;
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ Operand InputOperand2(int first_index) {
+ const int index = first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ case kMode_Offset_RI:
+ case kMode_Offset_RR:
+ break;
+ case kMode_Operand2_I:
+ return InputImmediate(index + 0);
+ case kMode_Operand2_R:
+ return Operand(InputRegister(index + 0));
+ case kMode_Operand2_R_ASR_I:
+ return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+ case kMode_Operand2_R_ASR_R:
+ return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+ case kMode_Operand2_R_LSL_I:
+ return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+ case kMode_Operand2_R_LSL_R:
+ return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+ case kMode_Operand2_R_LSR_I:
+ return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+ case kMode_Operand2_R_LSR_R:
+ return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+ case kMode_Operand2_R_ROR_I:
+ return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
+ case kMode_Operand2_R_ROR_R:
+ return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand InputOffset(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ case kMode_Operand2_I:
+ case kMode_Operand2_R:
+ case kMode_Operand2_R_ASR_I:
+ case kMode_Operand2_R_ASR_R:
+ case kMode_Operand2_R_LSL_I:
+ case kMode_Operand2_R_LSL_R:
+ case kMode_Operand2_R_LSR_I:
+ case kMode_Operand2_R_LSR_R:
+ case kMode_Operand2_R_ROR_I:
+ case kMode_Operand2_R_ROR_R:
+ break;
+ case kMode_Offset_RI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_Offset_RR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand InputOffset() {
+ int index = 0;
+ return InputOffset(&index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ b(code_->GetLabel(i.InputBlock(0)));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchRet:
+ AssembleReturn();
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmAdd:
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmAnd:
+ __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmBic:
+ __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmMul:
+ __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputSBit());
+ break;
+ case kArmMla:
+ __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2), i.OutputSBit());
+ break;
+ case kArmMls: {
+ CpuFeatureScope scope(masm(), MLS);
+ __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmSdiv: {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmUdiv: {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmMov:
+ __ Move(i.OutputRegister(), i.InputOperand2(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmMvn:
+ __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+ break;
+ case kArmOrr:
+ __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmEor:
+ __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmSub:
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmRsb:
+ __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmBfc: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmUbfx: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ ldr(reg, MemOperand(reg, entry));
+ __ Call(reg);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallAddress: {
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmPush:
+ __ Push(i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmDrop: {
+ int words = MiscField::decode(instr->opcode());
+ __ Drop(words);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCmp:
+ __ cmp(i.InputRegister(0), i.InputOperand2(1));
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmCmn:
+ __ cmn(i.InputRegister(0), i.InputOperand2(1));
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmTst:
+ __ tst(i.InputRegister(0), i.InputOperand2(1));
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmTeq:
+ __ teq(i.InputRegister(0), i.InputOperand2(1));
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmVcmpF64:
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmVaddF64:
+ __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVsubF64:
+ __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmulF64:
+ __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlaF64:
+ __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlsF64:
+ __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVdivF64:
+ __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmodF64: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result into the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVnegF64:
+ __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArmVcvtF64S32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtF64U32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtS32F64: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+ __ vmov(i.OutputRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtU32F64: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+ __ vmov(i.OutputRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord8:
+ __ ldrb(i.OutputRegister(), i.InputOffset());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmStoreWord8: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ strb(i.InputRegister(index), operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord16:
+ __ ldrh(i.OutputRegister(), i.InputOffset());
+ break;
+ case kArmStoreWord16: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ strh(i.InputRegister(index), operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord32:
+ __ ldr(i.OutputRegister(), i.InputOffset());
+ break;
+ case kArmStoreWord32: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ str(i.InputRegister(index), operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmFloat64Load:
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmFloat64Store: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ vstr(i.InputDoubleRegister(index), operand);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmStoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ add(index, object, index);
+ __ str(value, MemOperand(index));
+ SaveFPRegsMode mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+ __ RecordWrite(object, index, value, lr_status, mode);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ ArmOperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ switch (condition) {
+ case kUnorderedEqual:
+ __ b(vs, flabel);
+ // Fall through.
+ case kEqual:
+ __ b(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ b(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ b(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ b(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ b(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ b(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ b(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ b(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ b(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ b(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ b(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ b(hi, tlabel);
+ break;
+ case kOverflow:
+ __ b(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ b(vc, tlabel);
+ break;
+ }
+ if (!fallthru) __ b(flabel); // no fallthru to flabel.
+ __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ ArmOperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnorderedLessThan:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnorderedGreaterThan:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
+ }
+ __ bind(&check);
+ __ mov(reg, Operand(0));
+ __ mov(reg, Operand(1), LeaveCC, cc);
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ stm(db_w, sp, saves);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+ __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ bind(&ok);
+ }
+
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ add(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ ldm(ia_w, sp, saves);
+ }
+ }
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ Ret();
+ } else {
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ Drop(pop_count);
+ __ Ret();
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ ArmOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ str(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ldr(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ ldr(temp, src);
+ __ str(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ Constant src = g.ToConstant(source);
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ UNREACHABLE();
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ Move(dst, src.ToHeapObject());
+ break;
+ }
+ if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+ } else if (destination->IsDoubleRegister()) {
+ DwVfpRegister result = g.ToDoubleRegister(destination);
+ __ vmov(result, g.ToDouble(source));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vmov(temp, g.ToDouble(source));
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleRegister()) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ vstr(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ vldr(g.ToDoubleRegister(destination), src);
+ } else {
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ ArmOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(temp, src);
+ __ ldr(src, dst);
+ __ str(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ SwVfpRegister temp_1 = kScratchDoubleReg.low();
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ ldr(temp_0, src);
+ __ vldr(temp_1, dst);
+ __ str(temp_0, dst);
+ __ vstr(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DwVfpRegister temp = kScratchDoubleReg;
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ Register temp_0 = kScratchReg;
+ DwVfpRegister temp_1 = kScratchDoubleReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // On 32-bit ARM we do not insert nops for inlined Smi code.
+ UNREACHABLE();
+}
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ return false;
+}
+
+#endif // DEBUG
+
+#undef __
+}
+}
+} // namespace v8::internal::compiler
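An aside on the opcode encoding used throughout the code generator above: AssembleArchInstruction and ArmOperandConverter repeatedly unpack fields from a single InstructionCode word (ArchOpcodeField::decode, AddressingModeField::decode, MiscField::decode, and the flags mode behind OutputSBit). The sketch below shows the general bit-field packing idea only; the field names are taken from the code, but the positions and widths are assumptions for illustration, not V8's actual layout (which lives in src/compiler/instruction-codes.h).

// Illustrative only: bit-field packing of an InstructionCode-style word.
// Field widths and shifts below are assumed for the example.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t code) {
    return static_cast<T>((code & kMask) >> kShift);
  }
};

enum ArchOpcode { kArchNop, kArmAdd, kArmLoadWord32 };                 // tiny demo subset
enum AddressingMode { kMode_None, kMode_Offset_RI, kMode_Operand2_R };

typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 4> AddressingModeField;
typedef BitField<int, 11, 10> MiscField;

int main() {
  // The instruction selector builds codes like this ...
  uint32_t code = ArchOpcodeField::encode(kArmLoadWord32) |
                  AddressingModeField::encode(kMode_Offset_RI);
  // ... and the code generator takes them apart again.
  assert(ArchOpcodeField::decode(code) == kArmLoadWord32);
  assert(AddressingModeField::decode(code) == kMode_Offset_RI);
  assert(MiscField::decode(code) == 0);
  return 0;
}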
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
new file mode 100644
index 000000000..1d5b5c7f3
--- /dev/null
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmCallCodeObject) \
+ V(ArmCallJSFunction) \
+ V(ArmCallAddress) \
+ V(ArmPush) \
+ V(ArmDrop) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVnegF64) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmFloat64Load) \
+ V(ArmFloat64Store) \
+ V(ArmLoadWord8) \
+ V(ArmStoreWord8) \
+ V(ArmLoadWord16) \
+ V(ArmStoreWord16) \
+ V(ArmLoadWord32) \
+ V(ArmStoreWord32) \
+ V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(Offset_RI) /* [%r0 + K] */ \
+ V(Offset_RR) /* [%r0 + %r1] */ \
+ V(Operand2_I) /* K */ \
+ V(Operand2_R) /* %r0 */ \
+ V(Operand2_R_ASR_I) /* %r0 ASR K */ \
+ V(Operand2_R_LSL_I) /* %r0 LSL K */ \
+ V(Operand2_R_LSR_I) /* %r0 LSR K */ \
+ V(Operand2_R_ROR_I) /* %r0 ROR K */ \
+ V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+ V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+ V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \
+ V(Operand2_R_ROR_R) /* %r0 ROR %r1 */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
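The V(...) lists above follow the usual X-macro convention: architecture-independent code expands TARGET_ARCH_OPCODE_LIST and TARGET_ADDRESSING_MODE_LIST into enums, name tables, and switch cases. The real consumers are in src/compiler/instruction-codes.h; the snippet below is only a sketch of the expansion pattern, using a made-up three-entry list.

// Sketch of the X-macro expansion pattern (demo list, not the real one).
#include <cstdio>

#define DEMO_ARCH_OPCODE_LIST(V) \
  V(ArmAdd)                      \
  V(ArmSub)                      \
  V(ArmMul)

enum DemoArchOpcode {
#define DECLARE_ARCH_OPCODE(Name) k##Name,
  DEMO_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
#undef DECLARE_ARCH_OPCODE
  kDemoLastArchOpcode
};

static const char* const kDemoArchOpcodeNames[] = {
#define DECLARE_ARCH_OPCODE_NAME(Name) #Name,
    DEMO_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE_NAME)
#undef DECLARE_ARCH_OPCODE_NAME
};

int main() {
  // Prints "1 -> ArmSub".
  std::printf("%d -> %s\n", kArmSub, kDemoArchOpcodeNames[kArmSub]);
  return 0;
}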
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
new file mode 100644
index 000000000..029d6e3b9
--- /dev/null
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -0,0 +1,943 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit ArmOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode opcode) {
+ int32_t value;
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ value = ValueOf<int32_t>(node->op());
+ break;
+ default:
+ return false;
+ }
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kArmAnd:
+ case kArmMov:
+ case kArmMvn:
+ case kArmBic:
+ return ImmediateFitsAddrMode1Instruction(value) ||
+ ImmediateFitsAddrMode1Instruction(~value);
+
+ case kArmAdd:
+ case kArmSub:
+ case kArmCmp:
+ case kArmCmn:
+ return ImmediateFitsAddrMode1Instruction(value) ||
+ ImmediateFitsAddrMode1Instruction(-value);
+
+ case kArmTst:
+ case kArmTeq:
+ case kArmOrr:
+ case kArmEor:
+ case kArmRsb:
+ return ImmediateFitsAddrMode1Instruction(value);
+
+ case kArmFloat64Load:
+ case kArmFloat64Store:
+ return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+ case kArmLoadWord8:
+ case kArmStoreWord8:
+ case kArmLoadWord32:
+ case kArmStoreWord32:
+ case kArmStoreWriteBarrier:
+ return value >= -4095 && value <= 4095;
+
+ case kArmLoadWord16:
+ case kArmStoreWord16:
+ return value >= -255 && value <= 255;
+
+ case kArchJmp:
+ case kArchNop:
+ case kArchRet:
+ case kArchDeoptimize:
+ case kArmMul:
+ case kArmMla:
+ case kArmMls:
+ case kArmSdiv:
+ case kArmUdiv:
+ case kArmBfc:
+ case kArmUbfx:
+ case kArmCallCodeObject:
+ case kArmCallJSFunction:
+ case kArmCallAddress:
+ case kArmPush:
+ case kArmDrop:
+ case kArmVcmpF64:
+ case kArmVaddF64:
+ case kArmVsubF64:
+ case kArmVmulF64:
+ case kArmVmlaF64:
+ case kArmVmlsF64:
+ case kArmVdivF64:
+ case kArmVmodF64:
+ case kArmVnegF64:
+ case kArmVcvtF64S32:
+ case kArmVcvtF64U32:
+ case kArmVcvtS32F64:
+ case kArmVcvtU32F64:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+ }
+};
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static bool TryMatchROR(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand** value_return,
+ InstructionOperand** shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() != IrOpcode::kWord32Or) return false;
+ Int32BinopMatcher m(node);
+ Node* shl = m.left().node();
+ Node* shr = m.right().node();
+ if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+ std::swap(shl, shr);
+ } else if (!m.left().IsWord32Shl() || !m.right().IsWord32Shr()) {
+ return false;
+ }
+ Int32BinopMatcher mshr(shr);
+ Int32BinopMatcher mshl(shl);
+ Node* value = mshr.left().node();
+ if (value != mshl.left().node()) return false;
+ Node* shift = mshr.right().node();
+ Int32Matcher mshift(shift);
+ if (mshift.IsInRange(1, 31) && mshl.right().Is(32 - mshift.Value())) {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+ *value_return = g.UseRegister(value);
+ *shift_return = g.UseImmediate(shift);
+ return true;
+ }
+ if (mshl.right().IsInt32Sub()) {
+ Int32BinopMatcher mshlright(mshl.right().node());
+ if (!mshlright.left().Is(32)) return false;
+ if (mshlright.right().node() != shift) return false;
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
+ *value_return = g.UseRegister(value);
+ *shift_return = g.UseRegister(shift);
+ return true;
+ }
+ return false;
+}
+
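Editorial aside, not part of the patch: TryMatchROR above recognizes the standard rotate-right idiom, where a Word32Or combines a right shift by k with a left shift by 32 - k of the same value, so the whole Or/Shl/Shr cluster can be folded into a single ROR operand. A minimal check of that identity:

#include <cassert>
#include <cstdint>

// Assumes 1 <= k <= 31, matching the IsInRange(1, 31) guard in TryMatchROR.
static uint32_t RotateRight(uint32_t x, unsigned k) {
  return (x >> k) | (x << (32u - k));
}

int main() {
  assert(RotateRight(0x80000001u, 1) == 0xC0000000u);
  assert(RotateRight(0x12345678u, 8) == 0x78123456u);
  return 0;
}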
+
+static inline bool TryMatchASR(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand** value_return,
+ InstructionOperand** shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() != IrOpcode::kWord32Sar) return false;
+ Int32BinopMatcher m(node);
+ *value_return = g.UseRegister(m.left().node());
+ if (m.right().IsInRange(1, 32)) {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ *shift_return = g.UseImmediate(m.right().node());
+ } else {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+ *shift_return = g.UseRegister(m.right().node());
+ }
+ return true;
+}
+
+
+static inline bool TryMatchLSL(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand** value_return,
+ InstructionOperand** shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() != IrOpcode::kWord32Shl) return false;
+ Int32BinopMatcher m(node);
+ *value_return = g.UseRegister(m.left().node());
+ if (m.right().IsInRange(0, 31)) {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+ *shift_return = g.UseImmediate(m.right().node());
+ } else {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+ *shift_return = g.UseRegister(m.right().node());
+ }
+ return true;
+}
+
+
+static inline bool TryMatchLSR(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand** value_return,
+ InstructionOperand** shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() != IrOpcode::kWord32Shr) return false;
+ Int32BinopMatcher m(node);
+ *value_return = g.UseRegister(m.left().node());
+ if (m.right().IsInRange(1, 32)) {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+ *shift_return = g.UseImmediate(m.right().node());
+ } else {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+ *shift_return = g.UseRegister(m.right().node());
+ }
+ return true;
+}
+
+
+static inline bool TryMatchShift(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand** value_return,
+ InstructionOperand** shift_return) {
+ return (
+ TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
+ TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
+ TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
+ TryMatchROR(selector, opcode_return, node, value_return, shift_return));
+}
+
+
+static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
+ InstructionCode* opcode_return,
+ Node* node,
+ size_t* input_count_return,
+ InstructionOperand** inputs) {
+ ArmOperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
+ *input_count_return = 2;
+ return true;
+ }
+ return false;
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[5];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+ m.left().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(ARRAY_SIZE(inputs), input_count);
+ DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, reverse_opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = OpParameter<MachineType>(node);
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* result = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArmFloat64Load;
+ break;
+ case kMachineWord8:
+ opcode = kArmLoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArmLoadWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kArmLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else if (g.CanBeImmediate(base, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+ g.UseRegister(index), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ DCHECK(rep == kMachineTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+ Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+ g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
+ : g.UseRegister(value);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArmFloat64Store;
+ break;
+ case kMachineWord8:
+ opcode = kArmStoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArmStoreWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kArmStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else if (g.CanBeImmediate(base, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+}
+
+
+static inline void EmitBic(InstructionSelector* selector, Node* node,
+ Node* left, Node* right) {
+ ArmOperandGenerator g(selector);
+ InstructionCode opcode = kArmBic;
+ InstructionOperand* value_operand;
+ InstructionOperand* shift_operand;
+ if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ value_operand, shift_operand);
+ return;
+ }
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(-1)) {
+ EmitBic(this, node, m.right().node(), mleft.left().node());
+ return;
+ }
+ }
+ if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().Is(-1)) {
+ EmitBic(this, node, m.left().node(), mright.left().node());
+ return;
+ }
+ }
+ if (IsSupported(ARMv7) && m.right().HasValue()) {
+ uint32_t value = m.right().Value();
+ uint32_t width = CompilerIntrinsics::CountSetBits(value);
+ uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ if (width != 0 && msb + width == 32) {
+ DCHECK_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
+ if (m.left().IsWord32Shr()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ Emit(kArmUbfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+ return;
+ }
+ }
+ Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(width));
+ return;
+ }
+ // Try to interpret this AND as BFC.
+ width = 32 - width;
+ msb = CompilerIntrinsics::CountLeadingZeros(~value);
+ uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
+ if (msb + width + lsb == 32) {
+ Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(lsb), g.TempImmediate(width));
+ return;
+ }
+ }
+ VisitBinop(this, node, kArmAnd, kArmAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ ArmOperandGenerator g(this);
+ InstructionCode opcode = kArmMov;
+ InstructionOperand* value_operand;
+ InstructionOperand* shift_operand;
+ if (TryMatchROR(this, &opcode, node, &value_operand, &shift_operand)) {
+ Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+ return;
+ }
+ VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ InstructionCode opcode = kArmMvn;
+ InstructionOperand* value_operand;
+ InstructionOperand* shift_operand;
+ if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
+ &shift_operand)) {
+ Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop(this, node, kArmEor, kArmEor);
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ TryMatchShift try_match_shift) {
+ ArmOperandGenerator g(selector);
+ InstructionCode opcode = kArmMov;
+ InstructionOperand* value_operand = NULL;
+ InstructionOperand* shift_operand = NULL;
+ CHECK(
+ try_match_shift(selector, &opcode, node, &value_operand, &shift_operand));
+ selector->Emit(opcode, g.DefineAsRegister(node), value_operand,
+ shift_operand);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitShift(this, node, TryMatchLSL);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (IsSupported(ARMv7) && m.left().IsWord32And() &&
+ m.right().IsInRange(0, 31)) {
+ int32_t lsb = m.right().Value();
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t width = CompilerIntrinsics::CountSetBits(value);
+ uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ if (msb + width + lsb == 32) {
+ DCHECK_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
+ Emit(kArmUbfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(width));
+ return;
+ }
+ }
+ }
+ VisitShift(this, node, TryMatchLSR);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitShift(this, node, TryMatchASR);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+ CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue() && m.right().Value() > 0) {
+ int32_t value = m.right().Value();
+ if (IsPowerOf2(value - 1)) {
+ Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value - 1)));
+ return;
+ }
+ if (value < kMaxInt && IsPowerOf2(value + 1)) {
+ Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value + 1)));
+ return;
+ }
+ }
+ Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
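Editorial aside, not part of the patch: VisitInt32Mul above strength-reduces multiplication by a constant c when c - 1 or c + 1 is a power of two, emitting an add or reverse-subtract with a shifted operand instead of a mul. The arithmetic behind those two cases:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 12345u;
  // c - 1 is a power of two: x * 9 == x + (x << 3)  -> kArmAdd, Operand2_R_LSL_I.
  assert(x * 9u == x + (x << 3));
  // c + 1 is a power of two: x * 7 == (x << 3) - x  -> kArmRsb, Operand2_R_LSL_I.
  assert(x * 7u == (x << 3) - x);
  return 0;
}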
+
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+ ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+ InstructionOperand* result_operand,
+ InstructionOperand* left_operand,
+ InstructionOperand* right_operand) {
+ ArmOperandGenerator g(selector);
+ if (selector->IsSupported(SUDIV)) {
+ selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+ return;
+ }
+ InstructionOperand* left_double_operand = g.TempDoubleRegister();
+ InstructionOperand* right_double_operand = g.TempDoubleRegister();
+ InstructionOperand* result_double_operand = g.TempDoubleRegister();
+ selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+ selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+ selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+ right_double_operand);
+ selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+ ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+ ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+ ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+ ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* div_operand = g.TempRegister();
+ InstructionOperand* result_operand = g.DefineAsRegister(node);
+ InstructionOperand* left_operand = g.UseRegister(m.left().node());
+ InstructionOperand* right_operand = g.UseRegister(m.right().node());
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+ left_operand, right_operand);
+ if (selector->IsSupported(MLS)) {
+ selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+ left_operand);
+ return;
+ }
+ InstructionOperand* mul_operand = g.TempRegister();
+ selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+ selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtF64U32, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
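The two visitors above fold a Float64Mul that feeds an add or subtract into a single multiply-accumulate (vmla/vmls), but only when CanCover reports that the multiply has no other users, so its standalone value is never needed. A simplified, hypothetical illustration of that shape test; the Node struct here is invented for the sketch and is not V8's node class, and the real CanCover also checks scheduling constraints:

    #include <cstdio>

    // Toy expression node: op is "+", "-", "*" or a leaf name; use_count says
    // how many other expressions consume this node's value.
    struct Node {
      const char* op;
      Node* left;
      Node* right;
      int use_count;
    };

    // Mirrors the selector's rule: fuse c + a*b into one multiply-accumulate
    // only if the multiply is used solely by this add.
    static bool CanFuseIntoMla(const Node* add) {
      const Node* mul = add->left;
      return mul != nullptr && mul->op[0] == '*' && mul->use_count == 1;
    }

    int main() {
      Node a{"a", nullptr, nullptr, 1}, b{"b", nullptr, nullptr, 1};
      Node mul{"*", &a, &b, 1};
      Node c{"c", nullptr, nullptr, 1};
      Node add{"+", &mul, &c, 1};
      std::printf("fuse: %s\n", CanFuseIntoMla(&add) ? "vmla" : "vmul + vadd");
      return 0;
    }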
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ ArmOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.right().Is(-1.0)) {
+ Emit(kArmVnegF64, g.DefineAsRegister(node),
+ g.UseDoubleRegister(m.left().node()));
+ } else {
+ VisitRRRFloat64(this, kArmVmulF64, node);
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
+ g.UseFixedDouble(node->InputAt(0), d0),
+ g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ ArmOperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(call, &buffer, true, false, continuation,
+ deoptimization);
+
+ // TODO(dcarney): might be possible to use claim/poke instead
+ // Push any stack arguments.
+ for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+ Node* input = buffer.pushed_nodes[i];
+ Emit(kArmPush, NULL, g.UseRegister(input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kArmCallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArmCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ DCHECK(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (descriptor->kind() == CallDescriptor::kCallAddress &&
+ buffer.pushed_count > 0) {
+ DCHECK(deoptimization == NULL && continuation == NULL);
+ Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
+ }
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kArmAdd, kArmAdd, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kArmSub, kArmRsb, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[5];
+ size_t input_count = 0;
+ InstructionOperand* outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ if (!commutative) cont->Commute();
+ inputs[0] = g.UseRegister(m.right().node());
+ input_count++;
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ } else {
+ DCHECK(cont->IsSet());
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_GE(ARRAY_SIZE(inputs), input_count);
+ DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, node, kArmCmn, cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kArmCmp, cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kArmTst, cont, true);
+ case IrOpcode::kWord32Or:
+ return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
+ case IrOpcode::kWord32Xor:
+ return VisitWordCompare(this, node, kArmTeq, cont, true);
+ default:
+ break;
+ }
+
+ ArmOperandGenerator g(this);
+ InstructionCode opcode =
+ cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+ if (cont->IsBranch()) {
+ Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+ g.UseRegister(node));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArmCmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (cont->IsBranch()) {
+ Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
+ g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+ g.UseDoubleRegister(m.left().node()),
+ g.UseDoubleRegister(m.right().node()));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
new file mode 100644
index 000000000..3b5d5f7d0
--- /dev/null
+++ b/deps/v8/src/compiler/arm/linkage-arm.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return r0; }
+ static Register ReturnValue2Reg() { return r1; }
+ static Register JSCallFunctionReg() { return r1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return r1; }
+ static Register RuntimeCallArgCountReg() { return r0; }
+ static RegList CCalleeSaveRegisters() {
+ return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+ r10.bit();
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {r0, r1, r2, r3};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 4; }
+};
+
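These traits spell out the ARM C calling convention that the shared LinkageHelper consumes: r0-r3 carry the first four arguments, r0 (with r1 for wide values) the result, and everything past the fourth argument travels on the stack. A rough, hypothetical sketch of that assignment, independent of V8's actual LinkageHelper and ignoring 64-bit argument alignment:

    #include <cstdio>

    // Assign each of n C parameters either a register (first four) or a stack
    // slot, the split that CRegisterParameter/CRegisterParametersLength encode.
    static void PrintCParameterLocations(int n) {
      static const char* const kRegs[] = {"r0", "r1", "r2", "r3"};
      for (int i = 0; i < n; ++i) {
        if (i < 4) {
          std::printf("param %d -> %s\n", i, kRegs[i]);
        } else {
          // Simplified: 4-byte slots starting at the caller's sp.
          std::printf("param %d -> [sp + #%d]\n", i, (i - 4) * 4);
        }
      }
    }

    int main() {
      PrintCParameterLocations(6);  // r0..r3, then two stack slots
      return 0;
    }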
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ zone, descriptor, stack_parameter_count, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
new file mode 100644
index 000000000..065889cfb
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -0,0 +1,854 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds Arm64-specific methods to convert InstructionOperands.
+class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
+ public:
+ Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Register InputRegister32(int index) {
+ return ToRegister(instr_->InputAt(index)).W();
+ }
+
+ Register InputRegister64(int index) { return InputRegister(index); }
+
+ Operand InputImmediate(int index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+ Operand InputOperand64(int index) { return InputOperand(index); }
+
+ Operand InputOperand32(int index) {
+ return ToOperand32(instr_->InputAt(index));
+ }
+
+ Register OutputRegister64() { return OutputRegister(); }
+
+ Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+
+ MemOperand MemoryOperand(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ SXTW);
+ }
+ UNREACHABLE();
+ return MemOperand(no_reg);
+ }
+
+ MemOperand MemoryOperand() {
+ int index = 0;
+ return MemoryOperand(&index);
+ }
+
+ Operand ToOperand(InstructionOperand* op) {
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return ToImmediate(op);
+ }
+
+ Operand ToOperand32(InstructionOperand* op) {
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op).W());
+ }
+ return ToImmediate(op);
+ }
+
+ Operand ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Operand(constant.ToHeapObject());
+ }
+ UNREACHABLE();
+ return Operand(-1);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+ offset.offset());
+ }
+};
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ i.InputRegister##width(1)); \
+ } else { \
+ int64_t imm = i.InputOperand##width(1).immediate().value(); \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
+ } \
+ } while (0);
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kArchJmp:
+ __ B(code_->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ break;
+ }
+ case kArm64Add:
+ __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Add32:
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ i.InputOperand32(1));
+ } else {
+ __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ }
+ break;
+ case kArm64And:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64And32:
+ __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Mul:
+ __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Mul32:
+ __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Idiv:
+ __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Idiv32:
+ __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Udiv:
+ __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Udiv32:
+ __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Imod: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
+ __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+ break;
+ }
+ case kArm64Imod32: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireW();
+ __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+ __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+ i.InputRegister32(0));
+ break;
+ }
+ case kArm64Umod: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
+ __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+ break;
+ }
+ case kArm64Umod32: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireW();
+ __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+ __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+ i.InputRegister32(0));
+ break;
+ }
+ // TODO(dcarney): use mvn instr??
+ case kArm64Not:
+ __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+ break;
+ case kArm64Not32:
+ __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+ break;
+ case kArm64Neg:
+ __ Neg(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kArm64Neg32:
+ __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+ break;
+ case kArm64Or:
+ __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Or32:
+ __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Xor:
+ __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Xor32:
+ __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Sub:
+ __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Sub32:
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+ i.InputOperand32(1));
+ } else {
+ __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ }
+ break;
+ case kArm64Shl:
+ ASSEMBLE_SHIFT(Lsl, 64);
+ break;
+ case kArm64Shl32:
+ ASSEMBLE_SHIFT(Lsl, 32);
+ break;
+ case kArm64Shr:
+ ASSEMBLE_SHIFT(Lsr, 64);
+ break;
+ case kArm64Shr32:
+ ASSEMBLE_SHIFT(Lsr, 32);
+ break;
+ case kArm64Sar:
+ ASSEMBLE_SHIFT(Asr, 64);
+ break;
+ case kArm64Sar32:
+ ASSEMBLE_SHIFT(Asr, 32);
+ break;
+ case kArm64CallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ Ldr(reg, MemOperand(reg, entry));
+ __ Call(reg);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ // Meaningless instruction for ICs to overwrite.
+ AddNopForSmiCodeInlining();
+ break;
+ }
+ case kArm64CallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ break;
+ }
+ case kArm64CallAddress: {
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm(), i.InputRegister(0));
+ break;
+ }
+ case kArm64Claim: {
+ int words = MiscField::decode(instr->opcode());
+ __ Claim(words);
+ break;
+ }
+ case kArm64Poke: {
+ int slot = MiscField::decode(instr->opcode());
+ Operand operand(slot * kPointerSize);
+ __ Poke(i.InputRegister(0), operand);
+ break;
+ }
+ case kArm64PokePairZero: {
+ // TODO(dcarney): test slot offset and register order.
+ int slot = MiscField::decode(instr->opcode()) - 1;
+ __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+ break;
+ }
+ case kArm64PokePair: {
+ int slot = MiscField::decode(instr->opcode()) - 1;
+ __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+ break;
+ }
+ case kArm64Drop: {
+ int words = MiscField::decode(instr->opcode());
+ __ Drop(words);
+ break;
+ }
+ case kArm64Cmp:
+ __ Cmp(i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Cmp32:
+ __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Tst:
+ __ Tst(i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Tst32:
+ __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Float64Cmp:
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Add:
+ __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Sub:
+ __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Mul:
+ __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Div:
+ __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Mod: {
+ // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ DCHECK(d0.is(i.InputDoubleRegister(0)));
+ DCHECK(d1.is(i.InputDoubleRegister(1)));
+ DCHECK(d0.is(i.OutputDoubleRegister()));
+ // TODO(dcarney): make sure this saves all relevant registers.
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ break;
+ }
+ case kArm64Int32ToInt64:
+ __ Sxtw(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kArm64Int64ToInt32:
+ if (!i.OutputRegister().is(i.InputRegister(0))) {
+ __ Mov(i.OutputRegister(), i.InputRegister(0));
+ }
+ break;
+ case kArm64Float64ToInt32:
+ __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float64ToUint32:
+ __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Int32ToFloat64:
+ __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+ break;
+ case kArm64Uint32ToFloat64:
+ __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+ break;
+ case kArm64LoadWord8:
+ __ Ldrb(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord8:
+ __ Strb(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord16:
+ __ Ldrh(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord16:
+ __ Strh(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord32:
+ __ Ldr(i.OutputRegister32(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord32:
+ __ Str(i.InputRegister32(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord64:
+ __ Ldr(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord64:
+ __ Str(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64Float64Load:
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kArm64Float64Store:
+ __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+ break;
+ case kArm64StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ Add(index, object, Operand(index, SXTW));
+ __ Str(value, MemOperand(index));
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ // TODO(dcarney): we shouldn't test write barriers from c calls.
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+ UseScratchRegisterScope scope(masm());
+ Register temp = no_reg;
+ if (csp.is(masm()->StackPointer())) {
+ temp = scope.AcquireX();
+ lr_status = kLRHasBeenSaved;
+ __ Push(lr, temp); // Need to push a pair
+ }
+ __ RecordWrite(object, index, value, lr_status, mode);
+ if (csp.is(masm()->StackPointer())) {
+ __ Pop(temp, lr);
+ }
+ break;
+ }
+ }
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ Arm64OperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ switch (condition) {
+ case kUnorderedEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kEqual:
+ __ B(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ B(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ B(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ B(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ B(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ B(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ B(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ B(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ B(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ B(hi, tlabel);
+ break;
+ case kOverflow:
+ __ B(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ B(vc, tlabel);
+ break;
+ }
+ if (!fallthru) __ B(flabel); // no fallthru to flabel.
+ __ Bind(&done);
+}
+
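The "Fall through" arms above exist because a floating-point comparison with a NaN input is unordered, which Fcmp reports through the V flag, so the vs test has to run before the ordinary condition. For kUnorderedEqual the emitted branches are equivalent to the following control flow (a sketch, not V8 code):

    #include <cassert>

    // Flags as left behind by Fcmp; V set means the comparison was unordered.
    struct Flags { bool N, Z, C, V; };

    enum class Target { kTrueBlock, kFalseBlock };

    // kUnorderedEqual: NaN operands take the false branch, otherwise branch
    // on the zero flag.
    static Target BranchUnorderedEqual(Flags f) {
      if (f.V) return Target::kFalseBlock;  // __ B(vs, flabel)
      if (f.Z) return Target::kTrueBlock;   // __ B(eq, tlabel)
      return Target::kFalseBlock;           // final __ B(flabel) / fallthru
    }

    int main() {
      assert(BranchUnorderedEqual({false, true, false, false}) ==
             Target::kTrueBlock);                              // equal
      assert(BranchUnorderedEqual({false, false, true, true}) ==
             Target::kFalseBlock);                             // NaN input
      return 0;
    }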
+
+// Assemble boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Arm64OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 64-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = nv;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnorderedLessThan:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnorderedGreaterThan:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
+ }
+ __ bind(&check);
+ __ Cset(reg, cc);
+ __ Bind(&done);
+}
+
+
+// TODO(dcarney): increase stack slots in frame once before first use.
+static int AlignedStackSlots(int stack_slots) {
+ if (stack_slots & 1) stack_slots++;
+ return stack_slots;
+}
+
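csp is the real AArch64 stack pointer and must stay 16-byte aligned, so the spill area is rounded up to an even number of 8-byte slots before csp is moved; jssp, by contrast, is bumped by the exact count. A self-contained copy of the rounding with a couple of worked values:

    #include <cassert>

    // Same rounding as AlignedStackSlots above: an odd count gets a pad slot.
    static int AlignedStackSlots(int stack_slots) {
      if (stack_slots & 1) stack_slots++;
      return stack_slots;
    }

    int main() {
      assert(AlignedStackSlots(0) == 0);
      assert(AlignedStackSlots(5) == 6);  // 5 * 8 = 40 bytes would misalign csp
      assert(AlignedStackSlots(6) == 6);
      return 0;
    }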
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ SetStackPointer(csp);
+ __ Push(lr, fp);
+ __ Mov(fp, csp);
+ // TODO(dcarney): correct callee saved registers.
+ __ PushCalleeSavedRegisters();
+ frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ SetStackPointer(jssp);
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
+ __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
+ __ Bind(&ok);
+ }
+
+ } else {
+ __ SetStackPointer(jssp);
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ Register sp = __ StackPointer();
+ if (!sp.Is(csp)) {
+ __ Sub(sp, sp, stack_slots * kPointerSize);
+ }
+ __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ }
+ // Restore registers.
+ // TODO(dcarney): correct callee saved registers.
+ __ PopCalleeSavedRegisters();
+ }
+ __ Mov(csp, fp);
+ __ Pop(fp, lr);
+ __ Ret();
+ } else {
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ Drop(pop_count);
+ __ Ret();
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Arm64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(g.ToRegister(destination), src);
+ } else {
+ __ Str(src, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand src = g.ToMemOperand(source, masm());
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ if (destination->IsRegister()) {
+ __ Ldr(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, src);
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsConstant()) {
+ ConstantOperand* constant_source = ConstantOperand::cast(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ UseScratchRegisterScope scope(masm());
+ Register dst = destination->IsRegister() ? g.ToRegister(destination)
+ : scope.AcquireX();
+ Constant src = g.ToConstant(source);
+ if (src.type() == Constant::kHeapObject) {
+ __ LoadObject(dst, src.ToHeapObject());
+ } else {
+ __ Mov(dst, g.ToImmediate(source));
+ }
+ if (destination->IsStackSlot()) {
+ __ Str(dst, g.ToMemOperand(destination, masm()));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ FPRegister result = g.ToDoubleRegister(destination);
+ __ Fmov(result, g.ToDouble(constant_source));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Fmov(temp, g.ToDouble(constant_source));
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsDoubleRegister()) {
+ FPRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ Str(src, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source, masm());
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(g.ToDoubleRegister(destination), src);
+ } else {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Ldr(temp, src);
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Arm64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Mov(temp, src);
+ __ Mov(src, dst);
+ __ Mov(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Mov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ UseScratchRegisterScope scope(masm());
+ CPURegister temp_0 = scope.AcquireX();
+ CPURegister temp_1 = scope.AcquireX();
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ FPRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(temp, src);
+ __ Fmov(src, dst);
+      __ Fmov(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Fmov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+#undef __
+
+#if DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ if (start_pc + 4 != end_pc) {
+ return false;
+ }
+ Address instr_address = code->instruction_start() + start_pc;
+
+ v8::internal::Instruction* instr =
+ reinterpret_cast<v8::internal::Instruction*>(instr_address);
+ return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
+}
+
+#endif // DEBUG
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
new file mode 100644
index 000000000..2d71c02ef
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -0,0 +1,103 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Xor) \
+ V(Arm64Xor32) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Neg) \
+ V(Arm64Neg32) \
+ V(Arm64Shl) \
+ V(Arm64Shl32) \
+ V(Arm64Shr) \
+ V(Arm64Shr32) \
+ V(Arm64Sar) \
+ V(Arm64Sar32) \
+ V(Arm64CallCodeObject) \
+ V(Arm64CallJSFunction) \
+ V(Arm64CallAddress) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePairZero) \
+ V(Arm64PokePair) \
+ V(Arm64Drop) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Int32ToInt64) \
+ V(Arm64Int64ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Float64Load) \
+ V(Arm64Float64Store) \
+ V(Arm64LoadWord8) \
+ V(Arm64StoreWord8) \
+ V(Arm64LoadWord16) \
+ V(Arm64StoreWord16) \
+ V(Arm64LoadWord32) \
+ V(Arm64StoreWord32) \
+ V(Arm64LoadWord64) \
+ V(Arm64StoreWord64) \
+ V(Arm64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
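Both lists above are X-macros: the shared instruction-codes.h expands TARGET_ARCH_OPCODE_LIST once to build the ArchOpcode enum and again to build its printable-name table. A minimal illustration of the idiom with a made-up list, not the real expansion site:

    #include <cstdio>

    #define DEMO_OPCODE_LIST(V) \
      V(DemoAdd)                \
      V(DemoSub)                \
      V(DemoMul)

    // One expansion produces enum values, another a parallel name table.
    #define DECLARE_ENUM(Name) k##Name,
    enum DemoOpcode { DEMO_OPCODE_LIST(DECLARE_ENUM) kDemoOpcodeCount };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(Name) #Name,
    static const char* const kDemoOpcodeNames[] = {
        DEMO_OPCODE_LIST(DECLARE_NAME)};
    #undef DECLARE_NAME

    int main() {
      std::printf("%s = %d\n", kDemoOpcodeNames[kDemoMul], kDemoMul);
      return 0;  // prints "DemoMul = 2"
    }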
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
new file mode 100644
index 000000000..111ca2d95
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -0,0 +1,667 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+ kArithimeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits
+ kShift32Imm, // 0 - 31
+  kShift64Imm,  // 0 - 63
+ kLogical32Imm,
+ kLogical64Imm,
+ kLoadStoreImm, // unsigned 9 bit or signed 7 bit
+ kNoImmediate
+};
+
+
+// Adds Arm64-specific methods for generating operands.
+class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit Arm64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ switch (node->opcode()) {
+ // TODO(turbofan): SMI number constants as immediates.
+ case IrOpcode::kInt32Constant:
+ value = ValueOf<int32_t>(node->op());
+ break;
+ default:
+ return false;
+ }
+ unsigned ignored;
+ switch (mode) {
+ case kLogical32Imm:
+ // TODO(dcarney): some unencodable values can be handled by
+ // switching instructions.
+ return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
+ &ignored, &ignored, &ignored);
+ case kLogical64Imm:
+ return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
+ &ignored, &ignored, &ignored);
+ case kArithimeticImm:
+ // TODO(dcarney): -values can be handled by instruction swapping
+ return Assembler::IsImmAddSub(value);
+ case kShift32Imm:
+ return 0 <= value && value < 31;
+ case kShift64Imm:
+ return 0 <= value && value < 63;
+ case kLoadStoreImm:
+ return (0 <= value && value < (1 << 9)) ||
+ (-(1 << 6) <= value && value < (1 << 6));
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
+
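kArithimeticImm corresponds to the AArch64 ADD/SUB immediate encoding, a 12-bit unsigned value optionally shifted left by 12 bits, which is what Assembler::IsImmAddSub accepts. A simplified standalone version of that predicate, not the V8 implementation:

    #include <cassert>
    #include <cstdint>

    // AArch64 ADD/SUB (immediate): a 12-bit unsigned value, shifted left by
    // either 0 or 12 bits. Simplified stand-in for Assembler::IsImmAddSub.
    static bool IsAddSubImmediate(int64_t value) {
      uint64_t v = static_cast<uint64_t>(value);
      bool unshifted = (v >> 12) == 0;                        // 0 .. 0xFFF
      bool shifted = ((v & 0xFFF) == 0) && ((v >> 24) == 0);  // 0x1000 .. 0xFFF000
      return value >= 0 && (unshifted || shifted);
    }

    int main() {
      assert(IsAddSubImmediate(0x123));      // fits in 12 bits
      assert(IsAddSubImmediate(0x123000));   // 12 bits shifted by 12
      assert(!IsAddSubImmediate(0x123456));  // needs both halves: not encodable
      assert(!IsAddSubImmediate(-1));        // negative would need the other op
      return 0;
    }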
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, ImmediateMode operand_mode) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(ARRAY_SIZE(inputs), input_count);
+ DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, operand_mode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = OpParameter<MachineType>(node);
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* result = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArm64Float64Load;
+ break;
+ case kMachineWord8:
+ opcode = kArm64LoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArm64LoadWord16;
+ break;
+ case kMachineWord32:
+ opcode = kArm64LoadWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = kArm64LoadWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+ g.UseRegister(index), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ DCHECK(rep == kMachineTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+ Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+ g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ InstructionOperand* val;
+ if (rep == kMachineFloat64) {
+ val = g.UseDoubleRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArm64Float64Store;
+ break;
+ case kMachineWord8:
+ opcode = kArm64StoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArm64StoreWord16;
+ break;
+ case kMachineWord32:
+ opcode = kArm64StoreWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = kArm64StoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kArm64And32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ VisitBinop(this, node, kArm64And, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kArm64Or32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kArm64Or, kLogical64Imm);
+}
+
+
+template <typename T>
+static void VisitXor(InstructionSelector* selector, Node* node,
+ ArchOpcode xor_opcode, ArchOpcode not_opcode) {
+ Arm64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.right().Is(-1)) {
+ selector->Emit(not_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop(selector, node, xor_opcode, kLogical32Imm);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ VisitRRO(this, kArm64Shl, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitRRO(this, kArm64Shr, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, kArm64Sar, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kArm64Add32, kArithimeticImm);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop(this, node, kArm64Add, kArithimeticImm);
+}
+
+
+template <typename T>
+static void VisitSub(InstructionSelector* selector, Node* node,
+ ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
+ Arm64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.left().Is(0)) {
+ selector->Emit(neg_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop(selector, node, sub_opcode, kArithimeticImm);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, kArm64Mul32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, kArm64Mul, node);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, kArm64Idiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, kArm64Idiv, node);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitRRR(this, kArm64Udiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+ VisitRRR(this, kArm64Udiv, node);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, kArm64Imod32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, kArm64Imod, node);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitRRR(this, kArm64Umod32, node);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+ VisitRRR(this, kArm64Umod, node);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+ VisitRR(this, kArm64Int32ToInt64, node);
+}
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+ VisitRR(this, kArm64Int64ToInt32, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Uint32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Div, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
+ g.UseFixedDouble(node->InputAt(0), d0),
+ g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand* left, InstructionOperand* right,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Arm64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, kArithimeticImm)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, kArithimeticImm)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kArm64Tst32, cont, true);
+ default:
+ break;
+ }
+
+ Arm64OperandGenerator g(this);
+ VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
+ cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, node, kArm64Tst, cont, true);
+ default:
+ break;
+ }
+
+ Arm64OperandGenerator g(this);
+ VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArm64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
+ g.UseDoubleRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ Arm64OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(call, &buffer, true, false, continuation,
+ deoptimization);
+
+ // Push the arguments to the stack.
+ bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
+ bool pushed_count_uneven = buffer.pushed_count & 1;
+ int aligned_push_count = buffer.pushed_count;
+ if (is_c_frame && pushed_count_uneven) {
+ aligned_push_count++;
+ }
+ // TODO(dcarney): claim and poke probably take small immediates,
+ // loop here or whatever.
+ // Bump the stack pointer(s).
+ if (aligned_push_count > 0) {
+ // TODO(dcarney): it would be better to bump the csp here only
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ }
+ // Move arguments to the stack.
+ {
+ int slot = buffer.pushed_count - 1;
+ // Emit the uneven pushes.
+ if (pushed_count_uneven) {
+ Node* input = buffer.pushed_nodes[slot];
+ ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
+ Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+ slot--;
+ }
+ // Now all pushes can be done in pairs.
+ for (; slot >= 0; slot -= 2) {
+ Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+ g.UseRegister(buffer.pushed_nodes[slot]),
+ g.UseRegister(buffer.pushed_nodes[slot - 1]));
+ }
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kArm64CallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArm64CallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ DCHECK(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (is_c_frame && aligned_push_count > 0) {
+ DCHECK(deoptimization == NULL && continuation == NULL);
+ Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
+ }
+}
+
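The argument-pushing code in VisitCall above claims the whole stack area once, then fills slots from the highest index down: an odd count first gets a single kArm64Poke (or a kArm64PokePairZero padded with xzr on C frames, which need a 16-byte aligned csp), and the remaining arguments go out two at a time with kArm64PokePair. A small simulation of the instructions that loop selects; the printout format is invented, not V8 output:

    #include <cstdio>

    // Reproduce the claim/poke decisions for a given number of pushed arguments.
    static void SimulateArgumentPokes(int pushed_count, bool is_c_frame) {
      int aligned = pushed_count + ((is_c_frame && (pushed_count & 1)) ? 1 : 0);
      if (aligned > 0) std::printf("Claim %d\n", aligned);
      int slot = pushed_count - 1;
      if (pushed_count & 1) {  // leftover argument: single poke (or padded pair)
        std::printf("%s slot=%d arg=%d\n",
                    is_c_frame ? "PokePairZero" : "Poke", slot, slot);
        slot--;
      }
      for (; slot >= 0; slot -= 2) {  // the rest go out two at a time
        std::printf("PokePair slot=%d args=%d,%d\n", slot, slot, slot - 1);
      }
    }

    int main() {
      SimulateArgumentPokes(5, true);   // C frame, odd count: Claim 6, then pairs
      SimulateArgumentPokes(4, false);  // JS frame, even count: Claim 4, two pairs
      return 0;
    }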
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
new file mode 100644
index 000000000..186f2d59d
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/linkage-arm64.cc
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return x0; }
+ static Register ReturnValue2Reg() { return x1; }
+ static Register JSCallFunctionReg() { return x1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return x1; }
+ static Register RuntimeCallArgCountReg() { return x0; }
+ static RegList CCalleeSaveRegisters() {
+ // TODO(dcarney): correct callee saved registers.
+ return 0;
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 8; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ zone, descriptor, stack_parameter_count, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
new file mode 100644
index 000000000..49a67157c
--- /dev/null
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -0,0 +1,2055 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph)
+ : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+ info_(info),
+ jsgraph_(jsgraph),
+ globals_(0, info->zone()),
+ breakable_(NULL),
+ execution_context_(NULL) {
+ InitializeAstVisitor(info->zone());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosure() {
+ if (!function_closure_.is_set()) {
+    // Parameter -1 is special for the function closure.
+ Operator* op = common()->Parameter(-1);
+ Node* node = NewNode(op, graph()->start());
+ function_closure_.set(node);
+ }
+ return function_closure_.get();
+}
+
+
+Node* AstGraphBuilder::GetFunctionContext() {
+ if (!function_context_.is_set()) {
+    // Parameter (arity + 1) is special for the outer context of the function.
+ Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ Node* node = NewNode(op, graph()->start());
+ function_context_.set(node);
+ }
+ return function_context_.get();
+}
+
+
+bool AstGraphBuilder::CreateGraph() {
+ Scope* scope = info()->scope();
+ DCHECK(graph() != NULL);
+
+ // Set up the basic structure of the graph.
+ int parameter_count = info()->num_parameters();
+ graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+
+ // Initialize the top-level environment.
+ Environment env(this, scope, graph()->start());
+ set_environment(&env);
+
+ // Build node to initialize local function context.
+ Node* closure = GetFunctionClosure();
+ Node* outer = GetFunctionContext();
+ Node* inner = BuildLocalFunctionContext(outer, closure);
+
+ // Push top-level function scope for the function body.
+ ContextScope top_context(this, scope, inner);
+
+ // Build the arguments object if it is used.
+ BuildArgumentsObject(scope->arguments());
+
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+ }
+
+ // Visit implicit declaration of the function name.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+
+ // Visit declarations within the function scope.
+ VisitDeclarations(scope->declarations());
+
+ // TODO(mstarzinger): This should do an inlined stack check.
+ NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+
+ // Visit statements in the function body.
+ VisitStatements(info()->function()->body());
+ if (HasStackOverflow()) return false;
+
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ // TODO(mstarzinger): Only traces implicit return.
+ Node* return_value = jsgraph()->UndefinedConstant();
+ NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+ }
+
+ // Return 'undefined' in case we can fall off the end.
+ Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
+ UpdateControlDependencyToLeaveFunction(control);
+
+ // Finish the basic structure of the graph.
+ environment()->UpdateControlDependency(exit_control());
+ graph()->SetEnd(NewNode(common()->End()));
+
+ return true;
+}
+
+
+// Left-hand side can only be a property, a global or a variable slot.
+enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+
+
+// Determine the left-hand side kind of an assignment.
+static LhsKind DetermineLhsKind(Expression* expr) {
+ Property* property = expr->AsProperty();
+ DCHECK(expr->IsValidReferenceExpression());
+  LhsKind lhs_kind =
+      (property == NULL)
+          ? VARIABLE
+          : (property->key()->IsPropertyName() ? NAMED_PROPERTY
+                                               : KEYED_PROPERTY);
+ return lhs_kind;
+}
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+ Code* unoptimized_code, FunctionLiteral* expr) {
+ int start_position = expr->start_position();
+ for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+ Object* obj = rinfo->target_object();
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->start_position() == start_position) {
+ return Handle<SharedFunctionInfo>(shared);
+ }
+ }
+ }
+ return Handle<SharedFunctionInfo>();
+}
+
+
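+// Copies the environment while preserving its concrete
+// AstGraphBuilder::Environment type.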
+StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
+ StructuredGraphBuilder::Environment* env) {
+ return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
+}
+
+
+AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
+ Scope* scope,
+ Node* control_dependency)
+ : StructuredGraphBuilder::Environment(builder, control_dependency),
+ parameters_count_(scope->num_parameters() + 1),
+ locals_count_(scope->num_stack_slots()),
+ parameters_node_(NULL),
+ locals_node_(NULL),
+ stack_node_(NULL),
+ parameters_dirty_(true),
+ locals_dirty_(true),
+ stack_dirty_(true) {
+ DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
+
+ // Bind the receiver variable.
+ Node* receiver = builder->graph()->NewNode(common()->Parameter(0),
+ builder->graph()->start());
+ values()->push_back(receiver);
+
+ // Bind all parameter variables. The parameter indices are shifted by 1
+ // (receiver is parameter index -1 but environment index 0).
+ for (int i = 0; i < scope->num_parameters(); ++i) {
+ Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1),
+ builder->graph()->start());
+ values()->push_back(parameter);
+ }
+
+ // Bind all local variables to undefined.
+ Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+ values()->insert(values()->end(), locals_count(), undefined_constant);
+}
+
+
+AstGraphBuilder::Environment::Environment(const Environment& copy)
+ : StructuredGraphBuilder::Environment(
+ static_cast<StructuredGraphBuilder::Environment>(copy)),
+ parameters_count_(copy.parameters_count_),
+ locals_count_(copy.locals_count_),
+ parameters_node_(copy.parameters_node_),
+ locals_node_(copy.locals_node_),
+ stack_node_(copy.stack_node_),
+ parameters_dirty_(copy.parameters_dirty_),
+ locals_dirty_(copy.locals_dirty_),
+ stack_dirty_(copy.stack_dirty_) {}
+
+
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
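+  // Only rebuild the cached StateValues nodes for the parts of the
+  // environment that are marked dirty.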
+ if (parameters_dirty_) {
+ Operator* op = common()->StateValues(parameters_count());
+ if (parameters_count() != 0) {
+ Node** parameters = &values()->front();
+ parameters_node_ = graph()->NewNode(op, parameters_count(), parameters);
+ } else {
+ parameters_node_ = graph()->NewNode(op);
+ }
+ parameters_dirty_ = false;
+ }
+ if (locals_dirty_) {
+ Operator* op = common()->StateValues(locals_count());
+ if (locals_count() != 0) {
+ Node** locals = &values()->at(parameters_count_);
+ locals_node_ = graph()->NewNode(op, locals_count(), locals);
+ } else {
+ locals_node_ = graph()->NewNode(op);
+ }
+ locals_dirty_ = false;
+ }
+ if (stack_dirty_) {
+ Operator* op = common()->StateValues(stack_height());
+ if (stack_height() != 0) {
+ Node** stack = &values()->at(parameters_count_ + locals_count_);
+ stack_node_ = graph()->NewNode(op, stack_height(), stack);
+ } else {
+ stack_node_ = graph()->NewNode(op);
+ }
+ stack_dirty_ = false;
+ }
+
+ Operator* op = common()->FrameState(ast_id);
+
+ return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_);
+}
+
+
+AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
+ Expression::Context kind,
+ BailoutId bailout_id)
+ : bailout_id_(bailout_id),
+ kind_(kind),
+ owner_(own),
+ outer_(own->ast_context()) {
+ owner()->set_ast_context(this); // Push.
+#ifdef DEBUG
+ original_height_ = environment()->stack_height();
+#endif
+}
+
+
+AstGraphBuilder::AstContext::~AstContext() {
+ owner()->set_ast_context(outer_); // Pop.
+}
+
+
+AstGraphBuilder::AstEffectContext::~AstEffectContext() {
+ DCHECK(environment()->stack_height() == original_height_);
+}
+
+
+AstGraphBuilder::AstValueContext::~AstValueContext() {
+ DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+AstGraphBuilder::AstTestContext::~AstTestContext() {
+ DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValueWithLazyBailout(
+ Node* value) {
+ ProduceValue(value);
+ owner()->BuildLazyBailout(value, bailout_id_);
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValueWithLazyBailout(
+ Node* value) {
+ ProduceValue(value);
+ owner()->BuildLazyBailout(value, bailout_id_);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValueWithLazyBailout(Node* value) {
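+  // Temporarily push the raw value so that it is part of the frame state
+  // recorded for the lazy bailout, then produce the boolean-converted value.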
+ environment()->Push(value);
+ owner()->BuildLazyBailout(value, bailout_id_);
+ environment()->Pop();
+ ProduceValue(value);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+ // The value is ignored.
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+ environment()->Push(value);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+ environment()->Push(owner()->BuildToBoolean(value));
+}
+
+
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+
+
+Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
+ return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
+ return environment()->Pop();
+}
+
+
+AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
+ BreakableStatement* target) {
+ BreakableScope* current = this;
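+  // Walk outwards through the chain of breakable scopes, dropping any extra
+  // operands that the enclosing constructs keep on the operand stack.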
+ while (current != NULL && current->target_ != target) {
+ owner_->environment()->Drop(current->drop_extra_);
+ current = current->next_;
+ }
+ DCHECK(current != NULL); // Always found (unless stack is malformed).
+ return current;
+}
+
+
+void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
+ FindBreakable(stmt)->control_->Break();
+}
+
+
+void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
+ FindBreakable(stmt)->control_->Continue();
+}
+
+
+void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
+ if (expr == NULL) {
+ return environment()->Push(jsgraph()->NullConstant());
+ }
+ VisitForValue(expr);
+}
+
+
+void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
+ for (int i = 0; i < exprs->length(); ++i) {
+ VisitForValue(exprs->at(i));
+ }
+}
+
+
+void AstGraphBuilder::VisitForValue(Expression* expr) {
+ AstValueContext for_value(this, expr->id());
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitForEffect(Expression* expr) {
+ AstEffectContext for_effect(this, expr->id());
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitForTest(Expression* expr) {
+ AstTestContext for_condition(this, expr->id());
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+ Variable* variable = decl->proxy()->var();
+ VariableMode mode = decl->mode();
+ bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Handle<Oddball> value = variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value();
+ globals()->Add(variable->name(), zone());
+ globals()->Add(value, zone());
+ break;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Node* value = jsgraph()->TheHoleConstant();
+ environment()->Bind(variable, value);
+ }
+ break;
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Node* value = jsgraph()->TheHoleConstant();
+ Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, current_context(), value);
+ }
+ break;
+ case Variable::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ Variable* variable = decl->proxy()->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals()->Add(variable->name(), zone());
+ globals()->Add(function, zone());
+ break;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ environment()->Bind(variable, value);
+ break;
+ }
+ case Variable::CONTEXT: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, current_context(), value);
+ break;
+ }
+ case Variable::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitBlock(Block* stmt) {
+ BlockBuilder block(this);
+ BreakableScope scope(this, stmt, &block, 0);
+ if (stmt->labels() != NULL) block.BeginBlock();
+ if (stmt->scope() == NULL) {
+ // Visit statements in the same scope, no declarations.
+ VisitStatements(stmt->statements());
+ } else {
+ Operator* op = javascript()->CreateBlockContext();
+ Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
+ Node* context = NewNode(op, scope_info, GetFunctionClosure());
+ ContextScope scope(this, stmt->scope(), context);
+
+ // Visit declarations and statements in a block scope.
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
+ }
+ if (stmt->labels() != NULL) block.EndBlock();
+}
+
+
+void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ VisitForEffect(stmt->expression());
+}
+
+
+void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ IfBuilder compare_if(this);
+ VisitForTest(stmt->condition());
+ Node* condition = environment()->Pop();
+ compare_if.If(condition);
+ compare_if.Then();
+ Visit(stmt->then_statement());
+ compare_if.Else();
+ Visit(stmt->else_statement());
+ compare_if.End();
+}
+
+
+void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+ breakable()->ContinueTarget(stmt->target());
+ set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+ breakable()->BreakTarget(stmt->target());
+ set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ VisitForValue(stmt->expression());
+ Node* result = environment()->Pop();
+ Node* control = NewNode(common()->Return(), result);
+ UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+ VisitForValue(stmt->expression());
+ Node* value = environment()->Pop();
+ Operator* op = javascript()->CreateWithContext();
+ Node* context = NewNode(op, value, GetFunctionClosure());
+ ContextScope scope(this, stmt->scope(), context);
+ Visit(stmt->statement());
+}
+
+
+void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchBuilder compare_switch(this, clauses->length());
+ BreakableScope scope(this, stmt, &compare_switch, 0);
+ compare_switch.BeginSwitch();
+ int default_index = -1;
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForValue(stmt->tag());
+ Node* tag = environment()->Top();
+
+ // Iterate over all cases and create nodes for label comparison.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+
+    // The default is not a test; remember its index.
+ if (clause->is_default()) {
+ default_index = i;
+ continue;
+ }
+
+ // Create nodes to perform label comparison as if via '==='. The switch
+ // value is still on the operand stack while the label is evaluated.
+ VisitForValue(clause->label());
+ Node* label = environment()->Pop();
+ Operator* op = javascript()->StrictEqual();
+ Node* condition = NewNode(op, tag, label);
+ compare_switch.BeginLabel(i, condition);
+
+ // Discard the switch value at label match.
+ environment()->Pop();
+ compare_switch.EndLabel();
+ }
+
+ // Discard the switch value and mark the default case.
+ environment()->Pop();
+ if (default_index >= 0) {
+ compare_switch.DefaultAt(default_index);
+ }
+
+ // Iterate over all cases and create nodes for case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ compare_switch.BeginCase(i);
+ VisitStatements(clause->statements());
+ compare_switch.EndCase();
+ }
+
+ compare_switch.EndSwitch();
+}
+
+
+void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ LoopBuilder while_loop(this);
+ while_loop.BeginLoop();
+ VisitIterationBody(stmt, &while_loop, 0);
+ while_loop.EndBody();
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ while_loop.BreakUnless(condition);
+ while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+ LoopBuilder while_loop(this);
+ while_loop.BeginLoop();
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ while_loop.BreakUnless(condition);
+ VisitIterationBody(stmt, &while_loop, 0);
+ while_loop.EndBody();
+ while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
+ LoopBuilder for_loop(this);
+ VisitIfNotNull(stmt->init());
+ for_loop.BeginLoop();
+ if (stmt->cond() != NULL) {
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ for_loop.BreakUnless(condition);
+ }
+ VisitIterationBody(stmt, &for_loop, 0);
+ for_loop.EndBody();
+ VisitIfNotNull(stmt->next());
+ for_loop.EndLoop();
+}
+
+
+// TODO(dcarney): this is a big function. Try to clean up some.
+void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+ VisitForValue(stmt->subject());
+ Node* obj = environment()->Pop();
+ // Check for undefined or null before entering loop.
+ IfBuilder is_undefined(this);
+ Node* is_undefined_cond =
+ NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
+ is_undefined.If(is_undefined_cond);
+ is_undefined.Then();
+ is_undefined.Else();
+ {
+ IfBuilder is_null(this);
+ Node* is_null_cond =
+ NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
+ is_null.If(is_null_cond);
+ is_null.Then();
+ is_null.Else();
+ // Convert object to jsobject.
+ // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ obj = NewNode(javascript()->ToObject(), obj);
+ environment()->Push(obj);
+ // TODO(dcarney): should do a fast enum cache check here to skip runtime.
+ environment()->Push(obj);
+ Node* cache_type = ProcessArguments(
+ javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+ // TODO(dcarney): these next runtime calls should be removed in favour of
+ // a few simplified instructions.
+ environment()->Push(obj);
+ environment()->Push(cache_type);
+ Node* cache_pair =
+ ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+ // cache_type may have been replaced.
+ Node* cache_array = NewNode(common()->Projection(0), cache_pair);
+ cache_type = NewNode(common()->Projection(1), cache_pair);
+ environment()->Push(cache_type);
+ environment()->Push(cache_array);
+ Node* cache_length = ProcessArguments(
+ javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+ {
+ // TODO(dcarney): this check is actually supposed to be for the
+ // empty enum case only.
+ IfBuilder have_no_properties(this);
+ Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
+ cache_length, jsgraph()->ZeroConstant());
+ have_no_properties.If(empty_array_cond);
+ have_no_properties.Then();
+ // Pop obj and skip loop.
+ environment()->Pop();
+ have_no_properties.Else();
+ {
+ // Construct the rest of the environment.
+ environment()->Push(cache_type);
+ environment()->Push(cache_array);
+ environment()->Push(cache_length);
+ environment()->Push(jsgraph()->ZeroConstant());
+ // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ LoopBuilder for_loop(this);
+ for_loop.BeginLoop();
+ // Check loop termination condition.
+ Node* index = environment()->Peek(0);
+ Node* exit_cond =
+ NewNode(javascript()->LessThan(), index, cache_length);
+ // TODO(jarin): provide real bailout id.
+ BuildLazyBailout(exit_cond, BailoutId::None());
+ for_loop.BreakUnless(exit_cond);
+        // TODO(dcarney): this runtime call should be a handful of simplified
+        // instructions that basically produce
+        //   value = array[index]
+ environment()->Push(obj);
+ environment()->Push(cache_array);
+ environment()->Push(cache_type);
+ environment()->Push(index);
+ Node* pair =
+ ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+ Node* value = NewNode(common()->Projection(0), pair);
+ Node* should_filter = NewNode(common()->Projection(1), pair);
+ environment()->Push(value);
+ {
+ // Test if FILTER_KEY needs to be called.
+ IfBuilder test_should_filter(this);
+ Node* should_filter_cond =
+ NewNode(javascript()->StrictEqual(), should_filter,
+ jsgraph()->TrueConstant());
+ test_should_filter.If(should_filter_cond);
+ test_should_filter.Then();
+ value = environment()->Pop();
+ // TODO(dcarney): Better load from function context.
+ // See comment in BuildLoadBuiltinsObject.
+ Handle<JSFunction> function(JSFunction::cast(
+ info()->context()->builtins()->javascript_builtin(
+ Builtins::FILTER_KEY)));
+ // Callee.
+ environment()->Push(jsgraph()->HeapConstant(function));
+ // Receiver.
+ environment()->Push(obj);
+ // Args.
+ environment()->Push(value);
+          // The result is either the string key or Smi(0) indicating that
+          // the property is gone.
+ Node* res = ProcessArguments(
+ javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+ // TODO(jarin): provide real bailout id.
+ BuildLazyBailout(res, BailoutId::None());
+ Node* property_missing = NewNode(javascript()->StrictEqual(), res,
+ jsgraph()->ZeroConstant());
+ {
+ IfBuilder is_property_missing(this);
+ is_property_missing.If(property_missing);
+ is_property_missing.Then();
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ environment()->Poke(0, index_inc);
+ // TODO(jarin): provide real bailout id.
+ BuildLazyBailout(index_inc, BailoutId::None());
+ for_loop.Continue();
+ is_property_missing.Else();
+ is_property_missing.End();
+ }
+ // Replace 'value' in environment.
+ environment()->Push(res);
+ test_should_filter.Else();
+ test_should_filter.End();
+ }
+ value = environment()->Pop();
+ // Bind value and do loop body.
+ VisitForInAssignment(stmt->each(), value);
+ VisitIterationBody(stmt, &for_loop, 5);
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ environment()->Poke(0, index_inc);
+ // TODO(jarin): provide real bailout id.
+ BuildLazyBailout(index_inc, BailoutId::None());
+ for_loop.EndBody();
+ for_loop.EndLoop();
+ environment()->Drop(5);
+ // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ }
+ have_no_properties.End();
+ }
+ is_null.End();
+ }
+ is_undefined.End();
+}
+
+
+void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+ VisitForValue(stmt->subject());
+ environment()->Pop();
+ // TODO(turbofan): create and use loop builder.
+}
+
+
+void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ // TODO(turbofan): Do we really need a separate reloc-info for this?
+ NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+}
+
+
+void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Node* context = current_context();
+
+ // Build a new shared function info if we cannot find one in the baseline
+ // code. We also have a stack overflow if the recursive compilation did.
+ Handle<SharedFunctionInfo> shared_info =
+ SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+ if (shared_info.is_null()) {
+ shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
+ CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
+ }
+
+ // Create node to instantiate a new closure.
+ Node* info = jsgraph()->Constant(shared_info);
+ Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
+ : jsgraph()->FalseConstant();
+ Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+ Node* value = NewNode(op, context, info, pretenure);
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitConditional(Conditional* expr) {
+ IfBuilder compare_if(this);
+ VisitForTest(expr->condition());
+ Node* condition = environment()->Pop();
+ compare_if.If(condition);
+ compare_if.Then();
+ Visit(expr->then_expression());
+ compare_if.Else();
+ Visit(expr->else_expression());
+ compare_if.End();
+ ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ Node* value = BuildVariableLoad(expr->var(), expr->id());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitLiteral(Literal* expr) {
+ Node* value = jsgraph()->Constant(expr->value());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to materialize a regular expression literal.
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* pattern = jsgraph()->Constant(expr->pattern());
+ Node* flags = jsgraph()->Constant(expr->flags());
+ Operator* op = javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+ ast_context()->ProduceValue(literal);
+}
+
+
+void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to deep-copy the literal boilerplate.
+ expr->BuildConstantProperties(isolate());
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* constants = jsgraph()->Constant(expr->constant_properties());
+ Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+ Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+ // The object is expected on the operand stack during computation of the
+ // property values and is the value of the entire expression.
+ environment()->Push(literal);
+
+ // Mark all computed expressions that are bound to a key that is shadowed by
+ // a later occurrence of the same key. For the marked expressions, no store
+ // code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ // Create nodes to store computed values into the literal.
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ PrintableUnique<Name> name = MakeUnique(key->AsPropertyName());
+ Node* store =
+ NewNode(javascript()->StoreNamed(name), literal, value);
+ BuildLazyBailout(store, key->id());
+ } else {
+ VisitForEffect(property->value());
+ }
+ break;
+ }
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->key());
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ if (property->emit_store()) {
+ Node* strict = jsgraph()->Constant(SLOPPY);
+ Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+ NewNode(op, receiver, key, value, strict);
+ }
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ if (property->emit_store()) {
+ Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+ NewNode(op, receiver, value);
+ }
+ break;
+ }
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = property->value();
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = property->value();
+ break;
+ }
+ }
+
+ // Create nodes to define accessors, using only a single call to the runtime
+ // for each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ VisitForValue(it->first);
+ VisitForValueOrNull(it->second->getter);
+ VisitForValueOrNull(it->second->setter);
+ Node* setter = environment()->Pop();
+ Node* getter = environment()->Pop();
+ Node* name = environment()->Pop();
+ Node* attr = jsgraph()->Constant(NONE);
+ Operator* op =
+ javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ NewNode(op, literal, name, getter, setter, attr);
+ }
+
+ // Transform literals that contain functions to fast properties.
+ if (expr->has_function()) {
+ Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+ NewNode(op, literal);
+ }
+
+ ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to deep-copy the literal boilerplate.
+ expr->BuildConstantElements(isolate());
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* constants = jsgraph()->Constant(expr->constant_elements());
+ Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+ Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+ // The array and the literal index are both expected on the operand stack
+ // during computation of the element values.
+ environment()->Push(literal);
+ environment()->Push(literal_index);
+
+ // Create nodes to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < expr->values()->length(); i++) {
+ Expression* subexpr = expr->values()->at(i);
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ VisitForValue(subexpr);
+ Node* value = environment()->Pop();
+ Node* index = jsgraph()->Constant(i);
+ Node* store = NewNode(javascript()->StoreProperty(), literal, index, value);
+ BuildLazyBailout(store, expr->GetIdForElement(i));
+ }
+
+ environment()->Pop(); // Array literal index.
+ ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+ DCHECK(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = DetermineLhsKind(expr);
+
+ // Evaluate LHS expression and store the value.
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+      // TODO(jarin): Fill in the correct bailout id.
+ BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj());
+ Node* object = environment()->Pop();
+ value = environment()->Pop();
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Node* store = NewNode(javascript()->StoreNamed(name), object, value);
+      // TODO(jarin): Fill in the correct bailout id.
+ BuildLazyBailout(store, BailoutId::None());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ value = environment()->Pop();
+ Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
+      // TODO(jarin): Fill in the correct bailout id.
+ BuildLazyBailout(store, BailoutId::None());
+ break;
+ }
+ }
+}
+
+
+void AstGraphBuilder::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->target()->AsProperty();
+ LhsKind assign_type = DetermineLhsKind(expr->target());
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ VisitForValue(property->obj());
+ break;
+ case KEYED_PROPERTY: {
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ break;
+ }
+ }
+
+ // Evaluate the value and potentially handle compound assignments by loading
+ // the left-hand side value and performing a binary operation.
+ if (expr->is_compound()) {
+ Node* old_value = NULL;
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->target()->AsVariableProxy()->var();
+ old_value = BuildVariableLoad(variable, expr->target()->id());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ Node* object = environment()->Top();
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ old_value = NewNode(javascript()->LoadNamed(name), object);
+ BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Node* key = environment()->Top();
+ Node* object = environment()->Peek(1);
+ old_value = NewNode(javascript()->LoadProperty(), object, key);
+ BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ break;
+ }
+ }
+ environment()->Push(old_value);
+ VisitForValue(expr->value());
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ Node* value = BuildBinaryOp(left, right, expr->binary_op());
+ environment()->Push(value);
+ BuildLazyBailout(value, expr->binary_operation()->id());
+ } else {
+ VisitForValue(expr->value());
+ }
+
+ // Store the value.
+ Node* value = environment()->Pop();
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->target()->AsVariableProxy()->var();
+ BuildVariableAssignment(variable, value, expr->op(),
+ expr->AssignmentId());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ Node* object = environment()->Pop();
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Node* store = NewNode(javascript()->StoreNamed(name), object, value);
+ BuildLazyBailout(store, expr->AssignmentId());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
+ BuildLazyBailout(store, expr->AssignmentId());
+ break;
+ }
+ }
+
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitYield(Yield* expr) {
+ VisitForValue(expr->generator_object());
+ VisitForValue(expr->expression());
+ environment()->Pop();
+ environment()->Pop();
+ // TODO(turbofan): VisitYield
+ ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+}
+
+
+void AstGraphBuilder::VisitThrow(Throw* expr) {
+ VisitForValue(expr->exception());
+ Node* exception = environment()->Pop();
+ Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+ Node* value = NewNode(op, exception);
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitProperty(Property* expr) {
+ Node* value;
+ if (expr->key()->IsPropertyName()) {
+ VisitForValue(expr->obj());
+ Node* object = environment()->Pop();
+ PrintableUnique<Name> name =
+ MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
+ value = NewNode(javascript()->LoadNamed(name), object);
+ } else {
+ VisitForValue(expr->obj());
+ VisitForValue(expr->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ value = NewNode(javascript()->LoadProperty(), object, key);
+ }
+ ast_context()->ProduceValueWithLazyBailout(value);
+}
+
+
+void AstGraphBuilder::VisitCall(Call* expr) {
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ // Prepare the callee and the receiver to the function call. This depends on
+ // the semantics of the underlying call type.
+ CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+ Node* receiver_value = NULL;
+ Node* callee_value = NULL;
+ bool possibly_eval = false;
+ switch (call_type) {
+ case Call::GLOBAL_CALL: {
+ Variable* variable = callee->AsVariableProxy()->var();
+ callee_value = BuildVariableLoad(variable, expr->expression()->id());
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ }
+ case Call::LOOKUP_SLOT_CALL: {
+ Variable* variable = callee->AsVariableProxy()->var();
+ DCHECK(variable->location() == Variable::LOOKUP);
+ Node* name = jsgraph()->Constant(variable->name());
+ Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+ Node* pair = NewNode(op, current_context(), name);
+ callee_value = NewNode(common()->Projection(0), pair);
+ receiver_value = NewNode(common()->Projection(1), pair);
+ break;
+ }
+ case Call::PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForValue(property->obj());
+ Node* object = environment()->Top();
+ if (property->key()->IsPropertyName()) {
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ callee_value = NewNode(javascript()->LoadNamed(name), object);
+ } else {
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ callee_value = NewNode(javascript()->LoadProperty(), object, key);
+ }
+ BuildLazyBailoutWithPushedNode(callee_value, property->LoadId());
+ receiver_value = environment()->Pop();
+ // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
+ // object for sloppy callees. This could also be modeled explicitly here,
+ // thereby obsoleting the need for a flag to the call operator.
+ flags = CALL_AS_METHOD;
+ break;
+ }
+ case Call::POSSIBLY_EVAL_CALL:
+ possibly_eval = true;
+ // Fall through.
+ case Call::OTHER_CALL:
+ VisitForValue(callee);
+ callee_value = environment()->Pop();
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ }
+
+ // The callee and the receiver both have to be pushed onto the operand stack
+ // before arguments are being evaluated.
+ environment()->Push(callee_value);
+ environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the function call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // Resolve callee and receiver for a potential direct eval call. This block
+ // will mutate the callee and receiver values pushed onto the environment.
+ if (possibly_eval && args->length() > 0) {
+ int arg_count = args->length();
+
+ // Extract callee and source string from the environment.
+ Node* callee = environment()->Peek(arg_count + 1);
+ Node* source = environment()->Peek(arg_count - 1);
+
+ // Create node to ask for help resolving potential eval call. This will
+ // provide a fully resolved callee and the corresponding receiver.
+ Node* receiver = environment()->Lookup(info()->scope()->receiver());
+ Node* strict = jsgraph()->Constant(strict_mode());
+ Node* position = jsgraph()->Constant(info()->scope()->start_position());
+ Operator* op =
+ javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 5);
+ Node* pair = NewNode(op, callee, source, receiver, strict, position);
+ Node* new_callee = NewNode(common()->Projection(0), pair);
+ Node* new_receiver = NewNode(common()->Projection(1), pair);
+
+ // Patch callee and receiver on the environment.
+ environment()->Poke(arg_count + 1, new_callee);
+ environment()->Poke(arg_count + 0, new_receiver);
+ }
+
+ // Create node to perform the function call.
+ Operator* call = javascript()->Call(args->length() + 2, flags);
+ Node* value = ProcessArguments(call, args->length() + 2);
+ ast_context()->ProduceValueWithLazyBailout(value);
+}
+
+
+void AstGraphBuilder::VisitCallNew(CallNew* expr) {
+ VisitForValue(expr->expression());
+
+ // Evaluate all arguments to the construct call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // Create node to perform the construct call.
+ Operator* call = javascript()->CallNew(args->length() + 1);
+ Node* value = ProcessArguments(call, args->length() + 1);
+ ast_context()->ProduceValueWithLazyBailout(value);
+}
+
+
+void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+
+ // The callee and the receiver both have to be pushed onto the operand stack
+ // before arguments are being evaluated.
+ CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+ Node* receiver_value = BuildLoadBuiltinsObject();
+ PrintableUnique<String> unique = MakeUnique(name);
+ Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
+ environment()->Push(callee_value);
+ // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
+ // refuses to optimize functions with jsruntime calls).
+ BuildLazyBailout(callee_value, BailoutId::None());
+ environment()->Push(receiver_value);
+
+ // Evaluate all arguments to the JS runtime call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // Create node to perform the JS runtime call.
+ Operator* call = javascript()->Call(args->length() + 2, flags);
+ Node* value = ProcessArguments(call, args->length() + 2);
+ ast_context()->ProduceValueWithLazyBailout(value);
+}
+
+
+void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+ const Runtime::Function* function = expr->function();
+
+ // Handle calls to runtime functions implemented in JavaScript separately as
+ // the call follows JavaScript ABI and the callee is statically unknown.
+ if (expr->is_jsruntime()) {
+ DCHECK(function == NULL && expr->name()->length() > 0);
+ return VisitCallJSRuntime(expr);
+ }
+
+ // Evaluate all arguments to the runtime call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // Create node to perform the runtime call.
+ Runtime::FunctionId functionId = function->function_id;
+ Operator* call = javascript()->Runtime(functionId, args->length());
+ Node* value = ProcessArguments(call, args->length());
+ ast_context()->ProduceValueWithLazyBailout(value);
+}
+
+
+void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE:
+ return VisitDelete(expr);
+ case Token::VOID:
+ return VisitVoid(expr);
+ case Token::TYPEOF:
+ return VisitTypeof(expr);
+ case Token::NOT:
+ return VisitNot(expr);
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
+ DCHECK(expr->expression()->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->expression()->AsProperty();
+ LhsKind assign_type = DetermineLhsKind(expr->expression());
+
+ // Reserve space for result of postfix operation.
+ bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
+ if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+
+ // Evaluate LHS expression and get old value.
+ Node* old_value = NULL;
+ int stack_depth = -1;
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ old_value = BuildVariableLoad(variable, expr->expression()->id());
+ stack_depth = 0;
+ break;
+ }
+ case NAMED_PROPERTY: {
+ VisitForValue(property->obj());
+ Node* object = environment()->Top();
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ old_value = NewNode(javascript()->LoadNamed(name), object);
+ BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ stack_depth = 1;
+ break;
+ }
+ case KEYED_PROPERTY: {
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Top();
+ Node* object = environment()->Peek(1);
+ old_value = NewNode(javascript()->LoadProperty(), object, key);
+ BuildLazyBailoutWithPushedNode(old_value, property->LoadId());
+ stack_depth = 2;
+ break;
+ }
+ }
+
+ // Convert old value into a number.
+ old_value = NewNode(javascript()->ToNumber(), old_value);
+
+ // Save result for postfix expressions at correct stack depth.
+ if (is_postfix) environment()->Poke(stack_depth, old_value);
+
+ // Create node to perform +1/-1 operation.
+ Node* value =
+ BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+  // TODO(jarin): Insert proper bailout id here (will need to change the
+  // full code generator).
+ BuildLazyBailout(value, BailoutId::None());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ BuildVariableAssignment(variable, value, expr->op(),
+ expr->AssignmentId());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ Node* object = environment()->Pop();
+ PrintableUnique<Name> name =
+ MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Node* store = NewNode(javascript()->StoreNamed(name), object, value);
+ BuildLazyBailout(store, expr->AssignmentId());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ Node* store = NewNode(javascript()->StoreProperty(), object, key, value);
+ BuildLazyBailout(store, expr->AssignmentId());
+ break;
+ }
+ }
+
+ // Restore old value for postfix expressions.
+ if (is_postfix) value = environment()->Pop();
+
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::COMMA:
+ return VisitComma(expr);
+ case Token::OR:
+ case Token::AND:
+ return VisitLogicalExpression(expr);
+ default: {
+ VisitForValue(expr->left());
+ VisitForValue(expr->right());
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ Node* value = BuildBinaryOp(left, right, expr->op());
+ ast_context()->ProduceValueWithLazyBailout(value);
+ }
+ }
+}
+
+
+void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+ Operator* op;
+ switch (expr->op()) {
+ case Token::EQ:
+ op = javascript()->Equal();
+ break;
+ case Token::NE:
+ op = javascript()->NotEqual();
+ break;
+ case Token::EQ_STRICT:
+ op = javascript()->StrictEqual();
+ break;
+ case Token::NE_STRICT:
+ op = javascript()->StrictNotEqual();
+ break;
+ case Token::LT:
+ op = javascript()->LessThan();
+ break;
+ case Token::GT:
+ op = javascript()->GreaterThan();
+ break;
+ case Token::LTE:
+ op = javascript()->LessThanOrEqual();
+ break;
+ case Token::GTE:
+ op = javascript()->GreaterThanOrEqual();
+ break;
+ case Token::INSTANCEOF:
+ op = javascript()->InstanceOf();
+ break;
+ case Token::IN:
+ op = javascript()->HasProperty();
+ break;
+ default:
+ op = NULL;
+ UNREACHABLE();
+ }
+ VisitForValue(expr->left());
+ VisitForValue(expr->right());
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ Node* value = NewNode(op, left, right);
+ ast_context()->ProduceValue(value);
+
+ BuildLazyBailout(value, expr->id());
+}
+
+
+void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+ Node* value = GetFunctionClosure();
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+ DCHECK(globals()->is_empty());
+ AstVisitor::VisitDeclarations(declarations);
+ if (globals()->is_empty()) return;
+ Handle<FixedArray> data =
+ isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
+ for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+ int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+ DeclareGlobalsStrictMode::encode(info()->strict_mode());
+ Node* flags = jsgraph()->Constant(encoded_flags);
+ Node* pairs = jsgraph()->Constant(data);
+ Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+ NewNode(op, current_context(), pairs, flags);
+ globals()->Rewind(0);
+}
+
+
+void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
+ if (stmt == NULL) return;
+ Visit(stmt);
+}
+
+
+void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
+ LoopBuilder* loop, int drop_extra) {
+ BreakableScope scope(this, stmt, loop, drop_extra);
+ Visit(stmt->body());
+}
+
+
+void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
+ Node* value;
+ if (expr->expression()->IsVariableProxy()) {
+    // Delete of an unqualified identifier is only allowed in sloppy mode, but
+    // deleting "this" is allowed in all language modes.
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ DCHECK(strict_mode() == SLOPPY || variable->is_this());
+ value = BuildVariableDelete(variable);
+ } else if (expr->expression()->IsProperty()) {
+ Property* property = expr->expression()->AsProperty();
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+ } else {
+ VisitForEffect(expr->expression());
+ value = jsgraph()->TrueConstant();
+ }
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
+ VisitForEffect(expr->expression());
+ Node* value = jsgraph()->UndefinedConstant();
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+ Node* operand;
+ if (expr->expression()->IsVariableProxy()) {
+ // Typeof does not throw a reference error on global variables, hence we
+ // perform a non-contextual load in case the operand is a variable proxy.
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ operand =
+ BuildVariableLoad(variable, expr->expression()->id(), NOT_CONTEXTUAL);
+ } else {
+ VisitForValue(expr->expression());
+ operand = environment()->Pop();
+ }
+ Node* value = NewNode(javascript()->TypeOf(), operand);
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
+ VisitForValue(expr->expression());
+ Node* operand = environment()->Pop();
+ // TODO(mstarzinger): Possible optimization when we are in effect context.
+ Node* value = NewNode(javascript()->UnaryNot(), operand);
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
+ VisitForEffect(expr->left());
+ Visit(expr->right());
+ ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+ bool is_logical_and = expr->op() == Token::AND;
+ IfBuilder compare_if(this);
+ VisitForValue(expr->left());
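+  // The left-hand side value is kept on the operand stack so that it can serve
+  // as the result when the right-hand side is not evaluated.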
+ Node* condition = environment()->Top();
+ compare_if.If(BuildToBoolean(condition));
+ compare_if.Then();
+ if (is_logical_and) {
+ environment()->Pop();
+ Visit(expr->right());
+ } else if (ast_context()->IsEffect()) {
+ environment()->Pop();
+ }
+ compare_if.Else();
+ if (!is_logical_and) {
+ environment()->Pop();
+ Visit(expr->right());
+ } else if (ast_context()->IsEffect()) {
+ environment()->Pop();
+ }
+ compare_if.End();
+ ast_context()->ReplaceValue();
+}
+
+
+Node* AstGraphBuilder::ProcessArguments(Operator* op, int arity) {
+ DCHECK(environment()->stack_height() >= arity);
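+  // Pop the top 'arity' values off the operand stack (last argument first)
+  // and pass them as the inputs of a single node.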
+ Node** all = info()->zone()->NewArray<Node*>(arity); // XXX: alloca?
+ for (int i = arity - 1; i >= 0; --i) {
+ all[i] = environment()->Pop();
+ }
+ Node* value = NewNode(op, arity, all);
+ return value;
+}
+
+
+Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots <= 0) return context;
+ set_current_context(context);
+
+ // Allocate a new local context.
+ Operator* op = javascript()->CreateFunctionContext();
+ Node* local_context = NewNode(op, closure);
+ set_current_context(local_context);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = info()->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* variable = info()->scope()->parameter(i);
+ if (!variable->IsContextSlot()) continue;
+ // Temporary parameter node. The parameter indices are shifted by 1
+ // (receiver is parameter index -1 but environment index 0).
+ Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+ Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, local_context, parameter);
+ }
+
+ return local_context;
+}
+
+
+Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
+ if (arguments == NULL) return NULL;
+
+ // Allocate and initialize a new arguments object.
+ Node* callee = GetFunctionClosure();
+ Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+ Node* object = NewNode(op, callee);
+
+ // Assign the object to the arguments variable.
+ DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
+  // This should never lazy deopt, so it is fine to pass an invalid bailout id.
+ BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None());
+
+ return object;
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
+ Node* not_hole) {
+ IfBuilder hole_check(this);
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ hole_check.If(check);
+ hole_check.Then();
+ environment()->Push(for_hole);
+ hole_check.Else();
+ environment()->Push(not_hole);
+ hole_check.End();
+ return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
+ Node* not_hole) {
+ IfBuilder hole_check(this);
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ hole_check.If(check);
+ hole_check.Then();
+ environment()->Push(BuildThrowReferenceError(variable));
+ hole_check.Else();
+ environment()->Push(not_hole);
+ hole_check.End();
+ return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
+ BailoutId bailout_id,
+ ContextualMode contextual_mode) {
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ VariableMode mode = variable->mode();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ // Global var, const, or let variable.
+ Node* global = BuildLoadGlobalObject();
+ PrintableUnique<Name> name = MakeUnique(variable->name());
+ Operator* op = javascript()->LoadNamed(name, contextual_mode);
+ Node* node = NewNode(op, global);
+ BuildLazyBailoutWithPushedNode(node, bailout_id);
+ return node;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ // Local var, const, or let variable.
+ Node* value = environment()->Lookup(variable);
+ if (mode == CONST_LEGACY) {
+ // Perform check for uninitialized legacy const variables.
+ if (value->op() == the_hole->op()) {
+ value = jsgraph()->UndefinedConstant();
+ } else if (value->opcode() == IrOpcode::kPhi) {
+ Node* undefined = jsgraph()->UndefinedConstant();
+ value = BuildHoleCheckSilent(value, undefined, value);
+ }
+ } else if (mode == LET || mode == CONST) {
+ // Perform check for uninitialized let/const variables.
+ if (value->op() == the_hole->op()) {
+ value = BuildThrowReferenceError(variable);
+ } else if (value->opcode() == IrOpcode::kPhi) {
+ value = BuildHoleCheckThrow(value, variable, value);
+ }
+ }
+ return value;
+ }
+ case Variable::CONTEXT: {
+ // Context variable (potentially up the context chain).
+ int depth = current_scope()->ContextChainLength(variable->scope());
+ bool immutable = variable->maybe_assigned() == kNotAssigned;
+ Operator* op =
+ javascript()->LoadContext(depth, variable->index(), immutable);
+ Node* value = NewNode(op, current_context());
+ // TODO(titzer): initialization checks are redundant for already
+ // initialized immutable context loads, but only specialization knows.
+ // Maybe specializer should be a parameter to the graph builder?
+ if (mode == CONST_LEGACY) {
+ // Perform check for uninitialized legacy const variables.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ value = BuildHoleCheckSilent(value, undefined, value);
+ } else if (mode == LET || mode == CONST) {
+ // Perform check for uninitialized let/const variables.
+ value = BuildHoleCheckThrow(value, variable, value);
+ }
+ return value;
+ }
+ case Variable::LOOKUP: {
+ // Dynamic lookup of context variable (anywhere in the chain).
+ Node* name = jsgraph()->Constant(variable->name());
+ Runtime::FunctionId function_id =
+ (contextual_mode == CONTEXTUAL)
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
+ Operator* op = javascript()->Runtime(function_id, 2);
+ Node* pair = NewNode(op, current_context(), name);
+ return NewNode(common()->Projection(0), pair);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ // Global var, const, or let variable.
+ Node* global = BuildLoadGlobalObject();
+ Node* name = jsgraph()->Constant(variable->name());
+ Operator* op = javascript()->DeleteProperty(strict_mode());
+ return NewNode(op, global, name);
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT:
+ // Local var, const, or let variable or context variable.
+ return variable->is_this() ? jsgraph()->TrueConstant()
+ : jsgraph()->FalseConstant();
+ case Variable::LOOKUP: {
+ // Dynamic lookup of context variable (anywhere in the chain).
+ Node* name = jsgraph()->Constant(variable->name());
+ Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
+ return NewNode(op, current_context(), name);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+ Token::Value op,
+ BailoutId bailout_id) {
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ VariableMode mode = variable->mode();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ // Global var, const, or let variable.
+ Node* global = BuildLoadGlobalObject();
+ PrintableUnique<Name> name = MakeUnique(variable->name());
+ Operator* op = javascript()->StoreNamed(name);
+ Node* store = NewNode(op, global, value);
+ BuildLazyBailout(store, bailout_id);
+ return store;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ // Local var, const, or let variable.
+ if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ // Perform an initialization check for legacy const variables.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() != the_hole->op()) {
+ value = BuildHoleCheckSilent(current, value, current);
+ }
+ } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+ return value;
+ } else if (mode == LET && op != Token::INIT_LET) {
+ // Perform an initialization check for let declared variables.
+ // Also note that the dynamic hole-check is only done to ensure that
+ // this does not break in the presence of do-expressions within the
+ // temporal dead zone of a let declared variable.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() == the_hole->op()) {
+ value = BuildThrowReferenceError(variable);
+ } else if (value->opcode() == IrOpcode::kPhi) {
+ value = BuildHoleCheckThrow(current, variable, value);
+ }
+ } else if (mode == CONST && op != Token::INIT_CONST) {
+ // All assignments to const variables are early errors.
+ UNREACHABLE();
+ }
+ environment()->Bind(variable, value);
+ return value;
+ case Variable::CONTEXT: {
+ // Context variable (potentially up the context chain).
+ int depth = current_scope()->ContextChainLength(variable->scope());
+ if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ // Perform an initialization check for legacy const variables.
+ Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ value = BuildHoleCheckSilent(current, value, current);
+ } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+ return value;
+ } else if (mode == LET && op != Token::INIT_LET) {
+ // Perform an initialization check for let declared variables.
+ Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ value = BuildHoleCheckThrow(current, variable, value);
+ } else if (mode == CONST && op != Token::INIT_CONST) {
+ // All assignments to const variables are early errors.
+ UNREACHABLE();
+ }
+ Operator* op = javascript()->StoreContext(depth, variable->index());
+ return NewNode(op, current_context(), value);
+ }
+ case Variable::LOOKUP: {
+ // Dynamic lookup of context variable (anywhere in the chain).
+ Node* name = jsgraph()->Constant(variable->name());
+ Node* strict = jsgraph()->Constant(strict_mode());
+ // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
+ // initializations of const declarations.
+ Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+ return NewNode(op, value, current_context(), name, strict);
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
+ // TODO(mstarzinger): Better load from function context, otherwise optimized
+ // code cannot be shared across native contexts.
+ return jsgraph()->Constant(handle(info()->context()->builtins()));
+}
+
+
+Node* AstGraphBuilder::BuildLoadGlobalObject() {
+#if 0
+ Node* context = GetFunctionContext();
+ // TODO(mstarzinger): Use mid-level operator on FixedArray instead of the
+ // JS-level operator that targets JSObject.
+ Node* index = jsgraph()->Constant(Context::GLOBAL_OBJECT_INDEX);
+ return NewNode(javascript()->LoadProperty(), context, index);
+#else
+ // TODO(mstarzinger): Better load from function context, otherwise optimized
+ // code cannot be shared across native contexts. See unused code above.
+ return jsgraph()->Constant(handle(info()->context()->global_object()));
+#endif
+}
+
+
+Node* AstGraphBuilder::BuildToBoolean(Node* value) {
+ // TODO(mstarzinger): Possible optimization is to NOP for boolean values.
+ return NewNode(javascript()->ToBoolean(), value);
+}
+
+
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
+ // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+ Node* variable_name = jsgraph()->Constant(variable->name());
+ Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
+ return NewNode(op, variable_name);
+}
+
+
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+ Operator* js_op;
+ switch (op) {
+ case Token::BIT_OR:
+ js_op = javascript()->BitwiseOr();
+ break;
+ case Token::BIT_AND:
+ js_op = javascript()->BitwiseAnd();
+ break;
+ case Token::BIT_XOR:
+ js_op = javascript()->BitwiseXor();
+ break;
+ case Token::SHL:
+ js_op = javascript()->ShiftLeft();
+ break;
+ case Token::SAR:
+ js_op = javascript()->ShiftRight();
+ break;
+ case Token::SHR:
+ js_op = javascript()->ShiftRightLogical();
+ break;
+ case Token::ADD:
+ js_op = javascript()->Add();
+ break;
+ case Token::SUB:
+ js_op = javascript()->Subtract();
+ break;
+ case Token::MUL:
+ js_op = javascript()->Multiply();
+ break;
+ case Token::DIV:
+ js_op = javascript()->Divide();
+ break;
+ case Token::MOD:
+ js_op = javascript()->Modulus();
+ break;
+ default:
+ UNREACHABLE();
+ js_op = NULL;
+ }
+ return NewNode(js_op, left, right);
+}
+
+
+void AstGraphBuilder::BuildLazyBailout(Node* node, BailoutId ast_id) {
+ if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
+ // The deopting node should have an outgoing control dependency.
+ DCHECK(environment()->GetControlDependency() == node);
+
+ StructuredGraphBuilder::Environment* continuation_env = environment();
+ // Create environment for the deoptimization block, and build the block.
+ StructuredGraphBuilder::Environment* deopt_env =
+ CopyEnvironment(continuation_env);
+ set_environment(deopt_env);
+
+ NewNode(common()->LazyDeoptimization());
+
+ // TODO(jarin) If ast_id.IsNone(), perhaps we should generate an empty
+ // deopt block and make sure there is no patch entry for this (so
+ // that the deoptimizer dies when trying to deoptimize here).
+
+ Node* state_node = environment()->Checkpoint(ast_id);
+
+ Node* deoptimize_node = NewNode(common()->Deoptimize(), state_node);
+
+ UpdateControlDependencyToLeaveFunction(deoptimize_node);
+
+ // Continue with the original environment.
+ set_environment(continuation_env);
+
+ NewNode(common()->Continuation());
+ }
+}
+
+
+void AstGraphBuilder::BuildLazyBailoutWithPushedNode(Node* node,
+ BailoutId ast_id) {
+ environment()->Push(node);
+ BuildLazyBailout(node, ast_id);
+ environment()->Pop();
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
new file mode 100644
index 000000000..861bd5baa
--- /dev/null
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -0,0 +1,428 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
+#define V8_COMPILER_AST_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlBuilder;
+class LoopBuilder;
+class Graph;
+
+// The AstGraphBuilder produces a high-level IR graph, based on an
+// underlying AST. The produced graph can either be compiled into a
+// stand-alone function or be wired into another graph for the purposes
+// of function inlining.
+class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+ public:
+ AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph);
+
+ // Creates a graph by visiting the entire AST.
+ bool CreateGraph();
+
+ protected:
+ class AstContext;
+ class AstEffectContext;
+ class AstValueContext;
+ class AstTestContext;
+ class BreakableScope;
+ class ContextScope;
+ class Environment;
+
+ Environment* environment() {
+ return reinterpret_cast<Environment*>(
+ StructuredGraphBuilder::environment());
+ }
+
+ AstContext* ast_context() const { return ast_context_; }
+ BreakableScope* breakable() const { return breakable_; }
+ ContextScope* execution_context() const { return execution_context_; }
+
+ void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+ void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+ void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+ // Support for control flow builders. The concrete type of the environment
+ // depends on the graph builder, but environments themselves are not virtual.
+ typedef StructuredGraphBuilder::Environment BaseEnvironment;
+ virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
+
+ // TODO(mstarzinger): The pipeline only needs to be a friend to access the
+ // function context. Remove as soon as the context is a parameter.
+ friend class Pipeline;
+
+ // Getters for values in the activation record.
+ Node* GetFunctionClosure();
+ Node* GetFunctionContext();
+
+ //
+ // The following build methods all generate graph fragments and return one
+ // resulting node. The operand stack height remains the same, variables and
+ // other dependencies tracked by the environment might be mutated though.
+ //
+
+ // Builder to create a local function context.
+ Node* BuildLocalFunctionContext(Node* context, Node* closure);
+
+ // Builder to create an arguments object if it is used.
+ Node* BuildArgumentsObject(Variable* arguments);
+
+ // Builders for variable load and assignment.
+ Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
+ BailoutId bailout_id);
+ Node* BuildVariableDelete(Variable* var);
+ Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+ ContextualMode mode = CONTEXTUAL);
+
+ // Builders for accessing the function context.
+ Node* BuildLoadBuiltinsObject();
+ Node* BuildLoadGlobalObject();
+ Node* BuildLoadClosure();
+
+ // Builders for automatic type conversion.
+ Node* BuildToBoolean(Node* value);
+
+ // Builders for error reporting at runtime.
+ Node* BuildThrowReferenceError(Variable* var);
+
+ // Builders for dynamic hole-checks at runtime.
+ Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+ Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+
+ // Builders for binary operations.
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ // Visiting functions for AST nodes make this an AstVisitor.
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Visiting function for declarations list is overridden.
+ virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ private:
+ CompilationInfo* info_;
+ AstContext* ast_context_;
+ JSGraph* jsgraph_;
+
+ // List of global declarations for functions and variables.
+ ZoneList<Handle<Object> > globals_;
+
+ // Stack of breakable statements entered by the visitor.
+ BreakableScope* breakable_;
+
+ // Stack of context objects pushed onto the chain by the visitor.
+ ContextScope* execution_context_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> function_context_;
+
+ CompilationInfo* info() { return info_; }
+ StrictMode strict_mode() { return info()->strict_mode(); }
+ JSGraph* jsgraph() { return jsgraph_; }
+ JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+ ZoneList<Handle<Object> >* globals() { return &globals_; }
+
+ // Current scope during visitation.
+ inline Scope* current_scope() const;
+
+ // Process arguments to a call by popping {arity} elements off the operand
+  // stack and building a call node using the given call operator.
+ Node* ProcessArguments(Operator* op, int arity);
+
+ // Visit statements.
+ void VisitIfNotNull(Statement* stmt);
+
+ // Visit expressions.
+ void VisitForTest(Expression* expr);
+ void VisitForEffect(Expression* expr);
+ void VisitForValue(Expression* expr);
+ void VisitForValueOrNull(Expression* expr);
+ void VisitForValues(ZoneList<Expression*>* exprs);
+
+ // Common for all IterationStatement bodies.
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+
+ // Dispatched from VisitCallRuntime.
+ void VisitCallJSRuntime(CallRuntime* expr);
+
+ // Dispatched from VisitUnaryOperation.
+ void VisitDelete(UnaryOperation* expr);
+ void VisitVoid(UnaryOperation* expr);
+ void VisitTypeof(UnaryOperation* expr);
+ void VisitNot(UnaryOperation* expr);
+
+ // Dispatched from VisitBinaryOperation.
+ void VisitComma(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
+
+ // Dispatched from VisitForInStatement.
+ void VisitForInAssignment(Expression* expr, Node* value);
+
+ void BuildLazyBailout(Node* node, BailoutId ast_id);
+ void BuildLazyBailoutWithPushedNode(Node* node, BailoutId ast_id);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+ DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
+};
+
+
+// The abstract execution environment for generated code consists of
+// parameter variables, local variables and the operand stack. The
+// environment will perform proper SSA-renaming of all tracked nodes
+// at split and merge points in the control flow. Internally all the
+// values are stored in one list using the following layout:
+//
+// [parameters (+receiver)] [locals] [operand stack]
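+//
+// For example, with two declared parameters and three stack-allocated locals,
+// index 0 holds the receiver, indices 1-2 the parameters, indices 3-5 the
+// locals, and index 6 onwards the operand stack. This matches the index
+// arithmetic in Bind() and Lookup() below.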
+//
+class AstGraphBuilder::Environment
+ : public StructuredGraphBuilder::Environment {
+ public:
+ Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+ Environment(const Environment& copy);
+
+ int parameters_count() const { return parameters_count_; }
+ int locals_count() const { return locals_count_; }
+ int stack_height() {
+ return static_cast<int>(values()->size()) - parameters_count_ -
+ locals_count_;
+ }
+
+ // Operations on parameter or local variables. The parameter indices are
+ // shifted by 1 (receiver is parameter index -1 but environment index 0).
+ void Bind(Variable* variable, Node* node) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ values()->at(variable->index() + 1) = node;
+ parameters_dirty_ = true;
+ } else {
+ DCHECK(variable->IsStackLocal());
+ values()->at(variable->index() + parameters_count_) = node;
+ locals_dirty_ = true;
+ }
+ }
+ Node* Lookup(Variable* variable) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ return values()->at(variable->index() + 1);
+ } else {
+ DCHECK(variable->IsStackLocal());
+ return values()->at(variable->index() + parameters_count_);
+ }
+ }
+
+ // Operations on the operand stack.
+ void Push(Node* node) {
+ values()->push_back(node);
+ stack_dirty_ = true;
+ }
+ Node* Top() {
+ DCHECK(stack_height() > 0);
+ return values()->back();
+ }
+ Node* Pop() {
+ DCHECK(stack_height() > 0);
+ Node* back = values()->back();
+ values()->pop_back();
+ stack_dirty_ = true;
+ return back;
+ }
+
+ // Direct mutations of the operand stack.
+ void Poke(int depth, Node* node) {
+ DCHECK(depth >= 0 && depth < stack_height());
+ int index = static_cast<int>(values()->size()) - depth - 1;
+ values()->at(index) = node;
+ stack_dirty_ = true;
+ }
+ Node* Peek(int depth) {
+ DCHECK(depth >= 0 && depth < stack_height());
+ int index = static_cast<int>(values()->size()) - depth - 1;
+ return values()->at(index);
+ }
+ void Drop(int depth) {
+ DCHECK(depth >= 0 && depth <= stack_height());
+ values()->erase(values()->end() - depth, values()->end());
+ stack_dirty_ = true;
+ }
+
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId ast_id);
+
+ private:
+ int parameters_count_;
+ int locals_count_;
+ Node* parameters_node_;
+ Node* locals_node_;
+ Node* stack_node_;
+ bool parameters_dirty_;
+ bool locals_dirty_;
+ bool stack_dirty_;
+};
+
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ // Plug a node into this expression context. Call this function in tail
+ // position in the Visit functions for expressions.
+ virtual void ProduceValue(Node* value) = 0;
+ virtual void ProduceValueWithLazyBailout(Node* value) = 0;
+
+ // Unplugs a node from this expression context. Call this to retrieve the
+ // result of another Visit function that already plugged the context.
+ virtual Node* ConsumeValue() = 0;
+
+ // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+ void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+ AstContext(AstGraphBuilder* owner, Expression::Context kind,
+ BailoutId bailout_id);
+ virtual ~AstContext();
+
+ AstGraphBuilder* owner() const { return owner_; }
+ Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+ int original_height_;
+#endif
+
+ BailoutId bailout_id_;
+
+ private:
+ Expression::Context kind_;
+ AstGraphBuilder* owner_;
+ AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
+ public:
+ explicit AstEffectContext(AstGraphBuilder* owner, BailoutId bailout_id)
+ : AstContext(owner, Expression::kEffect, bailout_id) {}
+ virtual ~AstEffectContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
+ public:
+ explicit AstValueContext(AstGraphBuilder* owner, BailoutId bailout_id)
+ : AstContext(owner, Expression::kValue, bailout_id) {}
+ virtual ~AstValueContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
+ public:
+ explicit AstTestContext(AstGraphBuilder* owner, BailoutId bailout_id)
+ : AstContext(owner, Expression::kTest, bailout_id) {}
+ virtual ~AstTestContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual void ProduceValueWithLazyBailout(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Scoped class tracking breakable statements entered by the visitor. Allows
+// 'break' and 'continue' to properly target iteration statements, as well as
+// 'break' to exit blocks within switch statements.
+class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
+ public:
+ BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
+ ControlBuilder* control, int drop_extra)
+ : owner_(owner),
+ target_(target),
+ next_(owner->breakable()),
+ control_(control),
+ drop_extra_(drop_extra) {
+ owner_->set_breakable(this); // Push.
+ }
+
+ ~BreakableScope() {
+ owner_->set_breakable(next_); // Pop.
+ }
+
+ // Either 'break' or 'continue' the target statement.
+ void BreakTarget(BreakableStatement* target);
+ void ContinueTarget(BreakableStatement* target);
+
+ private:
+ AstGraphBuilder* owner_;
+ BreakableStatement* target_;
+ BreakableScope* next_;
+ ControlBuilder* control_;
+ int drop_extra_;
+
+ // Find the correct scope for the target statement. Note that this also drops
+ // extra operands from the environment for each scope skipped along the way.
+ BreakableScope* FindBreakable(BreakableStatement* target);
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows changing
+// the current {scope} and {context} during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
+ : owner_(owner),
+ next_(owner->execution_context()),
+ outer_(owner->current_context()),
+ scope_(scope) {
+ owner_->set_execution_context(this); // Push.
+ owner_->set_current_context(context);
+ }
+
+ ~ContextScope() {
+ owner_->set_execution_context(next_); // Pop.
+ owner_->set_current_context(outer_);
+ }
+
+ // Current scope during visitation.
+ Scope* scope() const { return scope_; }
+
+ private:
+ AstGraphBuilder* owner_;
+ ContextScope* next_;
+ Node* outer_;
+ Scope* scope_;
+};
+
+Scope* AstGraphBuilder::current_scope() const {
+ return execution_context_->scope();
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_AST_GRAPH_BUILDER_H_
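
A rough usage sketch for the builder declared above. The pipeline glue is not
part of this header, so the wrapper function below is hypothetical, and the
return value of CreateGraph() is presumed to signal success:

  // Sketch only; the CompilationInfo and JSGraph are assumed to be set up by
  // the caller (the pipeline), and BuildGraphForFunction is a made-up helper.
  bool BuildGraphForFunction(CompilationInfo* info, JSGraph* jsgraph) {
    AstGraphBuilder builder(info, jsgraph);
    return builder.CreateGraph();  // Visits the entire AST.
  }
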
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
new file mode 100644
index 000000000..3f8e45b9e
--- /dev/null
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -0,0 +1,260 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ChangeLoweringBase::ChangeLoweringBase(Graph* graph, Linkage* linkage,
+ CommonNodeCache* cache)
+ : graph_(graph),
+ isolate_(graph->zone()->isolate()),
+ linkage_(linkage),
+ cache_(cache),
+ common_(graph->zone()),
+ machine_(graph->zone()) {}
+
+
+ChangeLoweringBase::~ChangeLoweringBase() {}
+
+
+Node* ChangeLoweringBase::ExternalConstant(ExternalReference reference) {
+ Node** loc = cache()->FindExternalConstant(reference);
+ if (*loc == NULL) {
+ *loc = graph()->NewNode(common()->ExternalConstant(reference));
+ }
+ return *loc;
+}
+
+
+Node* ChangeLoweringBase::HeapConstant(PrintableUnique<HeapObject> value) {
+ // TODO(bmeurer): Use common node cache.
+ return graph()->NewNode(common()->HeapConstant(value));
+}
+
+
+Node* ChangeLoweringBase::ImmovableHeapConstant(Handle<HeapObject> value) {
+ return HeapConstant(
+ PrintableUnique<HeapObject>::CreateImmovable(graph()->zone(), value));
+}
+
+
+Node* ChangeLoweringBase::Int32Constant(int32_t value) {
+ Node** loc = cache()->FindInt32Constant(value);
+ if (*loc == NULL) {
+ *loc = graph()->NewNode(common()->Int32Constant(value));
+ }
+ return *loc;
+}
+
+
+Node* ChangeLoweringBase::NumberConstant(double value) {
+ Node** loc = cache()->FindNumberConstant(value);
+ if (*loc == NULL) {
+ *loc = graph()->NewNode(common()->NumberConstant(value));
+ }
+ return *loc;
+}
+
+
+Node* ChangeLoweringBase::CEntryStubConstant() {
+ if (!c_entry_stub_constant_.is_set()) {
+ c_entry_stub_constant_.set(
+ ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+ }
+ return c_entry_stub_constant_.get();
+}
+
+
+Node* ChangeLoweringBase::TrueConstant() {
+ if (!true_constant_.is_set()) {
+ true_constant_.set(
+ ImmovableHeapConstant(isolate()->factory()->true_value()));
+ }
+ return true_constant_.get();
+}
+
+
+Node* ChangeLoweringBase::FalseConstant() {
+ if (!false_constant_.is_set()) {
+ false_constant_.set(
+ ImmovableHeapConstant(isolate()->factory()->false_value()));
+ }
+ return false_constant_.get();
+}
+
+
+Reduction ChangeLoweringBase::ChangeBitToBool(Node* val, Node* control) {
+ Node* branch = graph()->NewNode(common()->Branch(), val, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* true_value = TrueConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* false_value = FalseConstant();
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(2), true_value, false_value, merge);
+
+ return Replace(phi);
+}
+
+
+template <size_t kPointerSize>
+ChangeLowering<kPointerSize>::ChangeLowering(Graph* graph, Linkage* linkage)
+ : ChangeLoweringBase(graph, linkage,
+ new (graph->zone()) CommonNodeCache(graph->zone())) {}
+
+
+template <size_t kPointerSize>
+Reduction ChangeLowering<kPointerSize>::Reduce(Node* node) {
+ Node* control = graph()->start();
+ Node* effect = control;
+ switch (node->opcode()) {
+ case IrOpcode::kChangeBitToBool:
+ return ChangeBitToBool(node->InputAt(0), control);
+ case IrOpcode::kChangeBoolToBit:
+ return ChangeBoolToBit(node->InputAt(0));
+ case IrOpcode::kChangeInt32ToTagged:
+ return ChangeInt32ToTagged(node->InputAt(0), effect, control);
+ case IrOpcode::kChangeTaggedToFloat64:
+ return ChangeTaggedToFloat64(node->InputAt(0), effect, control);
+ default:
+ return NoChange();
+ }
+ UNREACHABLE();
+ return NoChange();
+}
+
+
+template <>
+Reduction ChangeLowering<4>::ChangeBoolToBit(Node* val) {
+ return Replace(
+ graph()->NewNode(machine()->Word32Equal(), val, TrueConstant()));
+}
+
+
+template <>
+Reduction ChangeLowering<8>::ChangeBoolToBit(Node* val) {
+ return Replace(
+ graph()->NewNode(machine()->Word64Equal(), val, TrueConstant()));
+}
+
+
+template <>
+Reduction ChangeLowering<4>::ChangeInt32ToTagged(Node* val, Node* effect,
+ Node* control) {
+ Node* context = NumberConstant(0);
+
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
+ Node* ovf = graph()->NewNode(common()->Projection(1), add);
+
+ Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(), val);
+
+ // TODO(bmeurer): Inline allocation if possible.
+ const Runtime::Function* fn =
+ Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
+ DCHECK_EQ(0, fn->nargs);
+ CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+ fn->function_id, 0, Operator::kNoProperties);
+ Node* heap_number =
+ graph()->NewNode(common()->Call(desc), CEntryStubConstant(),
+ ExternalConstant(ExternalReference(fn, isolate())),
+ Int32Constant(0), context, effect, if_true);
+
+ Node* store = graph()->NewNode(
+ machine()->Store(kMachineFloat64, kNoWriteBarrier), heap_number,
+ Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), number, effect,
+ heap_number);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* smi = graph()->NewNode(common()->Projection(0), add);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), store, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(2), heap_number, smi, merge);
+
+ return Replace(phi);
+}
+
+
+template <>
+Reduction ChangeLowering<8>::ChangeInt32ToTagged(Node* val, Node* effect,
+ Node* control) {
+ return Replace(graph()->NewNode(
+ machine()->Word64Shl(), val,
+ Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize)));
+}
+
+
+template <>
+Reduction ChangeLowering<4>::ChangeTaggedToFloat64(Node* val, Node* effect,
+ Node* control) {
+ Node* branch = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32And(), val, Int32Constant(kSmiTagMask)),
+ control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* load = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* number = graph()->NewNode(
+ machine()->ChangeInt32ToFloat64(),
+ graph()->NewNode(
+ machine()->Word32Sar(), val,
+ Int32Constant(SmiTagging<4>::kSmiShiftSize + kSmiTagSize)));
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge);
+
+ return Replace(phi);
+}
+
+
+template <>
+Reduction ChangeLowering<8>::ChangeTaggedToFloat64(Node* val, Node* effect,
+ Node* control) {
+ Node* branch = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word64And(), val, Int32Constant(kSmiTagMask)),
+ control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* load = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ Int32Constant(HeapNumber::kValueOffset - kHeapObjectTag), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* number = graph()->NewNode(
+ machine()->ChangeInt32ToFloat64(),
+ graph()->NewNode(
+ machine()->ConvertInt64ToInt32(),
+ graph()->NewNode(
+ machine()->Word64Sar(), val,
+ Int32Constant(SmiTagging<8>::kSmiShiftSize + kSmiTagSize))));
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(2), load, number, merge);
+
+ return Replace(phi);
+}
+
+
+template class ChangeLowering<4>;
+template class ChangeLowering<8>;
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
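
The 64-bit ChangeInt32ToTagged case above reduces to a single left shift, while
the 32-bit case tags via Int32AddWithOverflow (val + val, i.e. a shift by one)
and falls back to allocating a HeapNumber on overflow. Assuming the usual
64-bit Smi layout, where SmiTagging<8>::kSmiShiftSize is 31 and kSmiTagSize is
1 (values not shown in this hunk), the 64-bit shift amount works out to 32:

  // Sketch of the tagging arithmetic only, under the assumption stated above.
  int64_t TagSmi64(int32_t value) {
    return static_cast<int64_t>(value) << 32;  // payload in the upper word
  }
  // e.g. TagSmi64(5) == 0x0000000500000000; the low 32 tag bits stay zero.
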
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
new file mode 100644
index 000000000..3e16d800d
--- /dev/null
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHANGE_LOWERING_H_
+#define V8_COMPILER_CHANGE_LOWERING_H_
+
+#include "include/v8.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonNodeCache;
+class Linkage;
+
+class ChangeLoweringBase : public Reducer {
+ public:
+ ChangeLoweringBase(Graph* graph, Linkage* linkage, CommonNodeCache* cache);
+ virtual ~ChangeLoweringBase();
+
+ protected:
+ Node* ExternalConstant(ExternalReference reference);
+ Node* HeapConstant(PrintableUnique<HeapObject> value);
+ Node* ImmovableHeapConstant(Handle<HeapObject> value);
+ Node* Int32Constant(int32_t value);
+ Node* NumberConstant(double value);
+ Node* CEntryStubConstant();
+ Node* TrueConstant();
+ Node* FalseConstant();
+
+ Reduction ChangeBitToBool(Node* val, Node* control);
+
+ Graph* graph() const { return graph_; }
+ Isolate* isolate() const { return isolate_; }
+ Linkage* linkage() const { return linkage_; }
+ CommonNodeCache* cache() const { return cache_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ Graph* graph_;
+ Isolate* isolate_;
+ Linkage* linkage_;
+ CommonNodeCache* cache_;
+ CommonOperatorBuilder common_;
+ MachineOperatorBuilder machine_;
+
+ SetOncePointer<Node> c_entry_stub_constant_;
+ SetOncePointer<Node> true_constant_;
+ SetOncePointer<Node> false_constant_;
+};
+
+
+template <size_t kPointerSize = kApiPointerSize>
+class ChangeLowering V8_FINAL : public ChangeLoweringBase {
+ public:
+ ChangeLowering(Graph* graph, Linkage* linkage);
+ ChangeLowering(Graph* graph, Linkage* linkage, CommonNodeCache* cache)
+ : ChangeLoweringBase(graph, linkage, cache) {}
+ virtual ~ChangeLowering() {}
+
+ virtual Reduction Reduce(Node* node) V8_OVERRIDE;
+
+ private:
+ Reduction ChangeBoolToBit(Node* val);
+ Reduction ChangeInt32ToTagged(Node* val, Node* effect, Node* control);
+ Reduction ChangeTaggedToFloat64(Node* val, Node* effect, Node* control);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CHANGE_LOWERING_H_
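
ChangeLowering is a Reducer, so it is presumably driven by the GraphReducer
declared in graph-reducer.h (part of this change but not shown in this hunk).
The reducer-registration names below are an assumption about that interface,
not something this header guarantees:

  // Sketch only; GraphReducer, AddReducer and ReduceGraph are assumed here.
  void LowerChanges(Graph* graph, Linkage* linkage) {
    ChangeLowering<kPointerSize> lowering(graph, linkage);
    GraphReducer reducer(graph);
    reducer.AddReducer(&lowering);
    reducer.ReduceGraph();
  }
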
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
new file mode 100644
index 000000000..a3f7e4c11
--- /dev/null
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Converts InstructionOperands from a given instruction to
+// architecture-specific registers and operands after they have been assigned
+// by the register allocator.
+class InstructionOperandConverter {
+ public:
+ InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : gen_(gen), instr_(instr) {}
+
+ Register InputRegister(int index) {
+ return ToRegister(instr_->InputAt(index));
+ }
+
+ DoubleRegister InputDoubleRegister(int index) {
+ return ToDoubleRegister(instr_->InputAt(index));
+ }
+
+ double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+
+ int32_t InputInt32(int index) {
+ return ToConstant(instr_->InputAt(index)).ToInt32();
+ }
+
+ int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+
+ int16_t InputInt16(int index) {
+ return static_cast<int16_t>(InputInt32(index));
+ }
+
+ uint8_t InputInt5(int index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0x1F);
+ }
+
+ uint8_t InputInt6(int index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0x3F);
+ }
+
+ Handle<HeapObject> InputHeapObject(int index) {
+ return ToHeapObject(instr_->InputAt(index));
+ }
+
+ Label* InputLabel(int index) {
+ return gen_->code()->GetLabel(InputBlock(index));
+ }
+
+ BasicBlock* InputBlock(int index) {
+ NodeId block_id = static_cast<NodeId>(InputInt32(index));
+ // operand should be a block id.
+ DCHECK(block_id >= 0);
+ DCHECK(block_id < gen_->schedule()->BasicBlockCount());
+ return gen_->schedule()->GetBlockById(block_id);
+ }
+
+ Register OutputRegister(int index = 0) {
+ return ToRegister(instr_->OutputAt(index));
+ }
+
+ DoubleRegister OutputDoubleRegister() {
+ return ToDoubleRegister(instr_->Output());
+ }
+
+ Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
+ Register ToRegister(InstructionOperand* op) {
+ DCHECK(op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+ }
+
+ DoubleRegister ToDoubleRegister(InstructionOperand* op) {
+ DCHECK(op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+ }
+
+ Constant ToConstant(InstructionOperand* operand) {
+ if (operand->IsImmediate()) {
+ return gen_->code()->GetImmediate(operand->index());
+ }
+ return gen_->code()->GetConstant(operand->index());
+ }
+
+ double ToDouble(InstructionOperand* operand) {
+ return ToConstant(operand).ToFloat64();
+ }
+
+ Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
+ return ToConstant(operand).ToHeapObject();
+ }
+
+ Frame* frame() const { return gen_->frame(); }
+ Isolate* isolate() const { return gen_->isolate(); }
+ Linkage* linkage() const { return gen_->linkage(); }
+
+ protected:
+ CodeGenerator* gen_;
+ Instruction* instr_;
+};
+
+
+// TODO(dcarney): generify this on bleeding_edge and replace this call
+// when merged.
+static inline void FinishCode(MacroAssembler* masm) {
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+ masm->CheckConstPool(true, false);
+#endif
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_IMPL_H_
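
The converter above is intended to be instantiated inside the per-architecture
AssembleArchInstruction (declared further down in code-generator.h). A minimal
sketch of that pattern; kSketchAdd32 and EmitAdd are placeholders, and
ArchOpcodeField is assumed to come from instruction-codes.h:

  // Sketch only; not taken from any of the real architecture backends.
  void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    InstructionOperandConverter i(this, instr);
    switch (ArchOpcodeField::decode(instr->opcode())) {
      case kSketchAdd32:
        // Emit the architecture's integer add using the converted operands.
        EmitAdd(masm(), i.OutputRegister(), i.InputRegister(0),
                i.InputRegister(1));
        break;
      default:
        UNREACHABLE();
    }
  }
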
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
new file mode 100644
index 000000000..878ace3be
--- /dev/null
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -0,0 +1,381 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeGenerator::CodeGenerator(InstructionSequence* code)
+ : code_(code),
+ current_block_(NULL),
+ current_source_position_(SourcePosition::Invalid()),
+ masm_(code->zone()->isolate(), NULL, 0),
+ resolver_(this),
+ safepoints_(code->zone()),
+ lazy_deoptimization_entries_(
+ LazyDeoptimizationEntries::allocator_type(code->zone())),
+ deoptimization_states_(
+ DeoptimizationStates::allocator_type(code->zone())),
+ deoptimization_literals_(Literals::allocator_type(code->zone())),
+ translations_(code->zone()) {
+ deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
+}
+
+
+Handle<Code> CodeGenerator::GenerateCode() {
+ CompilationInfo* info = linkage()->info();
+
+ // Emit a code line info recording start event.
+ PositionsRecorder* recorder = masm()->positions_recorder();
+ LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
+
+ // Place function entry hook if requested to do so.
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm());
+ }
+
+ // Architecture-specific, linkage-specific prologue.
+ info->set_prologue_offset(masm()->pc_offset());
+ AssemblePrologue();
+
+ // Assemble all instructions.
+ for (InstructionSequence::const_iterator i = code()->begin();
+ i != code()->end(); ++i) {
+ AssembleInstruction(*i);
+ }
+
+ FinishCode(masm());
+
+ safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
+ // TODO(titzer): what are the right code flags here?
+ Code::Kind kind = Code::STUB;
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ kind = Code::OPTIMIZED_FUNCTION;
+ }
+ Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+ masm(), Code::ComputeFlags(kind), info);
+ result->set_is_turbofanned(true);
+ result->set_stack_slots(frame()->GetSpillSlotCount());
+ result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+
+ PopulateDeoptimizationData(result);
+
+ // Emit a code line info recording stop event.
+ void* line_info = recorder->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+
+ return result;
+}
+
+
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ const ZoneList<InstructionOperand*>* operands =
+ pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ InstructionOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ Register reg = Register::FromAllocationIndex(pointer->index());
+ safepoint.DefinePointerRegister(reg, zone());
+ }
+ }
+}
+
+
+void CodeGenerator::AssembleInstruction(Instruction* instr) {
+ if (instr->IsBlockStart()) {
+ // Bind a label for a block start and handle parallel moves.
+ BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
+ current_block_ = block_start->block();
+ if (FLAG_code_comments) {
+ // TODO(titzer): these code comments are a giant memory leak.
+ Vector<char> buffer = Vector<char>::New(32);
+ SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
+ masm()->RecordComment(buffer.start());
+ }
+ masm()->bind(block_start->label());
+ }
+ if (instr->IsGapMoves()) {
+ // Handle parallel moves associated with the gap instruction.
+ AssembleGap(GapInstruction::cast(instr));
+ } else if (instr->IsSourcePosition()) {
+ AssembleSourcePosition(SourcePositionInstruction::cast(instr));
+ } else {
+ // Assemble architecture-specific code for the instruction.
+ AssembleArchInstruction(instr);
+
+ // Assemble branches or boolean materializations after this instruction.
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ switch (mode) {
+ case kFlags_none:
+ return;
+ case kFlags_set:
+ return AssembleArchBoolean(instr, condition);
+ case kFlags_branch:
+ return AssembleArchBranch(instr, condition);
+ }
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
+ SourcePosition source_position = instr->source_position();
+ if (source_position == current_source_position_) return;
+ DCHECK(!source_position.IsInvalid());
+ if (!source_position.IsUnknown()) {
+ int code_pos = source_position.raw();
+ masm()->positions_recorder()->RecordPosition(source_position.raw());
+ masm()->positions_recorder()->WriteRecordedPositions();
+ if (FLAG_code_comments) {
+ Vector<char> buffer = Vector<char>::New(256);
+ CompilationInfo* info = linkage()->info();
+ int ln = Script::GetLineNumber(info->script(), code_pos);
+ int cn = Script::GetColumnNumber(info->script(), code_pos);
+ if (info->script()->name()->IsString()) {
+ Handle<String> file(String::cast(info->script()->name()));
+ base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+ file->ToCString().get(), ln, cn);
+ } else {
+ base::OS::SNPrintF(buffer.start(), buffer.length(),
+ "-- <unknown>:%d:%d --", ln, cn);
+ }
+ masm()->RecordComment(buffer.start());
+ }
+ }
+ current_source_position_ = source_position;
+}
+
+
+void CodeGenerator::AssembleGap(GapInstruction* instr) {
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ GapInstruction::InnerPosition inner_pos =
+ static_cast<GapInstruction::InnerPosition>(i);
+ ParallelMove* move = instr->GetParallelMove(inner_pos);
+ if (move != NULL) resolver()->Resolve(move);
+ }
+}
+
+
+void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+ CompilationInfo* info = linkage()->info();
+ int deopt_count = code()->GetDeoptimizationEntryCount();
+ int patch_count = static_cast<int>(lazy_deoptimization_entries_.size());
+ if (patch_count == 0 && deopt_count == 0) return;
+ Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
+ isolate(), deopt_count, patch_count, TENURED);
+
+ Handle<ByteArray> translation_array =
+ translations_.CreateByteArray(isolate()->factory());
+
+ data->SetTranslationByteArray(*translation_array);
+ data->SetInlinedFunctionCount(Smi::FromInt(0));
+ data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
+ // TODO(jarin) The following code was copied over from Lithium, not sure
+ // whether the scope or the IsOptimizing condition are really needed.
+ if (info->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
+ static_cast<int>(deoptimization_literals_.size()), TENURED);
+ {
+ AllowDeferredHandleDereference copy_handles;
+ for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ // No OSR in Turbofan yet...
+ BailoutId osr_ast_id = BailoutId::None();
+ data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+
+ // Populate deoptimization entries.
+ for (int i = 0; i < deopt_count; i++) {
+ FrameStateDescriptor* descriptor = code()->GetDeoptimizationEntry(i);
+ data->SetAstId(i, descriptor->bailout_id());
+ CHECK_NE(NULL, deoptimization_states_[i]);
+ data->SetTranslationIndex(
+ i, Smi::FromInt(deoptimization_states_[i]->translation_id_));
+ data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+ data->SetPc(i, Smi::FromInt(-1));
+ }
+
+ // Populate the return address patcher entries.
+ for (int i = 0; i < patch_count; ++i) {
+ LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
+ DCHECK(entry.position_after_call() == entry.continuation()->pos() ||
+ IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
+ entry.continuation()->pos()));
+ data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
+ data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
+ }
+
+ code_object->set_deoptimization_data(*data);
+}
+
+
+void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
+ InstructionOperandConverter i(this, instr);
+
+ Label after_call;
+ masm()->bind(&after_call);
+
+ // The continuation and deoptimization are the last two inputs:
+ BasicBlock* cont_block =
+ i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+ BasicBlock* deopt_block =
+ i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+
+ Label* cont_label = code_->GetLabel(cont_block);
+ Label* deopt_label = code_->GetLabel(deopt_block);
+
+ lazy_deoptimization_entries_.push_back(
+ LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
+}
+
+
+int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = static_cast<int>(deoptimization_literals_.size());
+ for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.push_back(literal);
+ return result;
+}
+
+
+void CodeGenerator::BuildTranslation(Instruction* instr,
+ int deoptimization_id) {
+ // We should build translation only once.
+ DCHECK_EQ(NULL, deoptimization_states_[deoptimization_id]);
+
+ FrameStateDescriptor* descriptor =
+ code()->GetDeoptimizationEntry(deoptimization_id);
+ Translation translation(&translations_, 1, 1, zone());
+ translation.BeginJSFrame(descriptor->bailout_id(),
+ Translation::kSelfLiteralId,
+ descriptor->size() - descriptor->parameters_count());
+
+ for (int i = 0; i < descriptor->size(); i++) {
+ AddTranslationForOperand(&translation, instr, instr->InputAt(i));
+ }
+
+ deoptimization_states_[deoptimization_id] =
+ new (zone()) DeoptimizationState(translation.index());
+}
+
+
+void CodeGenerator::AddTranslationForOperand(Translation* translation,
+ Instruction* instr,
+ InstructionOperand* op) {
+ if (op->IsStackSlot()) {
+ translation->StoreStackSlot(op->index());
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ InstructionOperandConverter converter(this, instr);
+ translation->StoreRegister(converter.ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ InstructionOperandConverter converter(this, instr);
+ translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ } else if (op->IsImmediate()) {
+ InstructionOperandConverter converter(this, instr);
+ Constant constant = converter.ToConstant(op);
+ Handle<Object> constant_object;
+ switch (constant.type()) {
+ case Constant::kInt32:
+ constant_object =
+ isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ break;
+ case Constant::kFloat64:
+ constant_object =
+ isolate()->factory()->NewHeapNumber(constant.ToFloat64());
+ break;
+ case Constant::kHeapObject:
+ constant_object = constant.ToHeapObject();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ int literal_id = DefineDeoptimizationLiteral(constant_object);
+ translation->StoreLiteral(literal_id);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+#if !V8_TURBOFAN_BACKEND
+
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
+
+
+#ifdef DEBUG
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ UNIMPLEMENTED();
+ return false;
+}
+#endif
+
+#endif // !V8_TURBOFAN_BACKEND
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
new file mode 100644
index 000000000..b603c555c
--- /dev/null
+++ b/deps/v8/src/compiler/code-generator.h
@@ -0,0 +1,146 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_H_
+#define V8_COMPILER_CODE_GENERATOR_H_
+
+#include <deque>
+
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/instruction.h"
+#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Generates native code for a sequence of instructions.
+class CodeGenerator V8_FINAL : public GapResolver::Assembler {
+ public:
+ explicit CodeGenerator(InstructionSequence* code);
+
+ // Generate native code.
+ Handle<Code> GenerateCode();
+
+ InstructionSequence* code() const { return code_; }
+ Frame* frame() const { return code()->frame(); }
+ Graph* graph() const { return code()->graph(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Linkage* linkage() const { return code()->linkage(); }
+ Schedule* schedule() const { return code()->schedule(); }
+
+ private:
+ MacroAssembler* masm() { return &masm_; }
+ GapResolver* resolver() { return &resolver_; }
+ SafepointTableBuilder* safepoints() { return &safepoints_; }
+ Zone* zone() const { return code()->zone(); }
+
+ // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case a fall-through can be used.
+ bool IsNextInAssemblyOrder(const BasicBlock* block) const {
+ return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+ block->deferred_ == current_block_->deferred_;
+ }
+
+ // Record a safepoint with the given pointer map.
+ void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode);
+
+ // Assemble code for the specified instruction.
+ void AssembleInstruction(Instruction* instr);
+ void AssembleSourcePosition(SourcePositionInstruction* instr);
+ void AssembleGap(GapInstruction* gap);
+
+ // ===========================================================================
+ // ============= Architecture-specific code generation methods. ==============
+ // ===========================================================================
+
+ void AssembleArchInstruction(Instruction* instr);
+ void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+ void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+
+ // Generates an architecture-specific, descriptor-specific prologue
+ // to set up a stack frame.
+ void AssemblePrologue();
+ // Generates an architecture-specific, descriptor-specific return sequence
+ // to tear down a stack frame.
+ void AssembleReturn();
+
+ // ===========================================================================
+ // ============== Architecture-specific gap resolver methods. ================
+ // ===========================================================================
+
+ // Interface used by the gap resolver to emit moves and swaps.
+ virtual void AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) V8_OVERRIDE;
+ virtual void AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) V8_OVERRIDE;
+
+ // ===========================================================================
+ // Deoptimization table construction
+ void RecordLazyDeoptimizationEntry(Instruction* instr);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void BuildTranslation(Instruction* instr, int deoptimization_id);
+ void AddTranslationForOperand(Translation* translation, Instruction* instr,
+ InstructionOperand* op);
+ void AddNopForSmiCodeInlining();
+#if DEBUG
+ static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc);
+#endif // DEBUG
+ // ===========================================================================
+
+ class LazyDeoptimizationEntry V8_FINAL {
+ public:
+ LazyDeoptimizationEntry(int position_after_call, Label* continuation,
+ Label* deoptimization)
+ : position_after_call_(position_after_call),
+ continuation_(continuation),
+ deoptimization_(deoptimization) {}
+
+ int position_after_call() const { return position_after_call_; }
+ Label* continuation() const { return continuation_; }
+ Label* deoptimization() const { return deoptimization_; }
+
+ private:
+ int position_after_call_;
+ Label* continuation_;
+ Label* deoptimization_;
+ };
+
+ struct DeoptimizationState : ZoneObject {
+ int translation_id_;
+
+ explicit DeoptimizationState(int translation_id)
+ : translation_id_(translation_id) {}
+ };
+
+ typedef std::deque<LazyDeoptimizationEntry,
+ zone_allocator<LazyDeoptimizationEntry> >
+ LazyDeoptimizationEntries;
+ typedef std::deque<DeoptimizationState*,
+ zone_allocator<DeoptimizationState*> >
+ DeoptimizationStates;
+ typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals;
+
+ InstructionSequence* code_;
+ BasicBlock* current_block_;
+ SourcePosition current_source_position_;
+ MacroAssembler masm_;
+ GapResolver resolver_;
+ SafepointTableBuilder safepoints_;
+ LazyDeoptimizationEntries lazy_deoptimization_entries_;
+ DeoptimizationStates deoptimization_states_;
+ Literals deoptimization_literals_;
+ TranslationBuffer translations_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_H_
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
new file mode 100644
index 000000000..2b0ac0b6e
--- /dev/null
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
+#define V8_COMPILER_COMMON_NODE_CACHE_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Bundles various caches for common nodes.
+class CommonNodeCache V8_FINAL : public ZoneObject {
+ public:
+ explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+
+ Node** FindInt32Constant(int32_t value) {
+ return int32_constants_.Find(zone_, value);
+ }
+
+ Node** FindFloat64Constant(double value) {
+ // We canonicalize double constants at the bit representation level.
+ return float64_constants_.Find(zone_, BitCast<int64_t>(value));
+ }
+
+ Node** FindExternalConstant(ExternalReference reference) {
+ return external_constants_.Find(zone_, reference.address());
+ }
+
+ Node** FindNumberConstant(double value) {
+ // We canonicalize double constants at the bit representation level.
+ return number_constants_.Find(zone_, BitCast<int64_t>(value));
+ }
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Int32NodeCache int32_constants_;
+ Int64NodeCache float64_constants_;
+ PtrNodeCache external_constants_;
+ Int64NodeCache number_constants_;
+ Zone* zone_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_COMMON_NODE_CACHE_H_
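common-node-cache.h keys Float64 and Number constants by their raw 64-bit pattern (BitCast<int64_t>) rather than by floating-point equality. A minimal standalone sketch of why bit-level keying matters, with std::unordered_map and a hypothetical DoubleBits helper standing in for NodeCache and BitCast (illustrative C++ only, not part of the patch):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <unordered_map>

// Reinterpret a double as its raw 64-bit pattern (the role BitCast<int64_t>
// plays in FindFloat64Constant/FindNumberConstant above).
static uint64_t DoubleBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits;
}

int main() {
  std::unordered_map<uint64_t, int> cache;  // bit pattern -> node id (toy cache)

  // +0.0 and -0.0 compare equal but have different bit patterns, so keying
  // by bits keeps them as two distinct cached constants.
  cache[DoubleBits(0.0)] = 1;
  assert(0.0 == -0.0);
  assert(cache.count(DoubleBits(-0.0)) == 0);

  // NaN never compares equal to itself, yet an identical NaN bit pattern
  // still canonicalizes onto a single cache entry.
  double nan = std::nan("");
  cache[DoubleBits(nan)] = 2;
  assert(nan != nan);
  assert(cache.count(DoubleBits(nan)) == 1);
  return 0;
}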
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
new file mode 100644
index 000000000..3b581ae0c
--- /dev/null
+++ b/deps/v8/src/compiler/common-operator.h
@@ -0,0 +1,284 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_H_
+#define V8_COMPILER_COMMON_OPERATOR_H_
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class ControlOperator : public Operator1<int> {
+ public:
+ ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs,
+ int outputs, int controls, const char* mnemonic)
+ : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
+ controls) {}
+
+ virtual OStream& PrintParameter(OStream& os) const { return os; } // NOLINT
+ int ControlInputCount() const { return parameter(); }
+};
+
+class CallOperator : public Operator1<CallDescriptor*> {
+ public:
+ CallOperator(CallDescriptor* descriptor, const char* mnemonic)
+ : Operator1<CallDescriptor*>(
+ IrOpcode::kCall, descriptor->properties(), descriptor->InputCount(),
+ descriptor->ReturnCount(), mnemonic, descriptor) {}
+
+ virtual OStream& PrintParameter(OStream& os) const { // NOLINT
+ return os << "[" << *parameter() << "]";
+ }
+};
+
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes.
+class CommonOperatorBuilder {
+ public:
+ explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define CONTROL_OP(name, inputs, controls) \
+ return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \
+ inputs, 0, controls, #name);
+
+ Operator* Start(int num_formal_parameters) {
+ // Outputs are formal parameters, plus context, receiver, and JSFunction.
+ int outputs = num_formal_parameters + 3;
+ return new (zone_) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
+ outputs, 0, "Start");
+ }
+ Operator* Dead() { CONTROL_OP(Dead, 0, 0); }
+ Operator* End() { CONTROL_OP(End, 0, 1); }
+ Operator* Branch() { CONTROL_OP(Branch, 1, 1); }
+ Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); }
+ Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); }
+ Operator* Throw() { CONTROL_OP(Throw, 1, 1); }
+ Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); }
+ Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); }
+
+ Operator* Deoptimize() {
+ return new (zone_)
+ ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize");
+ }
+
+ Operator* Return() {
+ return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return");
+ }
+
+ Operator* Merge(int controls) {
+ return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+ 0, controls, "Merge");
+ }
+
+ Operator* Loop(int controls) {
+ return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+ 0, controls, "Loop");
+ }
+
+ Operator* Parameter(int index) {
+ return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
+ 1, "Parameter", index);
+ }
+ Operator* Int32Constant(int32_t value) {
+ return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, Operator::kPure,
+ 0, 1, "Int32Constant", value);
+ }
+ Operator* Int64Constant(int64_t value) {
+ return new (zone_)
+ Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1,
+ "Int64Constant", value);
+ }
+ Operator* Float64Constant(double value) {
+ return new (zone_)
+ Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+ "Float64Constant", value);
+ }
+ Operator* ExternalConstant(ExternalReference value) {
+ return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant,
+ Operator::kPure, 0, 1,
+ "ExternalConstant", value);
+ }
+ Operator* NumberConstant(double value) {
+ return new (zone_)
+ Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+ "NumberConstant", value);
+ }
+ Operator* HeapConstant(PrintableUnique<Object> value) {
+ return new (zone_) Operator1<PrintableUnique<Object> >(
+ IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+ }
+ Operator* Phi(int arguments) {
+ DCHECK(arguments > 0); // Disallow empty phis.
+ return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure,
+ arguments, 1, "Phi", arguments);
+ }
+ Operator* EffectPhi(int arguments) {
+ DCHECK(arguments > 0); // Disallow empty phis.
+ return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+ 0, "EffectPhi", arguments);
+ }
+ Operator* StateValues(int arguments) {
+ return new (zone_) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
+ arguments, 1, "StateValues", arguments);
+ }
+ Operator* FrameState(BailoutId ast_id) {
+ return new (zone_) Operator1<BailoutId>(
+ IrOpcode::kFrameState, Operator::kPure, 3, 1, "FrameState", ast_id);
+ }
+ Operator* Call(CallDescriptor* descriptor) {
+ return new (zone_) CallOperator(descriptor, "Call");
+ }
+ Operator* Projection(int index) {
+ return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1,
+ 1, "Projection", index);
+ }
+
+ private:
+ Zone* zone_;
+};
+
+
+template <typename T>
+struct CommonOperatorTraits {
+ static inline bool Equals(T a, T b);
+ static inline bool HasValue(Operator* op);
+ static inline T ValueOf(Operator* op);
+};
+
+template <>
+struct CommonOperatorTraits<int32_t> {
+ static inline bool Equals(int32_t a, int32_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline int32_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kNumberConstant) {
+ // TODO(titzer): cache the converted int32 value in NumberConstant.
+ return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter());
+ }
+ CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
+ return static_cast<Operator1<int32_t>*>(op)->parameter();
+ }
+};
+
+template <>
+struct CommonOperatorTraits<uint32_t> {
+ static inline bool Equals(uint32_t a, uint32_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<int32_t>::HasValue(op);
+ }
+ static inline uint32_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kNumberConstant) {
+ // TODO(titzer): cache the converted uint32 value in NumberConstant.
+ return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter());
+ }
+ return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<int64_t> {
+ static inline bool Equals(int64_t a, int64_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kInt64Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline int64_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kInt32Constant) {
+ return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+ CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
+ return static_cast<Operator1<int64_t>*>(op)->parameter();
+ }
+};
+
+template <>
+struct CommonOperatorTraits<uint64_t> {
+ static inline bool Equals(uint64_t a, uint64_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<int64_t>::HasValue(op);
+ }
+ static inline uint64_t ValueOf(Operator* op) {
+ return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<double> {
+ static inline bool Equals(double a, double b) {
+ return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits;
+ }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kFloat64Constant ||
+ op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline double ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kFloat64Constant ||
+ op->opcode() == IrOpcode::kNumberConstant) {
+ return reinterpret_cast<Operator1<double>*>(op)->parameter();
+ }
+ return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<ExternalReference> {
+ static inline bool Equals(ExternalReference a, ExternalReference b) {
+ return a == b;
+ }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kExternalConstant;
+ }
+ static inline ExternalReference ValueOf(Operator* op) {
+ CHECK_EQ(IrOpcode::kExternalConstant, op->opcode());
+ return static_cast<Operator1<ExternalReference>*>(op)->parameter();
+ }
+};
+
+template <typename T>
+struct CommonOperatorTraits<PrintableUnique<T> > {
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kHeapConstant;
+ }
+ static inline PrintableUnique<T> ValueOf(Operator* op) {
+ CHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter();
+ }
+};
+
+template <typename T>
+struct CommonOperatorTraits<Handle<T> > {
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op);
+ }
+ static inline Handle<T> ValueOf(Operator* op) {
+ return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle();
+ }
+};
+
+
+template <typename T>
+inline T ValueOf(Operator* op) {
+ return CommonOperatorTraits<T>::ValueOf(op);
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_COMMON_OPERATOR_H_
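CommonOperatorTraits above recovers a typed constant value from a type-erased Operator* by dispatching on the opcode and downcasting to the matching Operator1<T>. A self-contained sketch of that pattern; Opcode, Operator, Operator1 and Traits here are simplified stand-ins, not the V8 classes:

#include <cassert>
#include <cstdint>

// Simplified stand-ins: an opcode-tagged operator that carries one typed
// parameter, mirroring the Operator1<T> shape used above.
enum Opcode { kInt32Constant, kFloat64Constant };

struct Operator {
  explicit Operator(Opcode op) : opcode(op) {}
  Opcode opcode;
};

template <typename T>
struct Operator1 : Operator {
  Operator1(Opcode op, T value) : Operator(op), parameter(value) {}
  T parameter;
};

// The traits pattern: HasValue()/ValueOf() recover the typed payload from a
// type-erased Operator* by checking the opcode and downcasting.
template <typename T>
struct Traits;

template <>
struct Traits<int32_t> {
  static bool HasValue(const Operator* op) {
    return op->opcode == kInt32Constant;
  }
  static int32_t ValueOf(const Operator* op) {
    assert(HasValue(op));
    return static_cast<const Operator1<int32_t>*>(op)->parameter;
  }
};

int main() {
  Operator1<int32_t> forty_two(kInt32Constant, 42);
  const Operator* erased = &forty_two;  // graph code only sees Operator*
  assert(Traits<int32_t>::HasValue(erased));
  assert(Traits<int32_t>::ValueOf(erased) == 42);
  return 0;
}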
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
new file mode 100644
index 000000000..3b7d05ba5
--- /dev/null
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "control-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+void IfBuilder::If(Node* condition) {
+ builder_->NewBranch(condition);
+ else_environment_ = environment()->CopyForConditional();
+}
+
+
+void IfBuilder::Then() { builder_->NewIfTrue(); }
+
+
+void IfBuilder::Else() {
+ builder_->NewMerge();
+ then_environment_ = environment();
+ set_environment(else_environment_);
+ builder_->NewIfFalse();
+}
+
+
+void IfBuilder::End() {
+ then_environment_->Merge(environment());
+ set_environment(then_environment_);
+}
+
+
+void LoopBuilder::BeginLoop() {
+ builder_->NewLoop();
+ loop_environment_ = environment()->CopyForLoop();
+ continue_environment_ = environment()->CopyAsUnreachable();
+ break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void LoopBuilder::Continue() {
+ continue_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::EndBody() {
+ continue_environment_->Merge(environment());
+ set_environment(continue_environment_);
+}
+
+
+void LoopBuilder::EndLoop() {
+ loop_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+
+
+void LoopBuilder::BreakUnless(Node* condition) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition);
+ control_if.Then();
+ control_if.Else();
+ Break();
+ control_if.End();
+}
+
+
+void SwitchBuilder::BeginSwitch() {
+ body_environment_ = environment()->CopyAsUnreachable();
+ label_environment_ = environment()->CopyAsUnreachable();
+ break_environment_ = environment()->CopyAsUnreachable();
+ body_environments_.AddBlock(NULL, case_count(), zone());
+}
+
+
+void SwitchBuilder::BeginLabel(int index, Node* condition) {
+ builder_->NewBranch(condition);
+ label_environment_ = environment()->CopyForConditional();
+ builder_->NewIfTrue();
+ body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::EndLabel() {
+ set_environment(label_environment_);
+ builder_->NewIfFalse();
+}
+
+
+void SwitchBuilder::DefaultAt(int index) {
+ label_environment_ = environment()->CopyAsUnreachable();
+ body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::BeginCase(int index) {
+ set_environment(body_environments_[index]);
+ environment()->Merge(body_environment_);
+}
+
+
+void SwitchBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void SwitchBuilder::EndCase() { body_environment_ = environment(); }
+
+
+void SwitchBuilder::EndSwitch() {
+ break_environment_->Merge(label_environment_);
+ break_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+
+
+void BlockBuilder::BeginBlock() {
+ break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void BlockBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void BlockBuilder::EndBlock() {
+ break_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
new file mode 100644
index 000000000..695282be8
--- /dev/null
+++ b/deps/v8/src/compiler/control-builders.h
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
+#define V8_COMPILER_CONTROL_BUILDERS_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Base class for all control builders. Also provides a common interface for
+// control builders to handle 'break' and 'continue' statements when they are
+// used to model breakable statements.
+class ControlBuilder {
+ public:
+ explicit ControlBuilder(StructuredGraphBuilder* builder)
+ : builder_(builder) {}
+ virtual ~ControlBuilder() {}
+
+ // Interface for break and continue.
+ virtual void Break() { UNREACHABLE(); }
+ virtual void Continue() { UNREACHABLE(); }
+
+ protected:
+ typedef StructuredGraphBuilder Builder;
+ typedef StructuredGraphBuilder::Environment Environment;
+
+ Zone* zone() const { return builder_->zone(); }
+ Environment* environment() { return builder_->environment(); }
+ void set_environment(Environment* env) { builder_->set_environment(env); }
+
+ Builder* builder_;
+};
+
+
+// Tracks control flow for a conditional statement.
+class IfBuilder : public ControlBuilder {
+ public:
+ explicit IfBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder),
+ then_environment_(NULL),
+ else_environment_(NULL) {}
+
+ // Primitive control commands.
+ void If(Node* condition);
+ void Then();
+ void Else();
+ void End();
+
+ private:
+ Environment* then_environment_; // Environment after the 'then' body.
+ Environment* else_environment_; // Environment for the 'else' body.
+};
+
+
+// Tracks control flow for an iteration statement.
+class LoopBuilder : public ControlBuilder {
+ public:
+ explicit LoopBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder),
+ loop_environment_(NULL),
+ continue_environment_(NULL),
+ break_environment_(NULL) {}
+
+ // Primitive control commands.
+ void BeginLoop();
+ void EndBody();
+ void EndLoop();
+
+ // Primitive support for break and continue.
+ virtual void Continue();
+ virtual void Break();
+
+ // Compound control command for conditional break.
+ void BreakUnless(Node* condition);
+
+ private:
+ Environment* loop_environment_; // Environment of the loop header.
+ Environment* continue_environment_; // Environment after the loop body.
+ Environment* break_environment_; // Environment after the loop exits.
+};
+
+
+// Tracks control flow for a switch statement.
+class SwitchBuilder : public ControlBuilder {
+ public:
+ explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+ : ControlBuilder(builder),
+ body_environment_(NULL),
+ label_environment_(NULL),
+ break_environment_(NULL),
+ body_environments_(case_count, zone()) {}
+
+ // Primitive control commands.
+ void BeginSwitch();
+ void BeginLabel(int index, Node* condition);
+ void EndLabel();
+ void DefaultAt(int index);
+ void BeginCase(int index);
+ void EndCase();
+ void EndSwitch();
+
+ // Primitive support for break.
+ virtual void Break();
+
+ // The number of cases within a switch is statically known.
+ int case_count() const { return body_environments_.capacity(); }
+
+ private:
+ Environment* body_environment_; // Environment after last case body.
+ Environment* label_environment_; // Environment for next label condition.
+ Environment* break_environment_; // Environment after the switch exits.
+ ZoneList<Environment*> body_environments_;
+};
+
+
+// Tracks control flow for a block statement.
+class BlockBuilder : public ControlBuilder {
+ public:
+ explicit BlockBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder), break_environment_(NULL) {}
+
+ // Primitive control commands.
+ void BeginBlock();
+ void EndBlock();
+
+ // Primitive support for break.
+ virtual void Break();
+
+ private:
+ Environment* break_environment_; // Environment after the block exits.
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_CONTROL_BUILDERS_H_
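The builders above all follow the same environment-splitting protocol: copy the environment where control diverges, switch the current environment between bodies, and merge at the join. A toy, self-contained model of the IfBuilder call sequence; Env and its string "phi" merge are illustrative stand-ins for StructuredGraphBuilder::Environment, not real V8 code:

#include <cassert>
#include <string>
#include <vector>

// A toy "environment": one symbolic value per local variable. The real
// Environment tracks SSA values plus effect/control dependencies; the string
// "phi(a,b)" stands in for inserting a Phi node at the merge point.
struct Env {
  std::vector<std::string> values;
  void Merge(const Env& other) {
    for (size_t i = 0; i < values.size(); ++i) {
      if (values[i] != other.values[i]) {
        values[i] = "phi(" + values[i] + "," + other.values[i] + ")";
      }
    }
  }
};

int main() {
  // Call sequence mirrored from IfBuilder: If() copies the environment for
  // the else-branch, Else() swaps which copy is current, End() merges.
  Env current{{"x0"}};
  Env else_env = current;        // IfBuilder::If   -> CopyForConditional()
  current.values[0] = "x_then";  // then-body assigns x
  Env then_env = current;        // IfBuilder::Else -> remember then-env,
  current = else_env;            //                    switch to else-env
  current.values[0] = "x_else";  // else-body assigns x
  then_env.Merge(current);       // IfBuilder::End  -> merge and continue
  current = then_env;            //                    with the merged env
  assert(current.values[0] == "phi(x_then,x_else)");
  return 0;
}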
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
new file mode 100644
index 000000000..afcbc3706
--- /dev/null
+++ b/deps/v8/src/compiler/frame.h
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_H_
+#define V8_COMPILER_FRAME_H_
+
+#include "src/v8.h"
+
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects the spill slot requirements and the allocated general and double
+// registers for a compiled function. Frames are usually populated by the
+// register allocator and are used by Linkage to generate code for the prologue
+// and epilogue to compiled code.
+class Frame {
+ public:
+ Frame()
+ : register_save_area_size_(0),
+ spill_slot_count_(0),
+ double_spill_slot_count_(0),
+ allocated_registers_(NULL),
+ allocated_double_registers_(NULL) {}
+
+ inline int GetSpillSlotCount() { return spill_slot_count_; }
+ inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+
+ void SetAllocatedRegisters(BitVector* regs) {
+ DCHECK(allocated_registers_ == NULL);
+ allocated_registers_ = regs;
+ }
+
+ void SetAllocatedDoubleRegisters(BitVector* regs) {
+ DCHECK(allocated_double_registers_ == NULL);
+ allocated_double_registers_ = regs;
+ }
+
+ bool DidAllocateDoubleRegisters() {
+ return !allocated_double_registers_->IsEmpty();
+ }
+
+ void SetRegisterSaveAreaSize(int size) {
+ DCHECK(IsAligned(size, kPointerSize));
+ register_save_area_size_ = size;
+ }
+
+ int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+
+ int AllocateSpillSlot(bool is_double) {
+ // If 32-bit, skip one if the new slot is a double.
+ if (is_double) {
+ if (kDoubleSize > kPointerSize) {
+ DCHECK(kDoubleSize == kPointerSize * 2);
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ }
+ double_spill_slot_count_++;
+ }
+ return spill_slot_count_++;
+ }
+
+ private:
+ int register_save_area_size_;
+ int spill_slot_count_;
+ int double_spill_slot_count_;
+ BitVector* allocated_registers_;
+ BitVector* allocated_double_registers_;
+};
+
+
+// Represents an offset from either the stack pointer or frame pointer.
+class FrameOffset {
+ public:
+ inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
+ inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
+ inline int offset() { return offset_ & ~1; }
+
+ inline static FrameOffset FromStackPointer(int offset) {
+ DCHECK((offset & 1) == 0);
+ return FrameOffset(offset | kFromSp);
+ }
+
+ inline static FrameOffset FromFramePointer(int offset) {
+ DCHECK((offset & 1) == 0);
+ return FrameOffset(offset | kFromFp);
+ }
+
+ private:
+ explicit FrameOffset(int offset) : offset_(offset) {}
+
+ int offset_; // Encodes SP or FP in the low order bit.
+
+ static const int kFromSp = 1;
+ static const int kFromFp = 0;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_FRAME_H_
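A standalone trace of Frame::AllocateSpillSlot and the FrameOffset low-bit encoding, assuming a 32-bit target (kPointerSize == 4, kDoubleSize == 8). The reading that a double's returned index is forced odd so the value occupies an aligned pair of slots is my interpretation of the code above, not documented behaviour:

#include <cassert>

// Model a 32-bit target: 4-byte pointer slots, 8-byte doubles.
const int kPointerSize = 4;
const int kDoubleSize = 8;

static int spill_slot_count = 0;

int AllocateSpillSlot(bool is_double) {
  if (is_double && kDoubleSize > kPointerSize) {
    spill_slot_count++;     // skip ahead ...
    spill_slot_count |= 1;  // ... so the returned index is odd (aligned pair)
  }
  return spill_slot_count++;
}

// FrameOffset: "relative to SP or FP?" packed into bit 0 of an even offset.
int FromStackPointer(int offset) {
  assert((offset & 1) == 0);
  return offset | 1;  // kFromSp == 1; FP-relative offsets keep bit 0 clear
}
bool from_stack_pointer(int encoded) { return (encoded & 1) == 1; }
int offset_of(int encoded) { return encoded & ~1; }

int main() {
  assert(AllocateSpillSlot(false) == 0);  // plain pointer slot
  assert(AllocateSpillSlot(true) == 3);   // double: skips to the next odd index
  assert(AllocateSpillSlot(true) == 5);   // already on an odd boundary
  assert(AllocateSpillSlot(false) == 6);

  int encoded = FromStackPointer(16);
  assert(from_stack_pointer(encoded) && offset_of(encoded) == 16);
  return 0;
}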
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
new file mode 100644
index 000000000..f36960717
--- /dev/null
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -0,0 +1,136 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef ZoneList<MoveOperands>::iterator op_iterator;
+
+#ifdef ENABLE_SLOW_DCHECKS
+// TODO(svenpanne) Brush up InstructionOperand with comparison?
+struct InstructionOperandComparator {
+ bool operator()(const InstructionOperand* x,
+ const InstructionOperand* y) const {
+ return (x->kind() < y->kind()) ||
+ (x->kind() == y->kind() && x->index() < y->index());
+ }
+};
+#endif
+
+// No operand should be the destination for more than one move.
+static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
+#ifdef ENABLE_SLOW_DCHECKS
+ std::set<InstructionOperand*, InstructionOperandComparator> seen;
+ for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
+ SLOW_DCHECK(seen.find(i->destination()) == seen.end());
+ seen.insert(i->destination());
+ }
+#endif
+}
+
+
+void GapResolver::Resolve(ParallelMove* parallel_move) const {
+ ZoneList<MoveOperands>* moves = parallel_move->move_operands();
+ // TODO(svenpanne) Use the member version of remove_if when we use real lists.
+ op_iterator end =
+ std::remove_if(moves->begin(), moves->end(),
+ std::mem_fun_ref(&MoveOperands::IsRedundant));
+ moves->Rewind(static_cast<int>(end - moves->begin()));
+
+ VerifyMovesAreInjective(moves);
+
+ for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
+ if (!move->IsEliminated()) PerformMove(moves, &*move);
+ }
+}
+
+
+void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
+ MoveOperands* move) const {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We mark a
+ // move as "pending" on entry to PerformMove in order to detect cycles in the
+ // move graph. We use operand swaps to resolve cycles, which means that a
+ // call to PerformMove could change any source operand in the move graph.
+ DCHECK(!move->IsPending());
+ DCHECK(!move->IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ DCHECK_NOT_NULL(move->source()); // Or else it will look eliminated.
+ InstructionOperand* destination = move->destination();
+ move->set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve dependencies.
+ // Any unperformed, unpending move with a source the same as this one's
+ // destination blocks this one so recursively perform all such moves.
+ for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ if (other->Blocks(destination) && !other->IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does not
+ // miss any). Assume there is a non-blocking move with source A and this
+ // move is blocked on source B and there is a swap of A and B. Then A and
+ // B must be involved in the same cycle (or they would not be swapped).
+ // Since this move's destination is B and there is only a single incoming
+ // edge to an operand, this move must also be involved in the same cycle.
+ // In that case, the blocking move will be created but will be "pending"
+ // when we return from PerformMove.
+ PerformMove(moves, other);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as pending, so
+ // restore its destination.
+ move->set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and so
+ // it may now be the last move in the cycle. If so remove it.
+ InstructionOperand* source = move->source();
+ if (source->Equals(destination)) {
+ move->Eliminate();
+ return;
+ }
+
+ // The move may be blocked on a (at most one) pending move, in which case we
+ // have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ op_iterator blocker = std::find_if(
+ moves->begin(), moves->end(),
+ std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+ if (blocker == moves->end()) {
+ // The easy case: This move is not blocked.
+ assembler_->AssembleMove(source, destination);
+ move->Eliminate();
+ return;
+ }
+
+ DCHECK(blocker->IsPending());
+ // Ensure source is a register or both are stack slots, to limit swap cases.
+ if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ std::swap(source, destination);
+ }
+ assembler_->AssembleSwap(source, destination);
+ move->Eliminate();
+
+ // Any unperformed (including pending) move with a source of either this
+  // move's source or destination needs to have its source changed to
+ // reflect the state of affairs after the swap.
+ for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ if (other->Blocks(source)) {
+ other->set_source(destination);
+ } else if (other->Blocks(destination)) {
+ other->set_source(source);
+ }
+ }
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/gap-resolver.h b/deps/v8/src/compiler/gap-resolver.h
new file mode 100644
index 000000000..5c3aeada6
--- /dev/null
+++ b/deps/v8/src/compiler/gap-resolver.h
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GAP_RESOLVER_H_
+#define V8_COMPILER_GAP_RESOLVER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GapResolver V8_FINAL {
+ public:
+ // Interface used by the gap resolver to emit moves and swaps.
+ class Assembler {
+ public:
+ virtual ~Assembler() {}
+
+ // Assemble move.
+ virtual void AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) = 0;
+ // Assemble swap.
+ virtual void AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) = 0;
+ };
+
+ explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(ParallelMove* parallel_move) const;
+
+ private:
+ // Perform the given move, possibly requiring other moves to satisfy
+ // dependencies.
+ void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+
+ // Assembler used to emit moves and save registers.
+ Assembler* const assembler_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GAP_RESOLVER_H_
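GapResolver::PerformMove resolves blockers depth-first, marks an in-flight move as pending by clearing its destination, and breaks any remaining cycle with a single swap followed by a source fix-up. A self-contained toy version of the same scheme over string-named registers (Move, AssembleMove and AssembleSwap here are illustrative, not the V8 types); the r0/r1 cycle resolves with one swap and the second move is then eliminated:

#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Toy parallel-move resolver following the same scheme as GapResolver:
// a move is "pending" while its destination is cleared, blockers are
// resolved depth-first, and a cycle is broken with one swap.
struct Move {
  std::string src, dst;
  bool done;
};

static std::map<std::string, int> regs;  // register file, for checking results

void AssembleMove(const std::string& s, const std::string& d) { regs[d] = regs[s]; }
void AssembleSwap(const std::string& s, const std::string& d) { std::swap(regs[s], regs[d]); }

void PerformMove(std::vector<Move>& moves, Move& move) {
  const std::string destination = move.dst;
  move.dst.clear();  // mark this move as pending
  // First resolve every non-pending move that reads our destination.
  for (Move& other : moves) {
    if (!other.done && !other.dst.empty() && other.src == destination) {
      PerformMove(moves, other);
    }
  }
  move.dst = destination;
  const std::string source = move.src;  // may have been redirected by recursion
  if (source == destination) {          // swaps may have made this move redundant
    move.done = true;
    return;
  }
  // A still-pending move reading our destination means a cycle: swap once,
  // then redirect the sources of the remaining moves, as PerformMove does.
  for (Move& other : moves) {
    if (!other.done && other.dst.empty() && other.src == destination) {
      AssembleSwap(source, destination);
      move.done = true;
      for (Move& m : moves) {
        if (m.done) continue;
        if (m.src == source) m.src = destination;
        else if (m.src == destination) m.src = source;
      }
      return;
    }
  }
  AssembleMove(source, destination);  // no blocker left: a plain move
  move.done = true;
}

void Resolve(std::vector<Move>& moves) {
  for (Move& m : moves) {
    if (m.src == m.dst) m.done = true;  // drop redundant moves up front
  }
  for (Move& m : moves) {
    if (!m.done) PerformMove(moves, m);
  }
}

int main() {
  regs = {{"r0", 10}, {"r1", 20}, {"r2", 30}};
  // One cycle (r0 <-> r1) plus one redundant move, all "in parallel".
  std::vector<Move> moves = {{"r0", "r1", false}, {"r1", "r0", false}, {"r2", "r2", false}};
  Resolve(moves);
  assert(regs["r1"] == 10 && regs["r0"] == 20 && regs["r2"] == 30);
  return 0;
}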
diff --git a/deps/v8/src/compiler/generic-algorithm-inl.h b/deps/v8/src/compiler/generic-algorithm-inl.h
new file mode 100644
index 000000000..a25131f69
--- /dev/null
+++ b/deps/v8/src/compiler/generic-algorithm-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class N>
+class NodeInputIterationTraits {
+ public:
+ typedef N Node;
+ typedef typename N::Inputs::iterator Iterator;
+
+ static Iterator begin(Node* node) { return node->inputs().begin(); }
+ static Iterator end(Node* node) { return node->inputs().end(); }
+ static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+ static Node* to(Iterator iterator) { return *iterator; }
+ static Node* from(Iterator iterator) { return iterator.edge().from(); }
+};
+
+template <class N>
+class NodeUseIterationTraits {
+ public:
+ typedef N Node;
+ typedef typename N::Uses::iterator Iterator;
+
+ static Iterator begin(Node* node) { return node->uses().begin(); }
+ static Iterator end(Node* node) { return node->uses().end(); }
+ static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+ static Node* to(Iterator iterator) { return *iterator; }
+ static Node* from(Iterator iterator) { return iterator.edge().to(); }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
diff --git a/deps/v8/src/compiler/generic-algorithm.h b/deps/v8/src/compiler/generic-algorithm.h
new file mode 100644
index 000000000..607d339ae
--- /dev/null
+++ b/deps/v8/src/compiler/generic-algorithm.h
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_H_
+
+#include <deque>
+#include <stack>
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
+// post-order. Visitation uses an explicitly allocated stack rather than the
+// execution stack to avoid stack overflow. Although GenericGraphVisit is
+// primarily intended to traverse networks of nodes through their
+// dependencies and uses, it also can be used to visit any graph-like network
+// by specifying custom traits.
+class GenericGraphVisit {
+ public:
+ enum Control {
+ CONTINUE = 0x0, // Continue depth-first normally
+ SKIP = 0x1, // Skip this node and its successors
+ REENTER = 0x2, // Allow reentering this node
+ DEFER = SKIP | REENTER
+ };
+
+ // struct Visitor {
+ // Control Pre(Traits::Node* current);
+ // Control Post(Traits::Node* current);
+ // void PreEdge(Traits::Node* from, int index, Traits::Node* to);
+ // void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+ // }
+ template <class Visitor, class Traits, class RootIterator>
+ static void Visit(GenericGraphBase* graph, RootIterator root_begin,
+ RootIterator root_end, Visitor* visitor) {
+ // TODO(bmeurer): Pass "local" zone as parameter.
+ Zone* zone = graph->zone();
+ typedef typename Traits::Node Node;
+ typedef typename Traits::Iterator Iterator;
+ typedef std::pair<Iterator, Iterator> NodeState;
+ typedef zone_allocator<NodeState> ZoneNodeStateAllocator;
+ typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque;
+ typedef std::stack<NodeState, NodeStateDeque> NodeStateStack;
+ NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone))));
+ BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone));
+ Node* current = *root_begin;
+ while (true) {
+ DCHECK(current != NULL);
+ const int id = current->id();
+ DCHECK(id >= 0);
+ DCHECK(id < Traits::max_id(graph)); // Must be a valid id.
+ bool visit = !GetVisited(&visited, id);
+ if (visit) {
+ Control control = visitor->Pre(current);
+ visit = !IsSkip(control);
+ if (!IsReenter(control)) SetVisited(&visited, id, true);
+ }
+ Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
+ Iterator end(Traits::end(current));
+ stack.push(NodeState(begin, end));
+ Node* post_order_node = current;
+ while (true) {
+ NodeState top = stack.top();
+ if (top.first == top.second) {
+ if (visit) {
+ Control control = visitor->Post(post_order_node);
+ DCHECK(!IsSkip(control));
+ SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+ }
+ stack.pop();
+ if (stack.empty()) {
+ if (++root_begin == root_end) return;
+ current = *root_begin;
+ break;
+ }
+ post_order_node = Traits::from(stack.top().first);
+ visit = true;
+ } else {
+ visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
+ Traits::to(top.first));
+ current = Traits::to(top.first);
+ if (!GetVisited(&visited, current->id())) break;
+ }
+ top = stack.top();
+ visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
+ Traits::to(top.first));
+ ++stack.top().first;
+ }
+ }
+ }
+
+ template <class Visitor, class Traits>
+ static void Visit(GenericGraphBase* graph, typename Traits::Node* current,
+ Visitor* visitor) {
+ typename Traits::Node* array[] = {current};
+ Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor);
+ }
+
+ template <class B, class S>
+ struct NullNodeVisitor {
+ Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
+ Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+ void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+ void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+ };
+
+ private:
+ static bool IsSkip(Control c) { return c & SKIP; }
+ static bool IsReenter(Control c) { return c & REENTER; }
+
+ // TODO(turbofan): resizing could be optionally templatized away.
+ static void SetVisited(BoolVector* visited, int id, bool value) {
+ if (id >= static_cast<int>(visited->size())) {
+ // Resize and set all values to unvisited.
+ visited->resize((3 * id) / 2, false);
+ }
+ visited->at(id) = value;
+ }
+
+ static bool GetVisited(BoolVector* visited, int id) {
+ if (id >= static_cast<int>(visited->size())) return false;
+ return visited->at(id);
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_ALGORITHM_H_
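GenericGraphVisit walks the graph with an explicitly managed stack so deep graphs cannot overflow the native stack, firing Pre on first entry to a node and Post once all of its successors are finished. A stripped-down standalone version of that traversal shape over a plain adjacency list (no SKIP/REENTER controls and no Traits indirection; not V8 code):

#include <cassert>
#include <cstddef>
#include <stack>
#include <string>
#include <utility>
#include <vector>

int main() {
  // 0 -> 1, 0 -> 2, 1 -> 2
  std::vector<std::vector<int>> succ = {{1, 2}, {2}, {}};
  std::vector<bool> visited(succ.size(), false);
  std::string order;

  // The stack holds (node, index of the next successor to visit), just as
  // the NodeState pairs above hold an iterator range per node.
  std::stack<std::pair<int, std::size_t>> stack;
  stack.push({0, 0});
  visited[0] = true;
  order += "pre0 ";
  while (!stack.empty()) {
    auto& top = stack.top();
    if (top.second == succ[top.first].size()) {
      order += "post" + std::to_string(top.first) + " ";  // all edges done
      stack.pop();
      continue;
    }
    int next = succ[top.first][top.second++];
    if (!visited[next]) {
      visited[next] = true;
      order += "pre" + std::to_string(next) + " ";
      stack.push({next, 0});
    }
  }
  assert(order == "pre0 pre1 pre2 post2 post1 post0 ");
  return 0;
}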
diff --git a/deps/v8/src/compiler/generic-graph.h b/deps/v8/src/compiler/generic-graph.h
new file mode 100644
index 000000000..a55545654
--- /dev/null
+++ b/deps/v8/src/compiler/generic-graph.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_GRAPH_H_
+#define V8_COMPILER_GENERIC_GRAPH_H_
+
+#include "src/compiler/generic-node.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class GenericGraphBase : public ZoneObject {
+ public:
+ explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
+
+ Zone* zone() const { return zone_; }
+
+ NodeId NextNodeID() { return next_node_id_++; }
+ NodeId NodeCount() const { return next_node_id_; }
+
+ private:
+ Zone* zone_;
+ NodeId next_node_id_;
+};
+
+template <class V>
+class GenericGraph : public GenericGraphBase {
+ public:
+ explicit GenericGraph(Zone* zone)
+ : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
+
+ V* start() { return start_; }
+ V* end() { return end_; }
+
+ void SetStart(V* start) { start_ = start; }
+ void SetEnd(V* end) { end_ = end; }
+
+ private:
+ V* start_;
+ V* end_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericGraph);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/deps/v8/src/compiler/generic-node-inl.h b/deps/v8/src/compiler/generic-node-inl.h
new file mode 100644
index 000000000..51d1a5016
--- /dev/null
+++ b/deps/v8/src/compiler/generic-node-inl.h
@@ -0,0 +1,245 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
+#define V8_COMPILER_GENERIC_NODE_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class B, class S>
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+ : BaseClass(graph->zone()),
+ input_count_(input_count),
+ has_appendable_inputs_(false),
+ use_count_(0),
+ first_use_(NULL),
+ last_use_(NULL) {
+ inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph);
+}
+
+template <class B, class S>
+inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
+ id_ = graph->NextNodeID();
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::begin() {
+ return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::end() {
+ return typename GenericNode<B, S>::Inputs::iterator(
+ this->node_, this->node_->InputCount());
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::begin() {
+ return typename GenericNode<B, S>::Uses::iterator(this->node_);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::end() {
+ return typename GenericNode<B, S>::Uses::iterator();
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
+ for (Use* use = first_use_; use != NULL; use = use->next) {
+ use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ }
+ if (replace_to->last_use_ == NULL) {
+ DCHECK_EQ(NULL, replace_to->first_use_);
+ replace_to->first_use_ = first_use_;
+ } else {
+ DCHECK_NE(NULL, replace_to->first_use_);
+ replace_to->last_use_->next = first_use_;
+ first_use_->prev = replace_to->last_use_;
+ }
+ replace_to->last_use_ = last_use_;
+ replace_to->use_count_ += use_count_;
+ use_count_ = 0;
+ first_use_ = NULL;
+ last_use_ = NULL;
+}
+
+template <class B, class S>
+template <class UnaryPredicate>
+void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
+ GenericNode* replace_to) {
+ for (Use* use = first_use_; use != NULL;) {
+ Use* next = use->next;
+ if (pred(static_cast<S*>(use->from))) {
+ RemoveUse(use);
+ replace_to->AppendUse(use);
+ use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ }
+ use = next;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveAllInputs() {
+ for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
+ ++iter) {
+ iter.GetInput()->Update(NULL);
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::TrimInputCount(int new_input_count) {
+ if (new_input_count == input_count_) return; // Nothing to do.
+
+ DCHECK(new_input_count < input_count_);
+
+ // Update inline inputs.
+ for (int i = new_input_count; i < input_count_; i++) {
+ typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
+ input->Update(NULL);
+ }
+ input_count_ = new_input_count;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
+ Input* input = GetInputRecordPtr(index);
+ input->Update(new_to);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
+ GenericNode* old_to = this->to;
+ if (new_to == old_to) return; // Nothing to do.
+ // Snip out the use from where it used to be
+ if (old_to != NULL) {
+ old_to->RemoveUse(use);
+ }
+ to = new_to;
+ // And put it into the new node's use list.
+ if (new_to != NULL) {
+ new_to->AppendUse(use);
+ } else {
+ use->next = NULL;
+ use->prev = NULL;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
+ if (!has_appendable_inputs_) {
+ void* deque_buffer = zone->New(sizeof(InputDeque));
+ InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone));
+ for (int i = 0; i < input_count_; ++i) {
+ deque->push_back(inputs_.static_[i]);
+ }
+ inputs_.appendable_ = deque;
+ has_appendable_inputs_ = true;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
+ EnsureAppendableInputs(zone);
+ Use* new_use = new (zone) Use;
+ Input new_input;
+ new_input.to = to_append;
+ new_input.use = new_use;
+ inputs_.appendable_->push_back(new_input);
+ new_use->input_index = input_count_;
+ new_use->from = this;
+ to_append->AppendUse(new_use);
+ input_count_++;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::InsertInput(Zone* zone, int index,
+ GenericNode<B, S>* to_insert) {
+ DCHECK(index >= 0 && index < InputCount());
+ // TODO(turbofan): Optimize this implementation!
+ AppendInput(zone, InputAt(InputCount() - 1));
+ for (int i = InputCount() - 1; i > index; --i) {
+ ReplaceInput(i, InputAt(i - 1));
+ }
+ ReplaceInput(index, to_insert);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendUse(Use* use) {
+ use->next = NULL;
+ use->prev = last_use_;
+ if (last_use_ == NULL) {
+ first_use_ = use;
+ } else {
+ last_use_->next = use;
+ }
+ last_use_ = use;
+ ++use_count_;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveUse(Use* use) {
+ if (last_use_ == use) {
+ last_use_ = use->prev;
+ }
+ if (use->prev != NULL) {
+ use->prev->next = use->next;
+ } else {
+ first_use_ = use->next;
+ }
+ if (use->next != NULL) {
+ use->next->prev = use->prev;
+ }
+ --use_count_;
+}
+
+template <class B, class S>
+inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
+ return first_use_ != NULL && first_use_->from == owner &&
+ first_use_->next == NULL;
+}
+
+template <class B, class S>
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
+ S** inputs) {
+ size_t node_size = sizeof(GenericNode);
+ size_t inputs_size = input_count * sizeof(Input);
+ size_t uses_size = input_count * sizeof(Use);
+ int size = static_cast<int>(node_size + inputs_size + uses_size);
+ Zone* zone = graph->zone();
+ void* buffer = zone->New(size);
+ S* result = new (buffer) S(graph, input_count);
+ Input* input =
+ reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+ Use* use =
+ reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+ for (int current = 0; current < input_count; ++current) {
+ GenericNode* to = *inputs++;
+ input->to = to;
+ input->use = use;
+ use->input_index = current;
+ use->from = result;
+ to->AppendUse(use);
+ ++use;
+ ++input;
+ }
+ return result;
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_NODE_INL_H_
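GenericNode::New above carves the node object, its Input records, and its Use records out of one zone allocation and wires them up with placement new. A standalone sketch of that single-buffer layout, with malloc standing in for the Zone and trivial Input/Use stand-ins (illustrative only):

#include <cassert>
#include <cstdlib>
#include <new>

// Trivial stand-ins for the Input and Use records.
struct Input { int to; };
struct Use { int from; };

// The node object is followed in memory by its Input array and then its Use
// array, all carved out of one allocation -- the layout GenericNode::New
// sets up (with a Zone instead of malloc).
struct Node {
  int input_count;
  Input* inputs() { return reinterpret_cast<Input*>(this + 1); }
  Use* uses() { return reinterpret_cast<Use*>(inputs() + input_count); }
};

int main() {
  const int input_count = 3;
  const std::size_t size =
      sizeof(Node) + input_count * (sizeof(Input) + sizeof(Use));
  void* buffer = std::malloc(size);
  Node* node = new (buffer) Node{input_count};
  for (int i = 0; i < input_count; ++i) {
    new (&node->inputs()[i]) Input{i * 10};  // construct the trailing records
    new (&node->uses()[i]) Use{i};           // in place, as New() does
  }
  assert(node->inputs()[2].to == 20);
  assert(node->uses()[1].from == 1);
  std::free(buffer);
  return 0;
}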
diff --git a/deps/v8/src/compiler/generic-node.h b/deps/v8/src/compiler/generic-node.h
new file mode 100644
index 000000000..287d852f5
--- /dev/null
+++ b/deps/v8/src/compiler/generic-node.h
@@ -0,0 +1,271 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_H_
+#define V8_COMPILER_GENERIC_NODE_H_
+
+#include <deque>
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+class GenericGraphBase;
+
+typedef int NodeId;
+
+// A GenericNode<> is the basic primitive of graphs. GenericNodes are
+// chained together by input/use chains but by default otherwise contain only an
+// identifying number which specific applications of graphs and nodes can use
+// to index auxiliary out-of-line data, especially transient data.
+// Specializations of the templatized GenericNode<> class must provide a base
+// class B that contains all of the members to be made available in each
+// specialized Node instance. GenericNode uses a mixin template pattern to
+// ensure that common accessors and methods expect the derived class S type
+// rather than the GenericNode<B, S> type.
+template <class B, class S>
+class GenericNode : public B {
+ public:
+ typedef B BaseClass;
+ typedef S DerivedClass;
+
+ inline NodeId id() const { return id_; }
+
+ int InputCount() const { return input_count_; }
+ S* InputAt(int index) const {
+ return static_cast<S*>(GetInputRecordPtr(index)->to);
+ }
+ void ReplaceInput(int index, GenericNode* new_input);
+ void AppendInput(Zone* zone, GenericNode* new_input);
+ void InsertInput(Zone* zone, int index, GenericNode* new_input);
+
+ int UseCount() { return use_count_; }
+ S* UseAt(int index) {
+ DCHECK(index < use_count_);
+ Use* current = first_use_;
+ while (index-- != 0) {
+ current = current->next;
+ }
+ return static_cast<S*>(current->from);
+ }
+ inline void ReplaceUses(GenericNode* replace_to);
+ template <class UnaryPredicate>
+ inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
+ void RemoveAllInputs();
+
+ void TrimInputCount(int input_count);
+
+ class Inputs {
+ public:
+ class iterator;
+ iterator begin();
+ iterator end();
+
+ explicit Inputs(GenericNode* node) : node_(node) {}
+
+ private:
+ GenericNode* node_;
+ };
+
+ Inputs inputs() { return Inputs(this); }
+
+ class Uses {
+ public:
+ class iterator;
+ iterator begin();
+ iterator end();
+ bool empty() { return begin() == end(); }
+
+ explicit Uses(GenericNode* node) : node_(node) {}
+
+ private:
+ GenericNode* node_;
+ };
+
+ Uses uses() { return Uses(this); }
+
+ class Edge;
+
+ bool OwnedBy(GenericNode* owner) const;
+
+ static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+
+ protected:
+ friend class GenericGraphBase;
+
+ class Use : public ZoneObject {
+ public:
+ GenericNode* from;
+ Use* next;
+ Use* prev;
+ int input_index;
+ };
+
+ class Input {
+ public:
+ GenericNode* to;
+ Use* use;
+
+ void Update(GenericNode* new_to);
+ };
+
+ void EnsureAppendableInputs(Zone* zone);
+
+ Input* GetInputRecordPtr(int index) const {
+ if (has_appendable_inputs_) {
+ return &((*inputs_.appendable_)[index]);
+ } else {
+ return inputs_.static_ + index;
+ }
+ }
+
+ void AppendUse(Use* use);
+ void RemoveUse(Use* use);
+
+ void* operator new(size_t, void* location) { return location; }
+
+ GenericNode(GenericGraphBase* graph, int input_count);
+
+ private:
+ void AssignUniqueID(GenericGraphBase* graph);
+
+ typedef zone_allocator<Input> ZoneInputAllocator;
+ typedef std::deque<Input, ZoneInputAllocator> InputDeque;
+
+ NodeId id_;
+ int input_count_ : 31;
+ bool has_appendable_inputs_ : 1;
+ union {
+ // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+ // When the first input is appended, the static buffer is converted into a
+ // deque to allow for space-efficient growing.
+ Input* static_;
+ InputDeque* appendable_;
+ } inputs_;
+ int use_count_;
+ Use* first_use_;
+ Use* last_use_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericNode);
+};
+
+// An encapsulation for information associated with a single use of a node as
+// an input from another node, allowing access to both the defining node and
+// the node having the input.
+template <class B, class S>
+class GenericNode<B, S>::Edge {
+ public:
+ S* from() const { return static_cast<S*>(input_->use->from); }
+ S* to() const { return static_cast<S*>(input_->to); }
+ int index() const {
+ int index = input_->use->input_index;
+ DCHECK(index < input_->use->from->input_count_);
+ return index;
+ }
+
+ private:
+ friend class GenericNode<B, S>::Uses::iterator;
+ friend class GenericNode<B, S>::Inputs::iterator;
+
+ explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
+
+ typename GenericNode<B, S>::Input* input_;
+};
+
+// A forward iterator to visit the nodes which are depended upon by a node
+// in the order of input.
+template <class B, class S>
+class GenericNode<B, S>::Inputs::iterator {
+ public:
+ iterator(const typename GenericNode<B, S>::Inputs::iterator& other) // NOLINT
+ : node_(other.node_),
+ index_(other.index_) {}
+
+ S* operator*() { return static_cast<S*>(GetInput()->to); }
+ typename GenericNode<B, S>::Edge edge() {
+ return typename GenericNode::Edge(GetInput());
+ }
+ bool operator==(const iterator& other) const {
+ return other.index_ == index_ && other.node_ == node_;
+ }
+ bool operator!=(const iterator& other) const { return !(other == *this); }
+ iterator& operator++() {
+ DCHECK(node_ != NULL);
+ DCHECK(index_ < node_->input_count_);
+ ++index_;
+ return *this;
+ }
+ int index() { return index_; }
+
+ private:
+ friend class GenericNode;
+
+ explicit iterator(GenericNode* node, int index)
+ : node_(node), index_(index) {}
+
+ Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
+
+ GenericNode* node_;
+ int index_;
+};
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+template <class B, class S>
+class GenericNode<B, S>::Uses::iterator {
+ public:
+ iterator(const typename GenericNode<B, S>::Uses::iterator& other) // NOLINT
+ : current_(other.current_),
+ index_(other.index_) {}
+
+ S* operator*() { return static_cast<S*>(current_->from); }
+ typename GenericNode<B, S>::Edge edge() {
+ return typename GenericNode::Edge(CurrentInput());
+ }
+
+ bool operator==(const iterator& other) { return other.current_ == current_; }
+ bool operator!=(const iterator& other) { return other.current_ != current_; }
+ iterator& operator++() {
+ DCHECK(current_ != NULL);
+ index_++;
+ current_ = current_->next;
+ return *this;
+ }
+ iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+ DCHECK(current_ != NULL);
+ index_++;
+ typename GenericNode<B, S>::Input* input = CurrentInput();
+ current_ = current_->next;
+ input->Update(new_to);
+ return *this;
+ }
+ int index() const { return index_; }
+
+ private:
+ friend class GenericNode<B, S>::Uses;
+
+ iterator() : current_(NULL), index_(0) {}
+ explicit iterator(GenericNode<B, S>* node)
+ : current_(node->first_use_), index_(0) {}
+
+ Input* CurrentInput() const {
+ return current_->from->GetInputRecordPtr(current_->input_index);
+ }
+
+ typename GenericNode<B, S>::Use* current_;
+ int index_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_NODE_H_
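The comment at the top of generic-node.h describes a mixin: the template supplies the input/use plumbing, the B parameter supplies per-node payload, and passing the derived class back in as S keeps the accessors typed as the concrete node. A minimal standalone illustration of that shape; GenericNodeSketch, OperatorPayload and MyNode are invented names, and the sketch stores inputs in a std::vector rather than the static-buffer/deque scheme above:

#include <cassert>
#include <string>
#include <vector>

// The template stores the links; accessors are typed as the derived class S,
// so clients never see the generic layer.
template <class B, class S>
class GenericNodeSketch : public B {
 public:
  explicit GenericNodeSketch(int id) : id_(id) {}
  int id() const { return id_; }

  void AppendInput(S* input) { inputs_.push_back(input); }
  int InputCount() const { return static_cast<int>(inputs_.size()); }
  S* InputAt(int index) const { return inputs_[index]; }  // S*, not GenericNodeSketch*

 private:
  int id_;
  std::vector<S*> inputs_;
};

// The "B" part: whatever payload the concrete node type needs.
struct OperatorPayload {
  std::string mnemonic;
};

// The "S" part: the concrete node type, passed back into the template.
class MyNode : public GenericNodeSketch<OperatorPayload, MyNode> {
 public:
  MyNode(int id, const std::string& name)
      : GenericNodeSketch<OperatorPayload, MyNode>(id) {
    mnemonic = name;
  }
};

int main() {
  MyNode a(0, "Int32Constant"), b(1, "Add");
  b.AppendInput(&a);
  MyNode* input = b.InputAt(0);  // already typed as MyNode*
  assert(input->mnemonic == "Int32Constant");
  assert(b.InputCount() == 1);
  return 0;
}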
diff --git a/deps/v8/src/compiler/graph-builder.cc b/deps/v8/src/compiler/graph-builder.cc
new file mode 100644
index 000000000..9c414f1bf
--- /dev/null
+++ b/deps/v8/src/compiler/graph-builder.cc
@@ -0,0 +1,241 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+ CommonOperatorBuilder* common)
+ : GraphBuilder(graph),
+ common_(common),
+ environment_(NULL),
+ current_context_(NULL),
+ exit_control_(NULL) {}
+
+
+Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs) {
+ bool has_context = OperatorProperties::HasContextInput(op);
+ bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+ bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+ DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
+ DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+
+ Node* result = NULL;
+ if (!has_context && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = current_context();
+ }
+ if (has_effect) {
+ *current_input++ = environment_->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = environment_->GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer);
+ if (has_effect) {
+ environment_->UpdateEffectDependency(result);
+ }
+ if (OperatorProperties::HasControlOutput(result->op()) &&
+ !environment()->IsMarkedAsUnreachable()) {
+ environment_->UpdateControlDependency(result);
+ }
+ }
+
+ return result;
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
+ Node* exit) {
+ if (environment()->IsMarkedAsUnreachable()) return;
+ if (exit_control() != NULL) {
+ exit = MergeControl(exit_control(), exit);
+ }
+ environment()->MarkAsUnreachable();
+ set_exit_control(exit);
+}
+
+
+StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
+ Environment* env) {
+ return new (zone()) Environment(*env);
+}
+
+
+StructuredGraphBuilder::Environment::Environment(
+ StructuredGraphBuilder* builder, Node* control_dependency)
+ : builder_(builder),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ values_(NodeVector::allocator_type(zone())) {}
+
+
+StructuredGraphBuilder::Environment::Environment(const Environment& copy)
+ : builder_(copy.builder()),
+ control_dependency_(copy.control_dependency_),
+ effect_dependency_(copy.effect_dependency_),
+ values_(copy.values_) {}
+
+
+void StructuredGraphBuilder::Environment::Merge(Environment* other) {
+ DCHECK(values_.size() == other->values_.size());
+
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) return;
+
+ // Resurrect a dead environment by copying the contents of the other one and
+ // placing a singleton merge as the new control dependency.
+ if (this->IsMarkedAsUnreachable()) {
+ Node* other_control = other->control_dependency_;
+ control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+ effect_dependency_ = other->effect_dependency_;
+ values_ = other->values_;
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder_->MergeControl(this->GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing inputs at merge points,
+ // extending an existing Phi node where possible.
+ for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+ values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+ }
+}
+
+
+void StructuredGraphBuilder::Environment::PrepareForLoop() {
+ Node* control = GetControlDependency();
+ for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
+ Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+}
+
+
+Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ Operator* phi_op = common()->Phi(count);
+ void* raw_buffer = alloca(kPointerSize * (count + 1));
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
+ Node* control) {
+ Operator* phi_op = common()->EffectPhi(count);
+ void* raw_buffer = alloca(kPointerSize * (count + 1));
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ Operator* op = common()->Loop(inputs);
+ control->AppendInput(zone(), other);
+ control->set_op(op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ Operator* op = common()->Merge(inputs);
+ control->AppendInput(zone(), other);
+ control->set_op(op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ Operator* op = common()->Merge(inputs);
+ control = graph()->NewNode(op, control, other);
+ }
+ return control;
+}
+
+
+Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
+ Node* control) {
+ int inputs = OperatorProperties::GetControlInputCount(control->op());
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->EffectPhi(inputs));
+ value->InsertInput(zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
+ Node* control) {
+ int inputs = OperatorProperties::GetControlInputCount(control->op());
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->Phi(inputs));
+ value->InsertInput(zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* StructuredGraphBuilder::dead_control() {
+ if (!dead_control_.is_set()) {
+ Node* dead_node = graph()->NewNode(common_->Dead());
+ dead_control_.set(dead_node);
+ return dead_node;
+ }
+ return dead_control_.get();
+}
+}
+}
+} // namespace v8::internal::compiler
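The MergeControl() helper above either appends to an existing Merge or Loop node or introduces a fresh Merge for a singleton control value. A rough sketch of that behavior, assuming graph and common already exist for the same zone and using Dead nodes purely as stand-in control values:

    Node* MergeControlSketch(Graph* graph, CommonOperatorBuilder* common) {
      StructuredGraphBuilder builder(graph, common);
      Node* c1 = graph->NewNode(common->Dead());
      Node* c2 = graph->NewNode(common->Dead());
      // c1 is a singleton control node, so a new Merge combining c1 and c2
      // is introduced.
      Node* merged = builder.MergeControl(c1, c2);
      // merged is already a Merge, so c3 is appended to it in place and its
      // operator is widened to account for the extra control input.
      Node* c3 = graph->NewNode(common->Dead());
      return builder.MergeControl(merged, c3);
    }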
diff --git a/deps/v8/src/compiler/graph-builder.h b/deps/v8/src/compiler/graph-builder.h
new file mode 100644
index 000000000..fc9000855
--- /dev/null
+++ b/deps/v8/src/compiler/graph-builder.h
@@ -0,0 +1,226 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_BUILDER_H_
+#define V8_COMPILER_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+
+// A common base class for anything that creates nodes in a graph.
+class GraphBuilder {
+ public:
+ explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+ virtual ~GraphBuilder() {}
+
+ Node* NewNode(Operator* op) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL));
+ }
+
+ Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+ Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+
+ Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) {
+ return MakeNode(op, value_input_count, value_inputs);
+ }
+
+ Graph* graph() const { return graph_; }
+
+ protected:
+ // Base implementation used by all factory methods.
+ virtual Node* MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs) = 0;
+
+ private:
+ Graph* graph_;
+};
+
+
+// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
+// base class for concrete implementations (e.g. the AstGraphBuilder or the
+// StubGraphBuilder).
+class StructuredGraphBuilder : public GraphBuilder {
+ public:
+ StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
+ virtual ~StructuredGraphBuilder() {}
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* value, Node* other, Node* control);
+ Node* MergeValue(Node* value, Node* other, Node* control);
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1)); }
+ Node* NewLoop() { return NewNode(common()->Loop(1)); }
+ Node* NewBranch(Node* condition) {
+ return NewNode(common()->Branch(), condition);
+ }
+
+ protected:
+ class Environment;
+ friend class ControlBuilder;
+
+ // The following method creates a new node having the specified operator and
+ // ensures effect and control dependencies are wired up. The dependencies
+ // tracked by the environment might be mutated.
+ virtual Node* MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs);
+
+ Environment* environment() const { return environment_; }
+ void set_environment(Environment* env) { environment_ = env; }
+
+ Node* current_context() const { return current_context_; }
+ void set_current_context(Node* context) { current_context_ = context; }
+
+ Node* exit_control() const { return exit_control_; }
+ void set_exit_control(Node* node) { exit_control_ = node; }
+
+ Node* dead_control();
+
+ // TODO(mstarzinger): Use phase-local zone instead!
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ CommonOperatorBuilder* common() const { return common_; }
+
+ // Helper to wrap a Handle<T> into a PrintableUnique<T>.
+ template <class T>
+ PrintableUnique<T> MakeUnique(Handle<T> object) {
+ return PrintableUnique<T>::CreateUninitialized(zone(), object);
+ }
+
+ // Support for control flow builders. The concrete type of the environment
+ // depends on the graph builder, but environments themselves are not virtual.
+ virtual Environment* CopyEnvironment(Environment* env);
+
+ // Helper to indicate a node exits the function body.
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ private:
+ CommonOperatorBuilder* common_;
+ Environment* environment_;
+
+ // Node representing the control dependency for dead code.
+ SetOncePointer<Node> dead_control_;
+
+ // Node representing the current context within the function body.
+ Node* current_context_;
+
+ // Merge of all control nodes that exit the function body.
+ Node* exit_control_;
+
+ DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
+};
+
+
+// The abstract execution environment contains static knowledge about
+// execution state at arbitrary control-flow points. It allows for
+// simulation of the control-flow at compile time.
+class StructuredGraphBuilder::Environment : public ZoneObject {
+ public:
+ Environment(StructuredGraphBuilder* builder, Node* control_dependency);
+ Environment(const Environment& copy);
+
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
+ }
+
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+
+ // Mark this environment as being unreachable.
+ void MarkAsUnreachable() {
+ UpdateControlDependency(builder()->dead_control());
+ }
+ bool IsMarkedAsUnreachable() {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
+ }
+
+ // Merge another environment into this one.
+ void Merge(Environment* other);
+
+ // Copies this environment at a control-flow split point.
+ Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
+
+ // Copies this environment to a potentially unreachable control-flow point.
+ Environment* CopyAsUnreachable() {
+ Environment* env = builder()->CopyEnvironment(this);
+ env->MarkAsUnreachable();
+ return env;
+ }
+
+ // Copies this environment at a loop header control-flow point.
+ Environment* CopyForLoop() {
+ PrepareForLoop();
+ return builder()->CopyEnvironment(this);
+ }
+
+ protected:
+ // TODO(mstarzinger): Use phase-local zone instead!
+ Zone* zone() const { return graph()->zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ StructuredGraphBuilder* builder() const { return builder_; }
+ CommonOperatorBuilder* common() { return builder_->common(); }
+ NodeVector* values() { return &values_; }
+
+ // Prepare environment to be used as loop header.
+ void PrepareForLoop();
+
+ private:
+ StructuredGraphBuilder* builder_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ NodeVector values_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_BUILDER_H_
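Concrete builders drive control flow by copying environments at splits and merging them back at joins. A minimal sketch of that protocol for a conditional, using a hypothetical MyGraphBuilder subclass with a current environment already installed; the actual Branch/IfTrue/IfFalse wiring is elided for brevity:

    class MyGraphBuilder : public StructuredGraphBuilder {
     public:
      MyGraphBuilder(Graph* graph, CommonOperatorBuilder* common)
          : StructuredGraphBuilder(graph, common) {}

      void VisitConditionalSketch() {
        // Keep a copy of the current environment for the else-branch.
        Environment* else_env = environment()->CopyForConditional();
        // ... build the then-branch against the current environment ...
        Environment* then_env = environment();
        // Switch to the copy and build the else-branch.
        set_environment(else_env);
        // ... build the else-branch ...
        // Join both paths; Phis are introduced for values that differ.
        then_env->Merge(environment());
        set_environment(then_env);
      }
    };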
diff --git a/deps/v8/src/compiler/graph-inl.h b/deps/v8/src/compiler/graph-inl.h
new file mode 100644
index 000000000..f8423c3f8
--- /dev/null
+++ b/deps/v8/src/compiler/graph-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_INL_H_
+#define V8_COMPILER_GRAPH_INL_H_
+
+#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class Visitor>
+void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
+ GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node,
+ visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
+ VisitNodeUsesFrom(start(), visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+ GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
+ this, end(), visitor);
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_INL_H_
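These traversal entry points pair with visitors derived from NullNodeVisitor, in the same style as GraphReducerVisitor in graph-reducer.cc below. A small sketch that counts every node reachable from the graph's end:

    struct NodeCountVisitor : public NullNodeVisitor {
      NodeCountVisitor() : count(0) {}
      GenericGraphVisit::Control Post(Node*) {
        count++;
        return GenericGraphVisit::CONTINUE;
      }
      int count;
    };

Given a fully built Graph* graph, constructing a NodeCountVisitor v and calling graph->VisitNodeInputsFromEnd(&v) leaves the number of visited nodes in v.count.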
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
new file mode 100644
index 000000000..f062d4bea
--- /dev/null
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -0,0 +1,94 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-reducer.h"
+
+#include <functional>
+
+#include "src/compiler/graph-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphReducer::GraphReducer(Graph* graph)
+ : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {}
+
+
+static bool NodeIdIsLessThan(const Node* node, NodeId id) {
+ return node->id() < id;
+}
+
+
+void GraphReducer::ReduceNode(Node* node) {
+ Reducers::iterator skip = reducers_.end();
+ static const unsigned kMaxAttempts = 16;
+ bool reduce = true;
+ for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
+ if (!reduce) return;
+ reduce = false; // Assume we don't need to rerun any reducers.
+ int before = graph_->NodeCount();
+ for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) {
+ if (i == skip) continue; // Skip this reducer.
+ Reduction reduction = (*i)->Reduce(node);
+ Node* replacement = reduction.replacement();
+ if (replacement == NULL) {
+ // No change from this reducer.
+ } else if (replacement == node) {
+ // {replacement == node} represents an in-place reduction.
+ // Rerun all the reducers except the current one for this node,
+ // as now there may be more opportunities for reduction.
+ reduce = true;
+ skip = i;
+ break;
+ } else {
+ if (node == graph_->start()) graph_->SetStart(replacement);
+ if (node == graph_->end()) graph_->SetEnd(replacement);
+ // If {node} was replaced by an old node, unlink {node} and assume that
+ // {replacement} was already reduced and finish.
+ if (replacement->id() < before) {
+ node->RemoveAllInputs();
+ node->ReplaceUses(replacement);
+ return;
+ }
+ // Otherwise, {node} was replaced by a new node. Replace all old uses of
+ // {node} with {replacement}. New nodes created by this reduction can
+ // use {node}.
+ node->ReplaceUsesIf(
+ std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
+ // Unlink {node} if it's no longer used.
+ if (node->uses().empty()) node->RemoveAllInputs();
+ // Rerun all the reductions on the {replacement}.
+ skip = reducers_.end();
+ node = replacement;
+ reduce = true;
+ break;
+ }
+ }
+ }
+}
+
+
+// A helper class to reuse the node traversal algorithm.
+struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor {
+ explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
+ GenericGraphVisit::Control Post(Node* node) {
+ reducer_->ReduceNode(node);
+ return GenericGraphVisit::CONTINUE;
+ }
+ GraphReducer* reducer_;
+};
+
+
+void GraphReducer::ReduceGraph() {
+ GraphReducerVisitor visitor(this);
+ // Perform a post-order reduction of all nodes starting from the end.
+ graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(titzer): partial graph reductions.
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
new file mode 100644
index 000000000..33cded65a
--- /dev/null
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REDUCER_H_
+#define V8_COMPILER_GRAPH_REDUCER_H_
+
+#include <list>
+
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Represents the result of trying to reduce a node in the graph.
+class Reduction V8_FINAL {
+ public:
+ explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+
+ Node* replacement() const { return replacement_; }
+ bool Changed() const { return replacement() != NULL; }
+
+ private:
+ Node* replacement_;
+};
+
+
+// A reducer can reduce or simplify a given node based on its operator and
+// inputs. This class functions as an extension point for the graph reducer so
+// that language-specific reductions (e.g. reductions based on types or constant
+// folding of low-level operators) can be integrated into the graph reduction
+// phase.
+class Reducer {
+ public:
+ virtual ~Reducer() {}
+
+ // Try to reduce a node if possible.
+ virtual Reduction Reduce(Node* node) = 0;
+
+ // Helper functions for subclasses to produce reductions for a node.
+ static Reduction NoChange() { return Reduction(); }
+ static Reduction Replace(Node* node) { return Reduction(node); }
+ static Reduction Changed(Node* node) { return Reduction(node); }
+};
+
+
+// Performs an iterative reduction of a node graph.
+class GraphReducer V8_FINAL {
+ public:
+ explicit GraphReducer(Graph* graph);
+
+ Graph* graph() const { return graph_; }
+
+ void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+
+ // Reduce a single node.
+ void ReduceNode(Node* node);
+ // Reduce the whole graph.
+ void ReduceGraph();
+
+ private:
+ typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers;
+
+ Graph* graph_;
+ Reducers reducers_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_REDUCER_H_
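A reducer plugs into GraphReducer through the Reduce() hook and reports its outcome with NoChange(), Changed() or Replace(). A sketch of the shape such a reducer takes; this one deliberately folds nothing:

    class NopReducer : public Reducer {
     public:
      virtual Reduction Reduce(Node* node) {
        // A real reducer would pattern-match on node->op() and its inputs
        // here and return Replace(new_node) or Changed(node) when it folds
        // something.
        USE(node);
        return NoChange();
      }
    };

To drive it, construct a GraphReducer for the graph, register an instance with AddReducer(), and then call ReduceGraph() for a full pass (or ReduceNode() for a single node).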
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
new file mode 100644
index 000000000..efb1180a7
--- /dev/null
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-replay.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef DEBUG
+
+void GraphReplayPrinter::PrintReplay(Graph* graph) {
+ GraphReplayPrinter replay;
+ PrintF(" Node* nil = graph.NewNode(common_builder.Dead());\n");
+ graph->VisitNodeInputsFromEnd(&replay);
+}
+
+
+GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+ PrintReplayOpCreator(node->op());
+ PrintF(" Node* n%d = graph.NewNode(op", node->id());
+ for (int i = 0; i < node->InputCount(); ++i) {
+ PrintF(", nil");
+ }
+ PrintF("); USE(n%d);\n", node->id());
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
+ PrintF(" n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+}
+
+
+void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) {
+ IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+ const char* builder =
+ IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+ const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
+ ? IrOpcode::Mnemonic(opcode)
+ : IrOpcode::Mnemonic(opcode) + 2;
+ PrintF(" op = %s.%s(", builder, mnemonic);
+ switch (opcode) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kNumberConstant:
+ PrintF("0");
+ break;
+ case IrOpcode::kLoad:
+ PrintF("unique_name");
+ break;
+ case IrOpcode::kHeapConstant:
+ PrintF("unique_constant");
+ break;
+ case IrOpcode::kPhi:
+ PrintF("%d", op->InputCount());
+ break;
+ case IrOpcode::kEffectPhi:
+ PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+ break;
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ PrintF("%d", OperatorProperties::GetControlInputCount(op));
+ break;
+ default:
+ break;
+ }
+ PrintF(");\n");
+}
+
+#endif // DEBUG
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph-replay.h b/deps/v8/src/compiler/graph-replay.h
new file mode 100644
index 000000000..cc186d77c
--- /dev/null
+++ b/deps/v8/src/compiler/graph-replay.h
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REPLAY_H_
+#define V8_COMPILER_GRAPH_REPLAY_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Graph;
+class Operator;
+
+// Helper class to print a full replay of a graph. This replay can be used to
+// materialize the same graph within a C++ unit test and hence test subsequent
+// optimization passes on a graph without going through the construction steps.
+class GraphReplayPrinter : public NullNodeVisitor {
+ public:
+#ifdef DEBUG
+ static void PrintReplay(Graph* graph);
+#else
+ static void PrintReplay(Graph* graph) {}
+#endif
+
+ GenericGraphVisit::Control Pre(Node* node);
+ void PostEdge(Node* from, int index, Node* to);
+
+ private:
+ GraphReplayPrinter() {}
+
+ static void PrintReplayOpCreator(Operator* op);
+
+ DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_REPLAY_H_
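Since the printer's only public entry point is static, emitting a replay is a single call; a sketch, assuming a fully built Graph* graph:

    // Prints a sequence of graph.NewNode(...) statements that rebuilds the
    // graph; in non-DEBUG builds this compiles to an empty call.
    GraphReplayPrinter::PrintReplay(graph);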
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
new file mode 100644
index 000000000..144512ad0
--- /dev/null
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -0,0 +1,265 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-visualizer.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define DEAD_COLOR "#999999"
+
+class GraphVisualizer : public NullNodeVisitor {
+ public:
+ GraphVisualizer(OStream& os, const Graph* graph); // NOLINT
+
+ void Print();
+
+ GenericGraphVisit::Control Pre(Node* node);
+ GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+ void AnnotateNode(Node* node);
+ void PrintEdge(Node* from, int index, Node* to);
+
+ NodeSet all_nodes_;
+ NodeSet white_nodes_;
+ bool use_to_def_;
+ OStream& os_;
+ const Graph* const graph_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
+};
+
+
+static Node* GetControlCluster(Node* node) {
+ if (OperatorProperties::IsBasicBlockBegin(node->op())) {
+ return node;
+ } else if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ return OperatorProperties::IsBasicBlockBegin(control->op()) ? control
+ : NULL;
+ } else {
+ return NULL;
+ }
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
+ if (all_nodes_.count(node) == 0) {
+ Node* control_cluster = GetControlCluster(node);
+ if (control_cluster != NULL) {
+ os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
+ }
+ os_ << " ID" << node->id() << " [\n";
+ AnnotateNode(node);
+ os_ << " ]\n";
+ if (control_cluster != NULL) os_ << " }\n";
+ all_nodes_.insert(node);
+ if (use_to_def_) white_nodes_.insert(node);
+ }
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
+ Node* to) {
+ if (use_to_def_) return GenericGraphVisit::CONTINUE;
+ // When going from def to use, only consider white -> other edges, which are
+ // the dead nodes that use live nodes. We're probably not interested in
+ // dead nodes that only use other dead nodes.
+ if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
+ return GenericGraphVisit::SKIP;
+}
+
+
+class Escaped {
+ public:
+ explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
+
+ friend OStream& operator<<(OStream& os, const Escaped& e) {
+ for (const char* s = e.str_; *s != '\0'; ++s) {
+ if (needs_escape(*s)) os << "\\";
+ os << *s;
+ }
+ return os;
+ }
+
+ private:
+ static bool needs_escape(char ch) {
+ switch (ch) {
+ case '>':
+ case '<':
+ case '|':
+ case '}':
+ case '{':
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ const char* const str_;
+};
+
+
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+ if (from->opcode() == IrOpcode::kPhi ||
+ from->opcode() == IrOpcode::kEffectPhi) {
+ Node* control = NodeProperties::GetControlInput(from, 0);
+ return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
+ } else if (from->opcode() == IrOpcode::kLoop) {
+ return index != 0;
+ } else {
+ return false;
+ }
+}
+
+
+void GraphVisualizer::AnnotateNode(Node* node) {
+ if (!use_to_def_) {
+ os_ << " style=\"filled\"\n"
+ << " fillcolor=\"" DEAD_COLOR "\"\n";
+ }
+
+ os_ << " shape=\"record\"\n";
+ switch (node->opcode()) {
+ case IrOpcode::kEnd:
+ case IrOpcode::kDead:
+ case IrOpcode::kStart:
+ os_ << " style=\"diagonals\"\n";
+ break;
+ case IrOpcode::kMerge:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kLoop:
+ os_ << " style=\"rounded\"\n";
+ break;
+ default:
+ break;
+ }
+
+ OStringStream label;
+ label << *node->op();
+ os_ << " label=\"{{#" << node->id() << ":" << Escaped(label);
+
+ InputIter i = node->inputs().begin();
+ for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">#" << (*i)->id();
+ }
+ for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+ }
+ for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+ }
+
+ if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) ||
+ GetControlCluster(node) == NULL) {
+ for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+ ++i, j--) {
+ os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+ }
+ }
+ os_ << "}";
+
+ if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+ Bounds bounds = NodeProperties::GetBounds(node);
+ OStringStream upper;
+ bounds.upper->PrintTo(upper);
+ OStringStream lower;
+ bounds.lower->PrintTo(lower);
+ os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+ }
+ os_ << "}\"\n";
+}
+
+
+void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
+ bool unconstrained = IsLikelyBackEdge(from, index, to);
+ os_ << " ID" << from->id();
+ if (all_nodes_.count(to) == 0) {
+ os_ << ":I" << index << ":n -> DEAD_INPUT";
+ } else if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
+ GetControlCluster(from) == NULL ||
+ (OperatorProperties::GetControlInputCount(from->op()) > 0 &&
+ NodeProperties::GetControlInput(from) != to)) {
+ os_ << ":I" << index << ":n -> ID" << to->id() << ":s";
+ if (unconstrained) os_ << " [constraint=false,style=dotted]";
+ } else {
+ os_ << " -> ID" << to->id() << ":s [color=transparent"
+ << (unconstrained ? ", constraint=false" : "") << "]";
+ }
+ os_ << "\n";
+}
+
+
+void GraphVisualizer::Print() {
+ os_ << "digraph D {\n"
+ << " node [fontsize=8,height=0.25]\n"
+ << " rankdir=\"BT\"\n"
+ << " \n";
+
+ // Make sure all nodes have been output before writing out the edges.
+ use_to_def_ = true;
+ // TODO(svenpanne) Remove the need for the const_casts.
+ const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
+ white_nodes_.insert(const_cast<Graph*>(graph_)->start());
+
+ // Visit all uses of white nodes.
+ use_to_def_ = false;
+ GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
+ const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(),
+ this);
+
+ os_ << " DEAD_INPUT [\n"
+ << " style=\"filled\" \n"
+ << " fillcolor=\"" DEAD_COLOR "\"\n"
+ << " ]\n"
+ << "\n";
+
+ // With all the nodes written, add the edges.
+ for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
+ Node::Inputs inputs = (*i)->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter) {
+ PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to());
+ }
+ }
+ os_ << "}\n";
+}
+
+
+GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph) // NOLINT
+ : all_nodes_(NodeSet::key_compare(),
+ NodeSet::allocator_type(graph->zone())),
+ white_nodes_(NodeSet::key_compare(),
+ NodeSet::allocator_type(graph->zone())),
+ use_to_def_(true),
+ os_(os),
+ graph_(graph) {}
+
+
+OStream& operator<<(OStream& os, const AsDOT& ad) {
+ GraphVisualizer(os, &ad.graph).Print();
+ return os;
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
new file mode 100644
index 000000000..12532bacf
--- /dev/null
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
+#define V8_COMPILER_GRAPH_VISUALIZER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class Graph;
+
+struct AsDOT {
+ explicit AsDOT(const Graph& g) : graph(g) {}
+ const Graph& graph;
+};
+
+OStream& operator<<(OStream& os, const AsDOT& ad);
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_VISUALIZER_H_
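AsDOT is a thin wrapper that routes a graph through the visualizer via the streaming operator. A sketch of dumping a graph in GraphViz format, assuming OFStream from src/ostreams.h wraps a FILE* as elsewhere in the code base:

    void DumpGraphAsDot(Graph* graph) {
      OFStream os(stdout);
      os << AsDOT(*graph);  // emits a "digraph D { ... }" description
    }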
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
new file mode 100644
index 000000000..3f47eace8
--- /dev/null
+++ b/deps/v8/src/compiler/graph.cc
@@ -0,0 +1,54 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph::Graph(Zone* zone)
+ : GenericGraph<Node>(zone),
+ decorators_(DecoratorVector::allocator_type(zone)) {}
+
+
+Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) {
+ DCHECK(op->InputCount() <= input_count);
+ Node* result = Node::New(this, input_count, inputs);
+ result->Initialize(op);
+ for (DecoratorVector::iterator i = decorators_.begin();
+ i != decorators_.end(); ++i) {
+ (*i)->Decorate(result);
+ }
+ return result;
+}
+
+
+void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); }
+
+
+void Graph::DeleteNode(Node* node) {
+#if DEBUG
+ // Nodes can't be deleted if they have uses.
+ Node::Uses::iterator use_iterator(node->uses().begin());
+ DCHECK(use_iterator == node->uses().end());
+#endif
+
+#if DEBUG
+ memset(node, 0xDE, sizeof(Node));
+#endif
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
new file mode 100644
index 000000000..65ea3b30a
--- /dev/null
+++ b/deps/v8/src/compiler/graph.h
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_H_
+#define V8_COMPILER_GRAPH_H_
+
+#include <map>
+#include <set>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/source-position.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphDecorator;
+
+
+class Graph : public GenericGraph<Node> {
+ public:
+ explicit Graph(Zone* zone);
+
+ // Base implementation used by all factory methods.
+ Node* NewNode(Operator* op, int input_count, Node** inputs);
+
+ // Factories for nodes with static input counts.
+ Node* NewNode(Operator* op) {
+ return NewNode(op, 0, static_cast<Node**>(NULL));
+ }
+ Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+ Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* nodes[] = {n1, n2};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* nodes[] = {n1, n2, n3};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* nodes[] = {n1, n2, n3, n4};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* nodes[] = {n1, n2, n3, n4, n5};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+ Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+
+ void ChangeOperator(Node* node, Operator* op);
+ void DeleteNode(Node* node);
+
+ template <class Visitor>
+ void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+
+ template <class Visitor>
+ void VisitNodeUsesFromStart(Visitor* visitor);
+
+ template <class Visitor>
+ void VisitNodeInputsFromEnd(Visitor* visitor);
+
+ void AddDecorator(GraphDecorator* decorator) {
+ decorators_.push_back(decorator);
+ }
+
+ void RemoveDecorator(GraphDecorator* decorator) {
+ DecoratorVector::iterator it =
+ std::find(decorators_.begin(), decorators_.end(), decorator);
+ DCHECK(it != decorators_.end());
+ decorators_.erase(it, it + 1);
+ }
+
+ private:
+ typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> >
+ DecoratorVector;
+ DecoratorVector decorators_;
+};
+
+
+class GraphDecorator : public ZoneObject {
+ public:
+ virtual ~GraphDecorator() {}
+ virtual void Decorate(Node* node) = 0;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_H_
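GraphDecorator gives clients a hook into every NewNode() call. A minimal sketch of a decorator that merely counts created nodes; zone and common are assumed to be an existing Zone and a CommonOperatorBuilder allocated for it:

    class CountingDecorator : public GraphDecorator {
     public:
      CountingDecorator() : count_(0) {}
      virtual void Decorate(Node*) { count_++; }
      int count() const { return count_; }
     private:
      int count_;
    };

    // Usage:
    //   Graph graph(zone);
    //   CountingDecorator* counter = new (zone) CountingDecorator();
    //   graph.AddDecorator(counter);
    //   graph.NewNode(common->Dead());   // counter->count() is now 1
    //   graph.RemoveDecorator(counter);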
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
new file mode 100644
index 000000000..31a01798a
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -0,0 +1,956 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds IA-32 specific methods for decoding operands.
+class IA32OperandConverter : public InstructionOperandConverter {
+ public:
+ IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+ Immediate InputImmediate(int index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+ Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
+
+ Operand ToOperand(InstructionOperand* op, int extra = 0) {
+ if (op->IsRegister()) {
+ DCHECK(extra == 0);
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ DCHECK(extra == 0);
+ return Operand(ToDoubleRegister(op));
+ }
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
+ Operand HighOperand(InstructionOperand* op) {
+ DCHECK(op->IsDoubleStackSlot());
+ return ToOperand(op, kPointerSize);
+ }
+
+ Immediate ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Immediate(constant.ToInt32());
+ case Constant::kFloat64:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Immediate(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Immediate(constant.ToHeapObject());
+ case Constant::kInt64:
+ break;
+ }
+ UNREACHABLE();
+ return Immediate(-1);
+ }
+
+ Operand MemoryOperand(int* first_input) {
+ const int offset = *first_input;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_MR1I:
+ *first_input += 2;
+ return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
+ times_1,
+ 0); // TODO(dcarney): K != 0
+ case kMode_MRI:
+ *first_input += 2;
+ return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
+ InputImmediate(offset + 1));
+ case kMode_MI:
+ *first_input += 1;
+ return Operand(InputImmediate(offset + 0));
+ default:
+ UNREACHABLE();
+ return Operand(no_reg);
+ }
+ }
+
+ Operand MemoryOperand() {
+ int first_input = 0;
+ return MemoryOperand(&first_input);
+ }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ jmp(code()->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ break;
+ }
+ case kIA32Add:
+ if (HasImmediateInput(instr, 1)) {
+ __ add(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ add(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32And:
+ if (HasImmediateInput(instr, 1)) {
+ __ and_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ and_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Cmp:
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Test:
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Imul:
+ if (HasImmediateInput(instr, 1)) {
+ __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+ } else {
+ __ imul(i.OutputRegister(), i.InputOperand(1));
+ }
+ break;
+ case kIA32Idiv:
+ __ cdq();
+ __ idiv(i.InputOperand(1));
+ break;
+ case kIA32Udiv:
+ __ xor_(edx, edx);
+ __ div(i.InputOperand(1));
+ break;
+ case kIA32Not:
+ __ not_(i.OutputOperand());
+ break;
+ case kIA32Neg:
+ __ neg(i.OutputOperand());
+ break;
+ case kIA32Or:
+ if (HasImmediateInput(instr, 1)) {
+ __ or_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ or_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Xor:
+ if (HasImmediateInput(instr, 1)) {
+ __ xor_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ xor_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Sub:
+ if (HasImmediateInput(instr, 1)) {
+ __ sub(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ sub(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Shl:
+ if (HasImmediateInput(instr, 1)) {
+ __ shl(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ shl_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Shr:
+ if (HasImmediateInput(instr, 1)) {
+ __ shr(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ shr_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Sar:
+ if (HasImmediateInput(instr, 1)) {
+ __ sar(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ sar_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ } else {
+ __ push(i.InputOperand(0));
+ }
+ break;
+ case kIA32CallCodeObject: {
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ call(Operand(reg, entry));
+ }
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ AddNopForSmiCodeInlining();
+ break;
+ }
+ case kIA32CallAddress:
+ if (HasImmediateInput(instr, 0)) {
+ // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
+ __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
+ RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ call(i.InputRegister(0));
+ }
+ break;
+ case kPopStack: {
+ int words = MiscField::decode(instr->opcode());
+ __ add(esp, Immediate(kPointerSize * words));
+ break;
+ }
+ case kIA32CallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ break;
+ }
+ case kSSEFloat64Cmp:
+ __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat64Add:
+ __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Sub:
+ __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mul:
+ __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Div:
+ __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mod: {
+ // TODO(dcarney): alignment is wrong.
+ __ sub(esp, Immediate(kDoubleSize));
+ // Move values to st(0) and st(1).
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ __ fld_d(Operand(esp, 0));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ __ fld_d(Operand(esp, 0));
+ // Loop while fprem isn't done.
+ Label mod_loop;
+ __ bind(&mod_loop);
+ // This instruction traps on all kinds of inputs, but we are assuming the
+ // floating point control word is set to ignore them all.
+ __ fprem();
+ // The following 2 instructions implicitly use eax.
+ __ fnstsw_ax();
+ __ sahf();
+ __ j(parity_even, &mod_loop);
+ // Move output to stack and clean up.
+ __ fstp(1);
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kSSEFloat64ToInt32:
+ __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kSSEFloat64ToUint32: {
+ XMMRegister scratch = xmm0;
+ __ Move(scratch, -2147483648.0);
+ // TODO(turbofan): IA32 SSE subsd() should take an operand.
+ __ addsd(scratch, i.InputDoubleRegister(0));
+ __ cvttsd2si(i.OutputRegister(), scratch);
+ __ add(i.OutputRegister(), Immediate(0x80000000));
+ break;
+ }
+ case kSSEInt32ToFloat64:
+ __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kSSEUint32ToFloat64:
+ // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+ __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kSSELoad:
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kSSEStore: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ break;
+ }
+ case kIA32LoadWord8:
+ __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord8: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_b(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord8I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_b(operand, i.InputInt8(index));
+ break;
+ }
+ case kIA32LoadWord16:
+ __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord16: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_w(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord16I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_w(operand, i.InputInt16(index));
+ break;
+ }
+ case kIA32LoadWord32:
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord32: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord32I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov(operand, i.InputImmediate(index));
+ break;
+ }
+ case kIA32StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ mov(Operand(object, index, times_1, 0), value);
+ __ lea(index, Operand(object, index, times_1, 0));
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ __ RecordWrite(object, index, value, mode);
+ break;
+ }
+ }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ IA32OperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kEqual:
+ __ j(equal, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ j(not_equal, tlabel);
+ break;
+ case kSignedLessThan:
+ __ j(less, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ j(greater_equal, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ j(less_equal, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ j(greater, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ j(below, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ j(above_equal, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ j(below_equal, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ j(above, tlabel);
+ break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
+ }
+ if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
+ __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ IA32OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = no_condition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kEqual:
+ cc = equal;
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kNotEqual:
+ cc = not_equal;
+ break;
+ case kSignedLessThan:
+ cc = less;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = greater_equal;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = less_equal;
+ break;
+ case kSignedGreaterThan:
+ cc = greater;
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = below;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = above_equal;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = below_equal;
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = above;
+ break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
+ }
+ __ bind(&check);
+ if (reg.is_byte_register()) {
+ // setcc for byte registers (al, bl, cl, dl).
+ __ setcc(cc, reg);
+ __ movzx_b(reg, reg);
+ } else {
+ // Emit a branch to set a register to either 1 or 0.
+ Label set;
+ __ j(cc, &set, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&set);
+ __ mov(reg, Immediate(1));
+ }
+ __ bind(&done);
+}
+
+
+// The calling convention for JSFunctions on IA32 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively. The
+// steps of the call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+// | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+// | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }-----------------------------------------------------------
+// | | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ ret #A+1 }-----------------------------------------------------------
+// | | caller frame |
+// ^ esp ^ ebp
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On IA32 it passes arguments on the
+// stack, the number of arguments in EAX, the address of the runtime function
+// in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
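To make this concrete, here is a rough, hand-written sketch of such a call site; it is not part of this patch, and 'fid', 'arg1', 'arg2' and 'isolate()' are placeholder identifiers. The CEntryStub/CallStub pattern is the usual MacroAssembler idiom of this V8 vintage, so treat the details as an assumption:

    // Sketch only: call a runtime function with two arguments, following the
    // convention described above.
    __ push(arg1);                         // arguments go on the stack
    __ push(arg2);
    __ mov(eax, Immediate(2));             // number of arguments in EAX
    __ mov(ebx, Immediate(ExternalReference(
                    Runtime::FunctionForId(fid), isolate())));  // entry in EBX
    // ESI is assumed to already hold the context.
    CEntryStub ces(isolate(), 1);          // result size 1
    __ CallStub(&ces);
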
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ set up arguments in registers and on the stack }-------------------------
+// | args | caller frame |
+// ^ esp ^ ebp
+// [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | regs | FP | RET | args | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ Frame* frame = code_->frame();
+ int stack_slots = frame->GetSpillSlotCount();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ // Assemble a prologue similar to the cdecl calling convention.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ register_save_area_size += kPointerSize;
+ }
+ frame->SetRegisterSaveAreaSize(register_save_area_size);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
+ __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
+ __ bind(&ok);
+ }
+
+ } else {
+ __ StubPrologue();
+ frame->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ if (stack_slots > 0) {
+ __ sub(esp, Immediate(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ add(esp, Immediate(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
+ }
+ }
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ } else {
+ // No saved registers.
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ }
+ } else {
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ ret(pop_count * kPointerSize);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ IA32OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ Operand dst = g.ToOperand(destination);
+ __ push(src);
+ __ pop(dst);
+ }
+ } else if (source->IsConstant()) {
+ Constant src_constant = g.ToConstant(source);
+ if (src_constant.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src = src_constant.ToHeapObject();
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ LoadHeapObject(dst, src);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*src)) {
+ __ PushHeapObject(src);
+ __ pop(dst);
+ } else {
+ __ mov(dst, src);
+ }
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, g.ToImmediate(source));
+ } else if (destination->IsStackSlot()) {
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, g.ToImmediate(source));
+ } else {
+ double v = g.ToDouble(source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, v);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ mov(dst0, Immediate(lower));
+ __ mov(dst1, Immediate(upper));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movsd(dst, src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movsd(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = g.ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ IA32OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ xchg(dst, src);
+ } else if (source->IsRegister() && destination->IsStackSlot()) {
+ // Register-memory.
+ __ xchg(g.ToRegister(source), g.ToOperand(destination));
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ __ push(dst);
+ __ push(src);
+ __ pop(dst);
+ __ pop(src);
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // XMM register-register swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(xmm0, src);
+ __ movaps(src, dst);
+ __ movaps(dst, xmm0);
+ } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ // XMM register-memory swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister reg = g.ToDoubleRegister(source);
+ Operand other = g.ToOperand(destination);
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movaps(reg, xmm0);
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ // Double-width memory-to-memory.
+ Operand src0 = g.ToOperand(source);
+ Operand src1 = g.HighOperand(source);
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
+ __ push(src0); // Then use stack to copy source to destination.
+ __ pop(dst0);
+ __ push(src1);
+ __ pop(dst1);
+ __ movsd(src0, xmm0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+#undef __
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ if (start_pc + 1 != end_pc) {
+ return false;
+ }
+ return *(code->instruction_start() + start_pc) ==
+ v8::internal::Assembler::kNopByte;
+}
+
+#endif // DEBUG
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
new file mode 100644
index 000000000..f175ebb55
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -0,0 +1,88 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// IA32-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Test) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32Push) \
+ V(IA32CallCodeObject) \
+ V(IA32CallAddress) \
+ V(PopStack) \
+ V(IA32CallJSFunction) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSELoad) \
+ V(SSEStore) \
+ V(IA32LoadWord8) \
+ V(IA32StoreWord8) \
+ V(IA32StoreWord8I) \
+ V(IA32LoadWord16) \
+ V(IA32StoreWord16) \
+ V(IA32StoreWord16I) \
+ V(IA32LoadWord32) \
+ V(IA32StoreWord32) \
+ V(IA32StoreWord32I) \
+ V(IA32StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MI) /* [K] */ \
+ V(MR) /* [%r0] */ \
+ V(MRI) /* [%r0 + K] */ \
+ V(MR1I) /* [%r0 + %r1 * 1 + K] */ \
+ V(MR2I) /* [%r0 + %r1 * 2 + K] */ \
+ V(MR4I) /* [%r0 + %r1 * 4 + K] */ \
+ V(MR8I) /* [%r0 + %r1 * 8 + K] */
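As a usage sketch (mirroring VisitLoad in the IA32 instruction selector later in this patch; 'g', 'node', 'base', 'index' and 'instr' are assumed to be set up as there), a load with a register base and an immediate index is emitted with the MRI mode, and the code generator decodes the mode again after register allocation:

    // Selector side: pack the addressing mode into the instruction code.
    Emit(kIA32LoadWord32 | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));

    // Code-generator side: recover the mode to pick the assembler form.
    AddressingMode mode = AddressingModeField::decode(instr->opcode());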
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
new file mode 100644
index 000000000..a057a1e71
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -0,0 +1,560 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds IA32-specific methods for generating operands.
+class IA32OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit IA32OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseByteRegister(Node* node) {
+ // TODO(dcarney): relax constraint.
+ return UseFixed(node, edx);
+ }
+
+ bool CanBeImmediate(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kExternalConstant:
+ return true;
+ case IrOpcode::kHeapConstant: {
+ // Constants in new space cannot be used as immediates in V8 because
+ // the GC does not scan code objects when collecting the new generation.
+ Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
+ return !isolate()->heap()->InNewSpace(*value);
+ }
+ default:
+ return false;
+ }
+ }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = OpParameter<MachineType>(node);
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* output = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSELoad;
+ break;
+ case kMachineWord8:
+ opcode = kIA32LoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kIA32LoadWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kIA32LoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ if (Int32Matcher(index).Is(0)) { // load [#base + #0]
+ Emit(opcode | AddressingModeField::encode(kMode_MI), output,
+ g.UseImmediate(base));
+ } else { // load [#base + %index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(index), g.UseImmediate(base));
+ }
+ } else if (g.CanBeImmediate(index)) { // load [%base + #index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else { // load [%base + %index + K]
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ DCHECK_EQ(kMachineTagged, rep);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+ Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+ g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ bool is_immediate = false;
+ InstructionOperand* val;
+ if (rep == kMachineFloat64) {
+ val = g.UseDoubleRegister(value);
+ } else {
+ is_immediate = g.CanBeImmediate(value);
+ if (is_immediate) {
+ val = g.UseImmediate(value);
+ } else if (rep == kMachineWord8) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+ }
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSEStore;
+ break;
+ case kMachineWord8:
+ opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ if (Int32Matcher(index).Is(0)) { // store [#base], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
+ g.UseImmediate(base), val);
+ } else { // store [#base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ }
+ } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else { // store [%base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+ // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+ // operand, as this might be the last use and its register can then be reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
+ // TODO(turbofan): Use byte register here.
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(ARRAY_SIZE(inputs), input_count);
+ DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kIA32And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kIA32Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ IA32OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ } else {
+ VisitBinop(this, node, kIA32Xor);
+ }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // TODO(turbofan): assembler only supports some addressing modes for shifts.
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ Int32BinopMatcher m(node);
+ if (m.right().IsWord32And()) {
+ Int32BinopMatcher mright(right);
+ if (mright.right().Is(0x1F)) {
+ right = mright.left().node();
+ }
+ }
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, ecx));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitShift(this, node, kIA32Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitShift(this, node, kIA32Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitShift(this, node, kIA32Sar);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kIA32Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ IA32OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+ } else {
+ VisitBinop(this, node, kIA32Sub);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (g.CanBeImmediate(right)) {
+ Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else if (g.CanBeImmediate(left)) {
+ Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
+ g.UseImmediate(left));
+ } else {
+ // TODO(turbofan): select better left operand.
+ Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+
+static inline void VisitDiv(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(edx)};
+ size_t temp_count = ARRAY_SIZE(temps);
+ selector->Emit(opcode, g.DefineAsFixed(node, eax),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitDiv(this, node, kIA32Udiv);
+}
+
+
+static inline void VisitMod(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ size_t temp_count = ARRAY_SIZE(temps);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitMod(this, node, kIA32Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ IA32OperandGenerator g(this);
+ // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+ Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ IA32OperandGenerator g(this);
+ // TODO(turbofan): IA32 SSE subsd() should take an operand.
+ Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand* temps[] = {g.TempRegister(eax)};
+ Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kIA32Add, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kIA32Sub, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static inline void VisitCompare(InstructionSelector* selector,
+ InstructionCode opcode,
+ InstructionOperand* left,
+ InstructionOperand* right,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(opcode), NULL, left, right,
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ // TODO(titzer): Needs byte register.
+ selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+ left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont, bool commutative) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right)) {
+ VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+ } else if (g.CanBeImmediate(left)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kIA32Cmp, cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kIA32Test, cont, true);
+ default:
+ break;
+ }
+
+ IA32OperandGenerator g(this);
+ VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kIA32Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
+ cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ IA32OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+
+ // Push any stack arguments.
+ for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+ Node* input = buffer.pushed_nodes[i];
+ // TODO(titzer): handle pushing double parameters.
+ Emit(kIA32Push, NULL,
+ g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kIA32CallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kIA32CallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ DCHECK(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (descriptor->kind() == CallDescriptor::kCallAddress &&
+ buffer.pushed_count > 0) {
+ DCHECK(deoptimization == NULL && continuation == NULL);
+ Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
new file mode 100644
index 000000000..57a2c6918
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/linkage-ia32.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return eax; }
+ static Register ReturnValue2Reg() { return edx; }
+ static Register JSCallFunctionReg() { return edi; }
+ static Register ContextReg() { return esi; }
+ static Register RuntimeCallFunctionReg() { return ebx; }
+ static Register RuntimeCallArgCountReg() { return eax; }
+ static RegList CCalleeSaveRegisters() {
+ return esi.bit() | edi.bit() | ebx.bit();
+ }
+ static Register CRegisterParameter(int i) { return no_reg; }
+ static int CRegisterParametersLength() { return 0; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ zone, descriptor, stack_parameter_count, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
new file mode 100644
index 000000000..35c8e31f2
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -0,0 +1,117 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
+#define V8_COMPILER_INSTRUCTION_CODES_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/instruction-codes-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/instruction-codes-arm64.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/instruction-codes-x64.h"
+#else
+#define TARGET_ARCH_OPCODE_LIST(V)
+#define TARGET_ADDRESSING_MODE_LIST(V)
+#endif
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Target-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define ARCH_OPCODE_LIST(V) \
+ V(ArchDeoptimize) \
+ V(ArchJmp) \
+ V(ArchNop) \
+ V(ArchRet) \
+ TARGET_ARCH_OPCODE_LIST(V)
+
+enum ArchOpcode {
+#define DECLARE_ARCH_OPCODE(Name) k##Name,
+ ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
+#undef DECLARE_ARCH_OPCODE
+#define COUNT_ARCH_OPCODE(Name) +1
+ kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+#undef COUNT_ARCH_OPCODE
+};
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao);
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define ADDRESSING_MODE_LIST(V) \
+ V(None) \
+ TARGET_ADDRESSING_MODE_LIST(V)
+
+enum AddressingMode {
+#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+ ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+#undef DECLARE_ADDRESSING_MODE
+#define COUNT_ADDRESSING_MODE(Name) +1
+ kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+#undef COUNT_ADDRESSING_MODE
+};
+
+OStream& operator<<(OStream& os, const AddressingMode& am);
+
+// The mode of the flags continuation (see below).
+enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+
+OStream& operator<<(OStream& os, const FlagsMode& fm);
+
+// The condition of flags continuation (see below).
+enum FlagsCondition {
+ kEqual,
+ kNotEqual,
+ kSignedLessThan,
+ kSignedGreaterThanOrEqual,
+ kSignedLessThanOrEqual,
+ kSignedGreaterThan,
+ kUnsignedLessThan,
+ kUnsignedGreaterThanOrEqual,
+ kUnsignedLessThanOrEqual,
+ kUnsignedGreaterThan,
+ kUnorderedEqual,
+ kUnorderedNotEqual,
+ kUnorderedLessThan,
+ kUnorderedGreaterThanOrEqual,
+ kUnorderedLessThanOrEqual,
+ kUnorderedGreaterThan,
+ kOverflow,
+ kNotOverflow
+};
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc);
+
+// The InstructionCode is an opaque, target-specific integer that encodes
+// what code to emit for an instruction in the code generator. It is not
+// interesting to the register allocator, as the inputs and flags on the
+// instructions specify everything of interest.
+typedef int32_t InstructionCode;
+
+// Helpers for encoding / decoding InstructionCode into the fields needed
+// for code generation. We encode the instruction, addressing mode, and flags
+// continuation into a single InstructionCode which is stored as part of
+// the instruction.
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
+typedef BitField<AddressingMode, 7, 4> AddressingModeField;
+typedef BitField<FlagsMode, 11, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
+typedef BitField<int, 13, 19> MiscField;
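For illustration, a sketch (not code from this patch) of how an IA32 compare fused with a branch on kUnsignedLessThan is packed into and unpacked from an InstructionCode using the bit fields above; kIA32Cmp comes from the target opcode list included at the top of this header:

    // Packing, as the instruction selector and FlagsContinuation::Encode do it.
    InstructionCode code = kIA32Cmp;
    code |= AddressingModeField::encode(kMode_None);
    code |= FlagsModeField::encode(kFlags_branch);
    code |= FlagsConditionField::encode(kUnsignedLessThan);

    // Unpacking, as the code generator does it after register allocation.
    ArchOpcode arch = ArchOpcodeField::decode(code);          // kIA32Cmp
    FlagsMode mode = FlagsModeField::decode(code);            // kFlags_branch
    FlagsCondition cond = FlagsConditionField::decode(code);  // kUnsignedLessThan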
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_CODES_H_
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
new file mode 100644
index 000000000..ac446b38e
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -0,0 +1,371 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class for the instruction selector that simplifies construction of
+// Operands. This class implements a base for architecture-specific helpers.
+class OperandGenerator {
+ public:
+ explicit OperandGenerator(InstructionSelector* selector)
+ : selector_(selector) {}
+
+ InstructionOperand* DefineAsRegister(Node* node) {
+ return Define(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* DefineAsDoubleRegister(Node* node) {
+ return Define(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* DefineSameAsFirst(Node* result) {
+ return Define(result, new (zone())
+ UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+ }
+
+ InstructionOperand* DefineAsFixed(Node* node, Register reg) {
+ return Define(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) {
+ return Define(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* DefineAsConstant(Node* node) {
+ selector()->MarkAsDefined(node);
+ sequence()->AddConstant(node->id(), ToConstant(node));
+ return ConstantOperand::Create(node->id(), zone());
+ }
+
+ InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) {
+ return Define(node, ToUnallocatedOperand(location));
+ }
+
+ InstructionOperand* Use(Node* node) {
+ return Use(node,
+ new (zone()) UnallocatedOperand(
+ UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+ }
+
+ InstructionOperand* UseRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START));
+ }
+
+ InstructionOperand* UseDoubleRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START));
+ }
+
+ // Use register or operand for the node. If a register is chosen, it won't
+ // alias any temporary or output registers.
+ InstructionOperand* UseUnique(Node* node) {
+ return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+ }
+
+ // Use a unique register for the node that does not alias any temporary or
+ // output registers.
+ InstructionOperand* UseUniqueRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ // Use a unique double register for the node that does not alias any temporary
+ // or output double registers.
+ InstructionOperand* UseUniqueDoubleRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* UseFixed(Node* node, Register reg) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* UseImmediate(Node* node) {
+ int index = sequence()->AddImmediate(ToConstant(node));
+ return ImmediateOperand::Create(index, zone());
+ }
+
+ InstructionOperand* UseLocation(Node* node, LinkageLocation location) {
+ return Use(node, ToUnallocatedOperand(location));
+ }
+
+ InstructionOperand* TempRegister() {
+ UnallocatedOperand* op =
+ new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START);
+ op->set_virtual_register(sequence()->NextVirtualRegister());
+ return op;
+ }
+
+ InstructionOperand* TempDoubleRegister() {
+ UnallocatedOperand* op =
+ new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START);
+ op->set_virtual_register(sequence()->NextVirtualRegister());
+ sequence()->MarkAsDouble(op->virtual_register());
+ return op;
+ }
+
+ InstructionOperand* TempRegister(Register reg) {
+ return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+ }
+
+ InstructionOperand* TempImmediate(int32_t imm) {
+ int index = sequence()->AddImmediate(Constant(imm));
+ return ImmediateOperand::Create(index, zone());
+ }
+
+ InstructionOperand* Label(BasicBlock* block) {
+ // TODO(bmeurer): We misuse ImmediateOperand here.
+ return TempImmediate(block->id());
+ }
+
+ protected:
+ Graph* graph() const { return selector()->graph(); }
+ InstructionSelector* selector() const { return selector_; }
+ InstructionSequence* sequence() const { return selector()->sequence(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Zone* zone() const { return selector()->instruction_zone(); }
+
+ private:
+ static Constant ToConstant(const Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return Constant(ValueOf<int32_t>(node->op()));
+ case IrOpcode::kInt64Constant:
+ return Constant(ValueOf<int64_t>(node->op()));
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ return Constant(ValueOf<double>(node->op()));
+ case IrOpcode::kExternalConstant:
+ return Constant(ValueOf<ExternalReference>(node->op()));
+ case IrOpcode::kHeapConstant:
+ return Constant(ValueOf<Handle<HeapObject> >(node->op()));
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return Constant(static_cast<int32_t>(0));
+ }
+
+ UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NOT_NULL(operand);
+ operand->set_virtual_register(node->id());
+ selector()->MarkAsDefined(node);
+ return operand;
+ }
+
+ UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+ DCHECK_NOT_NULL(node);
+ DCHECK_NOT_NULL(operand);
+ operand->set_virtual_register(node->id());
+ selector()->MarkAsUsed(node);
+ return operand;
+ }
+
+ UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
+ if (location.location_ == LinkageLocation::ANY_REGISTER) {
+ return new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+ }
+ if (location.location_ < 0) {
+ return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+ location.location_);
+ }
+ if (location.rep_ == kMachineFloat64) {
+ return new (zone()) UnallocatedOperand(
+ UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+ }
+ return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ location.location_);
+ }
+
+ InstructionSelector* selector_;
+};
+
+
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation V8_FINAL {
+ public:
+ FlagsContinuation() : mode_(kFlags_none) {}
+
+ // Creates a new flags continuation from the given condition and true/false
+ // blocks.
+ FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
+ BasicBlock* false_block)
+ : mode_(kFlags_branch),
+ condition_(condition),
+ true_block_(true_block),
+ false_block_(false_block) {
+ DCHECK_NOT_NULL(true_block);
+ DCHECK_NOT_NULL(false_block);
+ }
+
+ // Creates a new flags continuation from the given condition and result node.
+ FlagsContinuation(FlagsCondition condition, Node* result)
+ : mode_(kFlags_set), condition_(condition), result_(result) {
+ DCHECK_NOT_NULL(result);
+ }
+
+ bool IsNone() const { return mode_ == kFlags_none; }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsSet() const { return mode_ == kFlags_set; }
+ FlagsCondition condition() const {
+ DCHECK(!IsNone());
+ return condition_;
+ }
+ Node* result() const {
+ DCHECK(IsSet());
+ return result_;
+ }
+ BasicBlock* true_block() const {
+ DCHECK(IsBranch());
+ return true_block_;
+ }
+ BasicBlock* false_block() const {
+ DCHECK(IsBranch());
+ return false_block_;
+ }
+
+ void Negate() {
+ DCHECK(!IsNone());
+ condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+ }
+
+ void Commute() {
+ DCHECK(!IsNone());
+ switch (condition_) {
+ case kEqual:
+ case kNotEqual:
+ case kOverflow:
+ case kNotOverflow:
+ return;
+ case kSignedLessThan:
+ condition_ = kSignedGreaterThan;
+ return;
+ case kSignedGreaterThanOrEqual:
+ condition_ = kSignedLessThanOrEqual;
+ return;
+ case kSignedLessThanOrEqual:
+ condition_ = kSignedGreaterThanOrEqual;
+ return;
+ case kSignedGreaterThan:
+ condition_ = kSignedLessThan;
+ return;
+ case kUnsignedLessThan:
+ condition_ = kUnsignedGreaterThan;
+ return;
+ case kUnsignedGreaterThanOrEqual:
+ condition_ = kUnsignedLessThanOrEqual;
+ return;
+ case kUnsignedLessThanOrEqual:
+ condition_ = kUnsignedGreaterThanOrEqual;
+ return;
+ case kUnsignedGreaterThan:
+ condition_ = kUnsignedLessThan;
+ return;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ return;
+ case kUnorderedLessThan:
+ condition_ = kUnorderedGreaterThan;
+ return;
+ case kUnorderedGreaterThanOrEqual:
+ condition_ = kUnorderedLessThanOrEqual;
+ return;
+ case kUnorderedLessThanOrEqual:
+ condition_ = kUnorderedGreaterThanOrEqual;
+ return;
+ case kUnorderedGreaterThan:
+ condition_ = kUnorderedLessThan;
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+ bool negate = condition_ == kEqual;
+ condition_ = condition;
+ if (negate) Negate();
+ }
+
+ void SwapBlocks() { std::swap(true_block_, false_block_); }
+
+ // Encodes this flags continuation into the given opcode.
+ InstructionCode Encode(InstructionCode opcode) {
+ opcode |= FlagsModeField::encode(mode_);
+ if (mode_ != kFlags_none) {
+ opcode |= FlagsConditionField::encode(condition_);
+ }
+ return opcode;
+ }
+
+ private:
+ FlagsMode mode_;
+ FlagsCondition condition_;
+ Node* result_; // Only valid if mode_ == kFlags_set.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+};
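A short usage sketch (not part of the patch; 'node', 'tbranch' and 'fbranch' are assumed to come from the branch visitor): wrapping the branch targets in a FlagsContinuation lets the selector fuse a compare with the branch that consumes its flags, producing a single instruction whose opcode carries the flags fields from instruction-codes.h:

    FlagsContinuation cont(kUnsignedLessThan, tbranch, fbranch);
    VisitWord32Compare(node, &cont);
    // On IA32 this selects kIA32Cmp with kFlags_branch and kUnsignedLessThan
    // encoded into the opcode, and the block labels appended as inputs.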
+
+
+// An internal helper class for generating the operands to calls.
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+struct CallBuffer {
+ CallBuffer(Zone* zone, CallDescriptor* descriptor);
+
+ int output_count;
+ CallDescriptor* descriptor;
+ Node** output_nodes;
+ InstructionOperand** outputs;
+ InstructionOperand** fixed_and_control_args;
+ int fixed_count;
+ Node** pushed_nodes;
+ int pushed_count;
+
+ int input_count() { return descriptor->InputCount(); }
+
+ int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }
+
+ int fixed_and_control_count() { return fixed_count + control_count(); }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
new file mode 100644
index 000000000..541e0452f
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -0,0 +1,1053 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+ SourcePositionTable* source_positions,
+ Features features)
+ : zone_(sequence->isolate()),
+ sequence_(sequence),
+ source_positions_(source_positions),
+ features_(features),
+ current_block_(NULL),
+ instructions_(InstructionDeque::allocator_type(zone())),
+ defined_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())),
+ used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
+
+
+void InstructionSelector::SelectInstructions() {
+ // Mark the inputs of all phis in loop headers as used.
+ BasicBlockVector* blocks = schedule()->rpo_order();
+ for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+ BasicBlock* block = *i;
+ if (!block->IsLoopHeader()) continue;
+ DCHECK_NE(0, block->PredecessorCount());
+ DCHECK_NE(1, block->PredecessorCount());
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ Node* phi = *j;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+
+ // Mark all inputs as used.
+ Node::Inputs inputs = phi->inputs();
+ for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+ MarkAsUsed(*k);
+ }
+ }
+ }
+
+ // Visit each basic block in post order.
+ for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+ VisitBlock(*i);
+ }
+
+ // Schedule the selected instructions.
+ for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+ BasicBlock* block = *i;
+ size_t end = block->code_end_;
+ size_t start = block->code_start_;
+ sequence()->StartBlock(block);
+ while (start-- > end) {
+ sequence()->AddInstruction(instructions_[start], block);
+ }
+ sequence()->EndBlock(block);
+ }
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a,
+ InstructionOperand* b, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a,
+ InstructionOperand* b,
+ InstructionOperand* c, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+ InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+ size_t temp_count, InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c, d};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+ size_t input_count, InstructionOperand** inputs, size_t temp_count,
+ InstructionOperand** temps) {
+ Instruction* instr =
+ Instruction::New(instruction_zone(), opcode, output_count, outputs,
+ input_count, inputs, temp_count, temps);
+ return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+ instructions_.push_back(instr);
+ return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+ return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+ block->deferred_ == current_block_->deferred_;
+}
+
+
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+ return node->OwnedBy(user) &&
+ schedule()->block(node) == schedule()->block(user);
+}
+
+
+bool InstructionSelector::IsDefined(Node* node) const {
+ DCHECK_NOT_NULL(node);
+ NodeId id = node->id();
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(defined_.size()));
+ return defined_[id];
+}
+
+
+void InstructionSelector::MarkAsDefined(Node* node) {
+ DCHECK_NOT_NULL(node);
+ NodeId id = node->id();
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(defined_.size()));
+ defined_[id] = true;
+}
+
+
+bool InstructionSelector::IsUsed(Node* node) const {
+ if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+ NodeId id = node->id();
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(used_.size()));
+ return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+ DCHECK_NOT_NULL(node);
+ NodeId id = node->id();
+ DCHECK(id >= 0);
+ DCHECK(id < static_cast<NodeId>(used_.size()));
+ used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+ DCHECK_NOT_NULL(node);
+ return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+ DCHECK_NOT_NULL(node);
+ DCHECK(!IsReference(node));
+ sequence()->MarkAsDouble(node->id());
+
+ // Propagate "doubleness" throughout phis.
+ for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+ Node* user = *i;
+ if (user->opcode() != IrOpcode::kPhi) continue;
+ if (IsDouble(user)) continue;
+ MarkAsDouble(user);
+ }
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+ DCHECK_NOT_NULL(node);
+ return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+ DCHECK_NOT_NULL(node);
+ DCHECK(!IsDouble(node));
+ sequence()->MarkAsReference(node->id());
+
+ // Propagate "referenceness" throughout phis.
+ for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+ Node* user = *i;
+ if (user->opcode() != IrOpcode::kPhi) continue;
+ if (IsReference(user)) continue;
+ MarkAsReference(user);
+ }
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
+ DCHECK_NOT_NULL(node);
+ if (rep == kMachineFloat64) MarkAsDouble(node);
+ if (rep == kMachineTagged) MarkAsReference(node);
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
+ : output_count(0),
+ descriptor(d),
+ output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
+ outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
+ fixed_and_control_args(
+ zone->NewArray<InstructionOperand*>(input_count() + control_count())),
+ fixed_count(0),
+ pushed_nodes(zone->NewArray<Node*>(input_count())),
+ pushed_count(0) {
+ if (d->ReturnCount() > 1) {
+ memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount()); // NOLINT
+ }
+ memset(pushed_nodes, 0, sizeof(Node*) * input_count()); // NOLINT
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+ bool call_code_immediate,
+ bool call_address_immediate,
+ BasicBlock* cont_node,
+ BasicBlock* deopt_node) {
+ OperandGenerator g(this);
+ DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+ DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
+ buffer->input_count());
+
+ if (buffer->descriptor->ReturnCount() > 0) {
+ // Collect the projections that represent multiple outputs from this call.
+ if (buffer->descriptor->ReturnCount() == 1) {
+ buffer->output_nodes[0] = call;
+ } else {
+ call->CollectProjections(buffer->descriptor->ReturnCount(),
+ buffer->output_nodes);
+ }
+
+ // Filter out the outputs that aren't live because no projection uses them.
+ for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+ if (buffer->output_nodes[i] != NULL) {
+ Node* output = buffer->output_nodes[i];
+ LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ MarkAsRepresentation(location.representation(), output);
+ buffer->outputs[buffer->output_count++] =
+ g.DefineAsLocation(output, location);
+ }
+ }
+ }
+
+ buffer->fixed_count = 1; // First argument is always the callee.
+ Node* callee = call->InputAt(0);
+ switch (buffer->descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ buffer->fixed_and_control_args[0] =
+ (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+ ? g.UseImmediate(callee)
+ : g.UseRegister(callee);
+ break;
+ case CallDescriptor::kCallAddress:
+ buffer->fixed_and_control_args[0] =
+ (call_address_immediate &&
+ (callee->opcode() == IrOpcode::kInt32Constant ||
+ callee->opcode() == IrOpcode::kInt64Constant))
+ ? g.UseImmediate(callee)
+ : g.UseRegister(callee);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ buffer->fixed_and_control_args[0] =
+ g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+ break;
+ }
+
+ int input_count = buffer->input_count();
+
+  // Split the arguments into pushed_nodes and fixed_and_control_args. Pushed
+  // arguments require an explicit push instruction before the call and do not
+  // appear as arguments to the call. Everything else ends up as an
+  // InstructionOperand argument to the call.
+ InputIter iter(call->inputs().begin());
+ for (int index = 0; index < input_count; ++iter, ++index) {
+ DCHECK(iter != call->inputs().end());
+ DCHECK(index == iter.index());
+ if (index == 0) continue; // The first argument (callee) is already done.
+ InstructionOperand* op =
+ g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
+ if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+ int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+ DCHECK(buffer->pushed_nodes[stack_index] == NULL);
+ buffer->pushed_nodes[stack_index] = *iter;
+ buffer->pushed_count++;
+ } else {
+ buffer->fixed_and_control_args[buffer->fixed_count] = op;
+ buffer->fixed_count++;
+ }
+ }
+
+ // If the call can deoptimize, we add the continuation and deoptimization
+ // block labels.
+ if (buffer->descriptor->CanLazilyDeoptimize()) {
+ DCHECK(cont_node != NULL);
+ DCHECK(deopt_node != NULL);
+ buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
+ buffer->fixed_and_control_args[buffer->fixed_count + 1] =
+ g.Label(deopt_node);
+ } else {
+ DCHECK(cont_node == NULL);
+ DCHECK(deopt_node == NULL);
+ }
+
+ DCHECK(input_count == (buffer->fixed_count + buffer->pushed_count));
+}
+
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+ DCHECK_EQ(NULL, current_block_);
+ current_block_ = block;
+ int current_block_end = static_cast<int>(instructions_.size());
+
+ // Generate code for the block control "top down", but schedule the code
+ // "bottom up".
+ VisitControl(block);
+ std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+ // Visit code in reverse control flow order, because architecture-specific
+ // matching may cover more than one node at a time.
+ for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+ ++i) {
+ Node* node = *i;
+ // Skip nodes that are unused or already defined.
+ if (!IsUsed(node) || IsDefined(node)) continue;
+ // Generate code for this node "top down", but schedule the code "bottom
+ // up".
+ size_t current_node_end = instructions_.size();
+ VisitNode(node);
+ std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+ }
+
+ // We're done with the block.
+ // TODO(bmeurer): We should not mutate the schedule.
+ block->code_end_ = current_block_end;
+ block->code_start_ = static_cast<int>(instructions_.size());
+
+ current_block_ = NULL;
+}
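The emit-then-reverse pattern in VisitBlock is easy to miss: instructions are appended in visiting order and the freshly appended tail is then flipped, so later-emitted instructions end up earlier in the stream. A stand-alone sketch of the same idiom on a plain std::vector (nothing V8-specific is assumed):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> stream = {1, 2};     // instructions emitted so far
      std::ptrdiff_t mark = stream.size();  // plays the role of current_node_end
      stream.push_back(30);                 // a visit emits code "top down"...
      stream.push_back(31);
      std::reverse(stream.begin() + mark, stream.end());
      // stream is now {1, 2, 31, 30}: the new code is scheduled "bottom up".
      return 0;
    }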
+
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+ // Branch targets should not have phis.
+ for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+ const Node* node = *i;
+ CHECK_NE(IrOpcode::kPhi, node->opcode());
+ }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+ Node* input = block->control_input_;
+ switch (block->control_) {
+ case BasicBlockData::kGoto:
+ return VisitGoto(block->SuccessorAt(0));
+ case BasicBlockData::kBranch: {
+ DCHECK_EQ(IrOpcode::kBranch, input->opcode());
+ BasicBlock* tbranch = block->SuccessorAt(0);
+ BasicBlock* fbranch = block->SuccessorAt(1);
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ CheckNoPhis(tbranch);
+ CheckNoPhis(fbranch);
+ if (tbranch == fbranch) return VisitGoto(tbranch);
+ return VisitBranch(input, tbranch, fbranch);
+ }
+ case BasicBlockData::kReturn: {
+ // If the result itself is a return, return its input.
+ Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+ ? input->InputAt(0)
+ : input;
+ return VisitReturn(value);
+ }
+ case BasicBlockData::kThrow:
+ return VisitThrow(input);
+ case BasicBlockData::kDeoptimize:
+ return VisitDeoptimize(input);
+ case BasicBlockData::kCall: {
+ BasicBlock* deoptimization = block->SuccessorAt(0);
+ BasicBlock* continuation = block->SuccessorAt(1);
+ VisitCall(input, continuation, deoptimization);
+ break;
+ }
+ case BasicBlockData::kNone: {
+ // TODO(titzer): exit block doesn't have control.
+ DCHECK(input == NULL);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+ DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (!source_position.IsUnknown()) {
+ DCHECK(!source_position.IsInvalid());
+ if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+ Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kStart:
+ case IrOpcode::kLoop:
+ case IrOpcode::kEnd:
+ case IrOpcode::kBranch:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kEffectPhi:
+ case IrOpcode::kMerge:
+ case IrOpcode::kLazyDeoptimization:
+ case IrOpcode::kContinuation:
+ // No code needed for these graph artifacts.
+ return;
+ case IrOpcode::kParameter: {
+ int index = OpParameter<int>(node);
+ MachineType rep = linkage()
+ ->GetIncomingDescriptor()
+ ->GetInputLocation(index)
+ .representation();
+ MarkAsRepresentation(rep, node);
+ return VisitParameter(node);
+ }
+ case IrOpcode::kPhi:
+ return VisitPhi(node);
+ case IrOpcode::kProjection:
+ return VisitProjection(node);
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kExternalConstant:
+ return VisitConstant(node);
+ case IrOpcode::kFloat64Constant:
+ return MarkAsDouble(node), VisitConstant(node);
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kNumberConstant:
+ // TODO(turbofan): only mark non-smis as references.
+ return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kCall:
+ return VisitCall(node, NULL, NULL);
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ return;
+ case IrOpcode::kLoad: {
+ MachineType load_rep = OpParameter<MachineType>(node);
+ MarkAsRepresentation(load_rep, node);
+ return VisitLoad(node);
+ }
+ case IrOpcode::kStore:
+ return VisitStore(node);
+ case IrOpcode::kWord32And:
+ return VisitWord32And(node);
+ case IrOpcode::kWord32Or:
+ return VisitWord32Or(node);
+ case IrOpcode::kWord32Xor:
+ return VisitWord32Xor(node);
+ case IrOpcode::kWord32Shl:
+ return VisitWord32Shl(node);
+ case IrOpcode::kWord32Shr:
+ return VisitWord32Shr(node);
+ case IrOpcode::kWord32Sar:
+ return VisitWord32Sar(node);
+ case IrOpcode::kWord32Equal:
+ return VisitWord32Equal(node);
+ case IrOpcode::kWord64And:
+ return VisitWord64And(node);
+ case IrOpcode::kWord64Or:
+ return VisitWord64Or(node);
+ case IrOpcode::kWord64Xor:
+ return VisitWord64Xor(node);
+ case IrOpcode::kWord64Shl:
+ return VisitWord64Shl(node);
+ case IrOpcode::kWord64Shr:
+ return VisitWord64Shr(node);
+ case IrOpcode::kWord64Sar:
+ return VisitWord64Sar(node);
+ case IrOpcode::kWord64Equal:
+ return VisitWord64Equal(node);
+ case IrOpcode::kInt32Add:
+ return VisitInt32Add(node);
+ case IrOpcode::kInt32AddWithOverflow:
+ return VisitInt32AddWithOverflow(node);
+ case IrOpcode::kInt32Sub:
+ return VisitInt32Sub(node);
+ case IrOpcode::kInt32SubWithOverflow:
+ return VisitInt32SubWithOverflow(node);
+ case IrOpcode::kInt32Mul:
+ return VisitInt32Mul(node);
+ case IrOpcode::kInt32Div:
+ return VisitInt32Div(node);
+ case IrOpcode::kInt32UDiv:
+ return VisitInt32UDiv(node);
+ case IrOpcode::kInt32Mod:
+ return VisitInt32Mod(node);
+ case IrOpcode::kInt32UMod:
+ return VisitInt32UMod(node);
+ case IrOpcode::kInt32LessThan:
+ return VisitInt32LessThan(node);
+ case IrOpcode::kInt32LessThanOrEqual:
+ return VisitInt32LessThanOrEqual(node);
+ case IrOpcode::kUint32LessThan:
+ return VisitUint32LessThan(node);
+ case IrOpcode::kUint32LessThanOrEqual:
+ return VisitUint32LessThanOrEqual(node);
+ case IrOpcode::kInt64Add:
+ return VisitInt64Add(node);
+ case IrOpcode::kInt64Sub:
+ return VisitInt64Sub(node);
+ case IrOpcode::kInt64Mul:
+ return VisitInt64Mul(node);
+ case IrOpcode::kInt64Div:
+ return VisitInt64Div(node);
+ case IrOpcode::kInt64UDiv:
+ return VisitInt64UDiv(node);
+ case IrOpcode::kInt64Mod:
+ return VisitInt64Mod(node);
+ case IrOpcode::kInt64UMod:
+ return VisitInt64UMod(node);
+ case IrOpcode::kInt64LessThan:
+ return VisitInt64LessThan(node);
+ case IrOpcode::kInt64LessThanOrEqual:
+ return VisitInt64LessThanOrEqual(node);
+ case IrOpcode::kConvertInt32ToInt64:
+ return VisitConvertInt32ToInt64(node);
+ case IrOpcode::kConvertInt64ToInt32:
+ return VisitConvertInt64ToInt32(node);
+ case IrOpcode::kChangeInt32ToFloat64:
+ return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+ case IrOpcode::kChangeUint32ToFloat64:
+ return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+ case IrOpcode::kChangeFloat64ToInt32:
+ return VisitChangeFloat64ToInt32(node);
+ case IrOpcode::kChangeFloat64ToUint32:
+ return VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kFloat64Add:
+ return MarkAsDouble(node), VisitFloat64Add(node);
+ case IrOpcode::kFloat64Sub:
+ return MarkAsDouble(node), VisitFloat64Sub(node);
+ case IrOpcode::kFloat64Mul:
+ return MarkAsDouble(node), VisitFloat64Mul(node);
+ case IrOpcode::kFloat64Div:
+ return MarkAsDouble(node), VisitFloat64Div(node);
+ case IrOpcode::kFloat64Mod:
+ return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Equal:
+ return VisitFloat64Equal(node);
+ case IrOpcode::kFloat64LessThan:
+ return VisitFloat64LessThan(node);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return VisitFloat64LessThanOrEqual(node);
+ default:
+ V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+ node->opcode(), node->op()->mnemonic(), node->id());
+ }
+}
+
+
+#if V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32Test(m.left().node(), &cont);
+ }
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64Test(m.left().node(), &cont);
+ }
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitInt32AddWithOverflow(node, &cont);
+ }
+ FlagsContinuation cont;
+ VisitInt32AddWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitInt32SubWithOverflow(node, &cont);
+ }
+ FlagsContinuation cont;
+ VisitInt32SubWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+#endif // V8_TURBOFAN_BACKEND
+
+// 32-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+#endif // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+
+// 32-bit targets and unsupported architectures need dummy implementations of
+// selected 64-bit ops.
+#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+ OpParameter<int>(node))));
+}
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+ // TODO(bmeurer): Emit a PhiInstruction here.
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ MarkAsUsed(*i);
+ }
+}
+
+
+void InstructionSelector::VisitProjection(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ switch (value->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ if (OpParameter<int32_t>(node) == 0) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ } else {
+ DCHECK_EQ(1, OpParameter<int32_t>(node));
+ MarkAsUsed(value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+ // We must emit a NOP here because every live range needs a defining
+ // instruction in the register allocator.
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+ if (IsNextInAssemblyOrder(target)) {
+    // Fall through to the next block.
+ Emit(kArchNop, NULL)->MarkAsControl();
+ } else {
+    // Jump to the target block.
+ OperandGenerator g(this);
+ Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+ }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ OperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kWord64Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kInt64LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<int32_t>(value) == 1) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitInt32AddWithOverflow(node, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitInt32SubWithOverflow(node, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ VisitWord32Test(value, &cont);
+}
+
+
+void InstructionSelector::VisitReturn(Node* value) {
+ OperandGenerator g(this);
+ if (value != NULL) {
+ Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
+ } else {
+ Emit(kArchRet, NULL);
+ }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+ UNIMPLEMENTED(); // TODO(titzer)
+}
+
+
+static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kHeapConstant:
+ return g->UseImmediate(input);
+ default:
+ return g->Use(input);
+ }
+}
+
+
+void InstructionSelector::VisitDeoptimize(Node* deopt) {
+ DCHECK(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+ Node* state = deopt->InputAt(0);
+ DCHECK(state->op()->opcode() == IrOpcode::kFrameState);
+ BailoutId ast_id = OpParameter<BailoutId>(state);
+
+ // Add the inputs.
+ Node* parameters = state->InputAt(0);
+ int parameters_count = OpParameter<int>(parameters);
+
+ Node* locals = state->InputAt(1);
+ int locals_count = OpParameter<int>(locals);
+
+ Node* stack = state->InputAt(2);
+ int stack_count = OpParameter<int>(stack);
+
+ OperandGenerator g(this);
+ std::vector<InstructionOperand*> inputs;
+ inputs.reserve(parameters_count + locals_count + stack_count);
+ for (int i = 0; i < parameters_count; i++) {
+ inputs.push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+ }
+ for (int i = 0; i < locals_count; i++) {
+ inputs.push_back(UseOrImmediate(&g, locals->InputAt(i)));
+ }
+ for (int i = 0; i < stack_count; i++) {
+ inputs.push_back(UseOrImmediate(&g, stack->InputAt(i)));
+ }
+
+ FrameStateDescriptor* descriptor = new (instruction_zone())
+ FrameStateDescriptor(ast_id, parameters_count, locals_count, stack_count);
+
+ DCHECK_EQ(descriptor->size(), inputs.size());
+
+ int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
+ Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), 0, NULL,
+ inputs.size(), &inputs.front(), 0, NULL);
+}
+
+
+#if !V8_TURBOFAN_BACKEND
+
+#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
+ void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
+MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
+#undef DECLARE_UNIMPLEMENTED_SELECTOR
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {}
+
+#endif // !V8_TURBOFAN_BACKEND
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
new file mode 100644
index 000000000..e28332284
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -0,0 +1,212 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+
+#include <deque>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct CallBuffer; // TODO(bmeurer): Remove this.
+class FlagsContinuation;
+
+class InstructionSelector V8_FINAL {
+ public:
+ // Forward declarations.
+ class Features;
+
+ InstructionSelector(InstructionSequence* sequence,
+ SourcePositionTable* source_positions,
+ Features features = SupportedFeatures());
+
+ // Visit code for the entire graph with the included schedule.
+ void SelectInstructions();
+
+ // ===========================================================================
+ // ============= Architecture-independent code emission methods. =============
+ // ===========================================================================
+
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, InstructionOperand* d,
+                    size_t temp_count = 0, InstructionOperand** temps = NULL);
+ Instruction* Emit(InstructionCode opcode, size_t output_count,
+ InstructionOperand** outputs, size_t input_count,
+ InstructionOperand** inputs, size_t temp_count = 0,
+                    InstructionOperand** temps = NULL);
+ Instruction* Emit(Instruction* instr);
+
+ // ===========================================================================
+ // ============== Architecture-independent CPU feature methods. ==============
+ // ===========================================================================
+
+ class Features V8_FINAL {
+ public:
+ Features() : bits_(0) {}
+ explicit Features(unsigned bits) : bits_(bits) {}
+ explicit Features(CpuFeature f) : bits_(1u << f) {}
+ Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
+
+ bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
+
+ private:
+ unsigned bits_;
+ };
+
+ bool IsSupported(CpuFeature feature) const {
+ return features_.Contains(feature);
+ }
+
+ // Returns the features supported on the target platform.
+ static Features SupportedFeatures() {
+ return Features(CpuFeatures::SupportedFeatures());
+ }
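As a usage sketch only: the Features bitset can be built from individual CpuFeature values and queried with Contains. SSE4_1 below is an assumption about the target's CpuFeature enum; substitute any feature the architecture actually defines.

    // Sketch; SSE4_1 is assumed to be a CpuFeature enumerator on this target.
    InstructionSelector::Features with_sse41(SSE4_1);
    CHECK(with_sse41.Contains(SSE4_1));                        // bit is set
    CHECK(!InstructionSelector::Features().Contains(SSE4_1));  // empty set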
+
+ private:
+ friend class OperandGenerator;
+
+ // ===========================================================================
+ // ============ Architecture-independent graph covering methods. =============
+ // ===========================================================================
+
+ // Checks if {block} will appear directly after {current_block_} when
+ // assembling code, in which case, a fall-through can be used.
+ bool IsNextInAssemblyOrder(const BasicBlock* block) const;
+
+ // Used in pattern matching during code generation.
+ // Check if {node} can be covered while generating code for the current
+  // instruction. A node can be covered if the {user} owns the node (i.e. is
+  // its only use) and the two are in the same basic block.
+ bool CanCover(Node* user, Node* node) const;
+
+ // Checks if {node} was already defined, and therefore code was already
+ // generated for it.
+ bool IsDefined(Node* node) const;
+
+ // Inform the instruction selection that {node} was just defined.
+ void MarkAsDefined(Node* node);
+
+ // Checks if {node} has any uses, and therefore code has to be generated for
+ // it.
+ bool IsUsed(Node* node) const;
+
+ // Inform the instruction selection that {node} has at least one use and we
+ // will need to generate code for it.
+ void MarkAsUsed(Node* node);
+
+ // Checks if {node} is marked as double.
+ bool IsDouble(const Node* node) const;
+
+ // Inform the register allocator of a double result.
+ void MarkAsDouble(Node* node);
+
+ // Checks if {node} is marked as reference.
+ bool IsReference(const Node* node) const;
+
+ // Inform the register allocator of a reference result.
+ void MarkAsReference(Node* node);
+
+  // Inform the register allocator of the representation of the value
+  // produced by {node}.
+ void MarkAsRepresentation(MachineType rep, Node* node);
+
+  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
+  // corresponding to the inputs and outputs of the call.
+  // {call_code_immediate} allows constant code-object callees to be passed
+  // as immediates; {call_address_immediate} does the same for constant
+  // address callees.
+ void InitializeCallBuffer(Node* call, CallBuffer* buffer,
+ bool call_code_immediate,
+ bool call_address_immediate, BasicBlock* cont_node,
+ BasicBlock* deopt_node);
+
+ // ===========================================================================
+ // ============= Architecture-specific graph covering methods. ===============
+ // ===========================================================================
+
+ // Visit nodes in the given block and generate code.
+ void VisitBlock(BasicBlock* block);
+
+ // Visit the node for the control flow at the end of the block, generating
+ // code if necessary.
+ void VisitControl(BasicBlock* block);
+
+ // Visit the node and generate code, if any.
+ void VisitNode(Node* node);
+
+#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
+ MACHINE_OP_LIST(DECLARE_GENERATOR)
+#undef DECLARE_GENERATOR
+
+ void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
+ void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
+
+ void VisitWord32Test(Node* node, FlagsContinuation* cont);
+ void VisitWord64Test(Node* node, FlagsContinuation* cont);
+ void VisitWord32Compare(Node* node, FlagsContinuation* cont);
+ void VisitWord64Compare(Node* node, FlagsContinuation* cont);
+ void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+
+ void VisitParameter(Node* node);
+ void VisitPhi(Node* node);
+ void VisitProjection(Node* node);
+ void VisitConstant(Node* node);
+ void VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization);
+ void VisitGoto(BasicBlock* target);
+ void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+ void VisitReturn(Node* value);
+ void VisitThrow(Node* value);
+ void VisitDeoptimize(Node* deopt);
+
+ // ===========================================================================
+
+ Graph* graph() const { return sequence()->graph(); }
+ Linkage* linkage() const { return sequence()->linkage(); }
+ Schedule* schedule() const { return sequence()->schedule(); }
+ InstructionSequence* sequence() const { return sequence_; }
+ Zone* instruction_zone() const { return sequence()->zone(); }
+ Zone* zone() { return &zone_; }
+
+ // ===========================================================================
+
+ typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator;
+ typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions;
+
+ Zone zone_;
+ InstructionSequence* sequence_;
+ SourcePositionTable* source_positions_;
+ Features features_;
+ BasicBlock* current_block_;
+ Instructions instructions_;
+ BoolVector defined_;
+ BoolVector used_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_H_
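A minimal usage sketch of the selector declared above, assuming `sequence` (an InstructionSequence*) and `source_positions` (a SourcePositionTable*) were produced by earlier pipeline phases; both names are placeholders:

    // Sketch; `sequence` and `source_positions` are assumed to exist already.
    InstructionSelector selector(sequence, source_positions);  // default Features
    selector.SelectInstructions();  // visits the whole scheduled graph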
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
new file mode 100644
index 000000000..a2f4ed4f4
--- /dev/null
+++ b/deps/v8/src/compiler/instruction.cc
@@ -0,0 +1,483 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const InstructionOperand& op) {
+ switch (op.kind()) {
+ case InstructionOperand::INVALID:
+ return os << "(0)";
+ case InstructionOperand::UNALLOCATED: {
+ const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
+ os << "v" << unalloc->virtual_register();
+ if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+ return os << "(=" << unalloc->fixed_slot_index() << "S)";
+ }
+ switch (unalloc->extended_policy()) {
+ case UnallocatedOperand::NONE:
+ return os;
+ case UnallocatedOperand::FIXED_REGISTER:
+ return os << "(=" << Register::AllocationIndexToString(
+ unalloc->fixed_register_index()) << ")";
+ case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+ return os << "(=" << DoubleRegister::AllocationIndexToString(
+ unalloc->fixed_register_index()) << ")";
+ case UnallocatedOperand::MUST_HAVE_REGISTER:
+ return os << "(R)";
+ case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+ return os << "(1)";
+ case UnallocatedOperand::ANY:
+ return os << "(-)";
+ }
+ }
+ case InstructionOperand::CONSTANT:
+ return os << "[constant:" << op.index() << "]";
+ case InstructionOperand::IMMEDIATE:
+ return os << "[immediate:" << op.index() << "]";
+ case InstructionOperand::STACK_SLOT:
+ return os << "[stack:" << op.index() << "]";
+ case InstructionOperand::DOUBLE_STACK_SLOT:
+ return os << "[double_stack:" << op.index() << "]";
+ case InstructionOperand::REGISTER:
+ return os << "[" << Register::AllocationIndexToString(op.index())
+ << "|R]";
+ case InstructionOperand::DOUBLE_REGISTER:
+ return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
+ << "|R]";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+SubKindOperand<kOperandKind, kNumCachedOperands>*
+ SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+ if (cache) return;
+ cache = new SubKindOperand[kNumCachedOperands];
+ for (int i = 0; i < kNumCachedOperands; i++) {
+ cache[i].ConvertTo(kOperandKind, i);
+ }
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+ delete[] cache;
+}
+
+
+void InstructionOperand::SetUpCaches() {
+#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
+ name##Operand::SetUpCache();
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
+#undef INSTRUCTION_OPERAND_SETUP
+}
+
+
+void InstructionOperand::TearDownCaches() {
+#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
+ name##Operand::TearDownCache();
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
+#undef INSTRUCTION_OPERAND_TEARDOWN
+}
+
+
+OStream& operator<<(OStream& os, const MoveOperands& mo) {
+ os << *mo.destination();
+ if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+ return os << ";";
+}
+
+
+bool ParallelMove::IsRedundant() const {
+ for (int i = 0; i < move_operands_.length(); ++i) {
+ if (!move_operands_[i].IsRedundant()) return false;
+ }
+ return true;
+}
+
+
+OStream& operator<<(OStream& os, const ParallelMove& pm) {
+ bool first = true;
+ for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
+ move != pm.move_operands()->end(); ++move) {
+ if (move->IsEliminated()) continue;
+ if (!first) os << " ";
+ first = false;
+ os << *move;
+ }
+ return os;
+}
+
+
+void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ pointer_operands_.Add(op, zone);
+}
+
+
+void PointerMap::RemovePointer(InstructionOperand* op) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ for (int i = 0; i < pointer_operands_.length(); ++i) {
+ if (pointer_operands_[i]->Equals(op)) {
+ pointer_operands_.Remove(i);
+ --i;
+ }
+ }
+}
+
+
+void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ untagged_operands_.Add(op, zone);
+}
+
+
+OStream& operator<<(OStream& os, const PointerMap& pm) {
+ os << "{";
+ for (ZoneList<InstructionOperand*>::iterator op =
+ pm.pointer_operands_.begin();
+ op != pm.pointer_operands_.end(); ++op) {
+ if (op != pm.pointer_operands_.begin()) os << ";";
+ os << *op;
+ }
+ return os << "}";
+}
+
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+ switch (ao) {
+#define CASE(Name) \
+ case k##Name: \
+ return os << #Name;
+ ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const AddressingMode& am) {
+ switch (am) {
+ case kMode_None:
+ return os;
+#define CASE(Name) \
+ case kMode_##Name: \
+ return os << #Name;
+ TARGET_ADDRESSING_MODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsMode& fm) {
+ switch (fm) {
+ case kFlags_none:
+ return os;
+ case kFlags_branch:
+ return os << "branch";
+ case kFlags_set:
+ return os << "set";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+ switch (fc) {
+ case kEqual:
+ return os << "equal";
+ case kNotEqual:
+ return os << "not equal";
+ case kSignedLessThan:
+ return os << "signed less than";
+ case kSignedGreaterThanOrEqual:
+ return os << "signed greater than or equal";
+ case kSignedLessThanOrEqual:
+ return os << "signed less than or equal";
+ case kSignedGreaterThan:
+ return os << "signed greater than";
+ case kUnsignedLessThan:
+ return os << "unsigned less than";
+ case kUnsignedGreaterThanOrEqual:
+ return os << "unsigned greater than or equal";
+ case kUnsignedLessThanOrEqual:
+ return os << "unsigned less than or equal";
+ case kUnsignedGreaterThan:
+ return os << "unsigned greater than";
+ case kUnorderedEqual:
+ return os << "unordered equal";
+ case kUnorderedNotEqual:
+ return os << "unordered not equal";
+ case kUnorderedLessThan:
+ return os << "unordered less than";
+ case kUnorderedGreaterThanOrEqual:
+ return os << "unordered greater than or equal";
+ case kUnorderedLessThanOrEqual:
+ return os << "unordered less than or equal";
+ case kUnorderedGreaterThan:
+ return os << "unordered greater than";
+ case kOverflow:
+ return os << "overflow";
+ case kNotOverflow:
+ return os << "not overflow";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const Instruction& instr) {
+ if (instr.OutputCount() > 1) os << "(";
+ for (size_t i = 0; i < instr.OutputCount(); i++) {
+ if (i > 0) os << ", ";
+ os << *instr.OutputAt(i);
+ }
+
+ if (instr.OutputCount() > 1) os << ") = ";
+ if (instr.OutputCount() == 1) os << " = ";
+
+ if (instr.IsGapMoves()) {
+ const GapInstruction* gap = GapInstruction::cast(&instr);
+ os << (instr.IsBlockStart() ? " block-start" : "gap ");
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ os << "(";
+ if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+ os << ") ";
+ }
+ } else if (instr.IsSourcePosition()) {
+ const SourcePositionInstruction* pos =
+ SourcePositionInstruction::cast(&instr);
+ os << "position (" << pos->source_position().raw() << ")";
+ } else {
+ os << ArchOpcodeField::decode(instr.opcode());
+ AddressingMode am = AddressingModeField::decode(instr.opcode());
+ if (am != kMode_None) {
+ os << " : " << AddressingModeField::decode(instr.opcode());
+ }
+ FlagsMode fm = FlagsModeField::decode(instr.opcode());
+ if (fm != kFlags_none) {
+ os << " && " << fm << " if "
+ << FlagsConditionField::decode(instr.opcode());
+ }
+ }
+ if (instr.InputCount() > 0) {
+ for (size_t i = 0; i < instr.InputCount(); i++) {
+ os << " " << *instr.InputAt(i);
+ }
+ }
+ return os << "\n";
+}
+
+
+OStream& operator<<(OStream& os, const Constant& constant) {
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return os << constant.ToInt32();
+ case Constant::kInt64:
+ return os << constant.ToInt64() << "l";
+ case Constant::kFloat64:
+ return os << constant.ToFloat64();
+ case Constant::kExternalReference:
+ return os << constant.ToExternalReference().address();
+ case Constant::kHeapObject:
+ return os << Brief(*constant.ToHeapObject());
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+Label* InstructionSequence::GetLabel(BasicBlock* block) {
+ return GetBlockStart(block)->label();
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
+ return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+}
+
+
+void InstructionSequence::StartBlock(BasicBlock* block) {
+ block->code_start_ = static_cast<int>(instructions_.size());
+ BlockStartInstruction* block_start =
+ BlockStartInstruction::New(zone(), block);
+ AddInstruction(block_start, block);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock* block) {
+ int end = static_cast<int>(instructions_.size());
+ DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
+ block->code_end_ = end;
+}
+
+
+int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+ // TODO(titzer): the order of these gaps is a holdover from Lithium.
+ GapInstruction* gap = GapInstruction::New(zone());
+ if (instr->IsControl()) instructions_.push_back(gap);
+ int index = static_cast<int>(instructions_.size());
+ instructions_.push_back(instr);
+ if (!instr->IsControl()) instructions_.push_back(gap);
+ if (instr->NeedsPointerMap()) {
+ DCHECK(instr->pointer_map() == NULL);
+ PointerMap* pointer_map = new (zone()) PointerMap(zone());
+ pointer_map->set_instruction_position(index);
+ instr->set_pointer_map(pointer_map);
+ pointer_maps_.push_back(pointer_map);
+ }
+ return index;
+}
+
+
+BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
+ // TODO(turbofan): Optimize this.
+ for (;;) {
+ DCHECK_LE(0, instruction_index);
+ Instruction* instruction = InstructionAt(instruction_index--);
+ if (instruction->IsBlockStart()) {
+ return BlockStartInstruction::cast(instruction)->block();
+ }
+ }
+}
+
+
+bool InstructionSequence::IsReference(int virtual_register) const {
+ return references_.find(virtual_register) != references_.end();
+}
+
+
+bool InstructionSequence::IsDouble(int virtual_register) const {
+ return doubles_.find(virtual_register) != doubles_.end();
+}
+
+
+void InstructionSequence::MarkAsReference(int virtual_register) {
+ references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+ doubles_.insert(virtual_register);
+}
+
+
+void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
+ InstructionOperand* to) {
+ GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
+ from, to, zone());
+}
+
+
+int InstructionSequence::AddDeoptimizationEntry(
+ FrameStateDescriptor* descriptor) {
+ int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
+ deoptimization_entries_.push_back(descriptor);
+ return deoptimization_id;
+}
+
+FrameStateDescriptor* InstructionSequence::GetDeoptimizationEntry(
+ int deoptimization_id) {
+ return deoptimization_entries_[deoptimization_id];
+}
+
+
+int InstructionSequence::GetDeoptimizationEntryCount() {
+ return static_cast<int>(deoptimization_entries_.size());
+}
+
+
+OStream& operator<<(OStream& os, const InstructionSequence& code) {
+ for (size_t i = 0; i < code.immediates_.size(); ++i) {
+ Constant constant = code.immediates_[i];
+ os << "IMM#" << i << ": " << constant << "\n";
+ }
+ int i = 0;
+ for (ConstantMap::const_iterator it = code.constants_.begin();
+ it != code.constants_.end(); ++i, ++it) {
+ os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
+ }
+ for (int i = 0; i < code.BasicBlockCount(); i++) {
+ BasicBlock* block = code.BlockAt(i);
+
+ int bid = block->id();
+ os << "RPO#" << block->rpo_number_ << ": B" << bid;
+ CHECK(block->rpo_number_ == i);
+ if (block->IsLoopHeader()) {
+ os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
+ << ")";
+ }
+ os << " instructions: [" << block->code_start_ << ", " << block->code_end_
+ << ")\n predecessors:";
+
+ BasicBlock::Predecessors predecessors = block->predecessors();
+ for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
+ iter != predecessors.end(); ++iter) {
+ os << " B" << (*iter)->id();
+ }
+ os << "\n";
+
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ Node* phi = *j;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+ os << " phi: v" << phi->id() << " =";
+ Node::Inputs inputs = phi->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter) {
+ os << " v" << (*iter)->id();
+ }
+ os << "\n";
+ }
+
+ ScopedVector<char> buf(32);
+ for (int j = block->first_instruction_index();
+ j <= block->last_instruction_index(); j++) {
+ // TODO(svenpanne) Add some basic formatting to our streams.
+ SNPrintF(buf, "%5d", j);
+ os << " " << buf.start() << ": " << *code.InstructionAt(j);
+ }
+
+ os << " " << block->control_;
+
+ if (block->control_input_ != NULL) {
+ os << " v" << block->control_input_->id();
+ }
+
+ BasicBlock::Successors successors = block->successors();
+ for (BasicBlock::Successors::iterator iter = successors.begin();
+ iter != successors.end(); ++iter) {
+ os << " B" << (*iter)->id();
+ }
+ os << "\n";
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
new file mode 100644
index 000000000..7b357639e
--- /dev/null
+++ b/deps/v8/src/compiler/instruction.h
@@ -0,0 +1,871 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_H_
+#define V8_COMPILER_INSTRUCTION_H_
+
+#include <deque>
+#include <map>
+#include <set>
+
+// TODO(titzer): don't include the assembler?
+#include "src/assembler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class OStream;
+
+namespace compiler {
+
+// Forward declarations.
+class Linkage;
+
+// A few reserved instruction codes are used for internal purposes.
+const InstructionCode kGapInstruction = -1;
+const InstructionCode kBlockStartInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -3;
+
+
+#define INSTRUCTION_OPERAND_LIST(V) \
+ V(Constant, CONSTANT, 128) \
+ V(Immediate, IMMEDIATE, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Register, REGISTER, Register::kNumRegisters) \
+ V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+class InstructionOperand : public ZoneObject {
+ public:
+ enum Kind {
+ INVALID,
+ UNALLOCATED,
+ CONSTANT,
+ IMMEDIATE,
+ STACK_SLOT,
+ DOUBLE_STACK_SLOT,
+ REGISTER,
+ DOUBLE_REGISTER
+ };
+
+ InstructionOperand() : value_(KindField::encode(INVALID)) {}
+ InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+ Kind kind() const { return KindField::decode(value_); }
+ int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+ bool Is##name() const { return kind() == type; }
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
+ INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef INSTRUCTION_OPERAND_PREDICATE
+ bool Equals(InstructionOperand* other) const {
+ return value_ == other->value_;
+ }
+
+ void ConvertTo(Kind kind, int index) {
+ if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
+ value_ = KindField::encode(kind);
+ value_ |= index << KindField::kSize;
+ DCHECK(this->index() == index);
+ }
+
+ // Calls SetUpCache()/TearDownCache() for each subclass.
+ static void SetUpCaches();
+ static void TearDownCaches();
+
+ protected:
+ typedef BitField<Kind, 0, 3> KindField;
+
+ unsigned value_;
+};
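A small illustration of the kind/index packing done by ConvertTo (illustrative only; the values follow from KindField being 3 bits wide):

    InstructionOperand op(InstructionOperand::STACK_SLOT, 3);
    DCHECK(op.IsStackSlot());
    DCHECK_EQ(3, op.index());  // index() shifts the 3 kind bits back out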
+
+OStream& operator<<(OStream& os, const InstructionOperand& op);
+
+class UnallocatedOperand : public InstructionOperand {
+ public:
+ enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
+
+ enum ExtendedPolicy {
+ NONE,
+ ANY,
+ FIXED_REGISTER,
+ FIXED_DOUBLE_REGISTER,
+ MUST_HAVE_REGISTER,
+ SAME_AS_FIRST_INPUT
+ };
+
+ // Lifetime of operand inside the instruction.
+ enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the
+    // instruction start. The register allocator is free to assign the same
+    // register to some other operand used inside the instruction (i.e. a
+    // temporary or an output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+ };
+
+ explicit UnallocatedOperand(ExtendedPolicy policy)
+ : InstructionOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ }
+
+ UnallocatedOperand(BasicPolicy policy, int index)
+ : InstructionOperand(UNALLOCATED, 0) {
+ DCHECK(policy == FIXED_SLOT);
+ value_ |= BasicPolicyField::encode(policy);
+ value_ |= index << FixedSlotIndexField::kShift;
+ DCHECK(this->fixed_slot_index() == index);
+ }
+
+ UnallocatedOperand(ExtendedPolicy policy, int index)
+ : InstructionOperand(UNALLOCATED, 0) {
+ DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= FixedRegisterField::encode(index);
+ }
+
+ UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
+ : InstructionOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ }
+
+ UnallocatedOperand* CopyUnconstrained(Zone* zone) {
+ UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
+
+ static const UnallocatedOperand* cast(const InstructionOperand* op) {
+ DCHECK(op->IsUnallocated());
+ return static_cast<const UnallocatedOperand*>(op);
+ }
+
+ static UnallocatedOperand* cast(InstructionOperand* op) {
+ DCHECK(op->IsUnallocated());
+ return static_cast<UnallocatedOperand*>(op);
+ }
+
+  // The encoding used for UnallocatedOperand operands depends on the policy
+  // that is stored within the operand. The FIXED_SLOT policy uses a compact
+  // encoding because it accommodates a larger payload.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------+
+ // | slot_index | vreg | 0 | 001 |
+ // +------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
+ // +------------------------------------------+ P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
+ // The superclass has a KindField.
+ STATIC_ASSERT(KindField::kSize == 3);
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+ class LifetimeField : public BitField<Lifetime, 25, 1> {};
+ class FixedRegisterField : public BitField<int, 26, 6> {};
+
+ static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+ static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+ static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+ static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+ // Predicates for the operand policy.
+ bool HasAnyPolicy() const {
+ return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+ }
+ bool HasFixedPolicy() const {
+ return basic_policy() == FIXED_SLOT ||
+ extended_policy() == FIXED_REGISTER ||
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+ bool HasRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == MUST_HAVE_REGISTER;
+ }
+ bool HasSameAsInputPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == SAME_AS_FIRST_INPUT;
+ }
+ bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
+ bool HasFixedRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER;
+ }
+ bool HasFixedDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+
+ // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+ BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+
+ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+ ExtendedPolicy extended_policy() const {
+ DCHECK(basic_policy() == EXTENDED_POLICY);
+ return ExtendedPolicyField::decode(value_);
+ }
+
+ // [fixed_slot_index]: Only for FIXED_SLOT.
+ int fixed_slot_index() const {
+ DCHECK(HasFixedSlotPolicy());
+ return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+ }
+
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ int fixed_register_index() const {
+ DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ return FixedRegisterField::decode(value_);
+ }
+
+ // [virtual_register]: The virtual register ID for this operand.
+ int virtual_register() const { return VirtualRegisterField::decode(value_); }
+ void set_virtual_register(unsigned id) {
+ value_ = VirtualRegisterField::update(value_, id);
+ }
+
+ // [lifetime]: Only for non-FIXED_SLOT.
+ bool IsUsedAtStart() {
+ DCHECK(basic_policy() == EXTENDED_POLICY);
+ return LifetimeField::decode(value_) == USED_AT_START;
+ }
+};
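To make the FIXED_SLOT layout sketched in the comment above concrete, here is a worked example; it is illustrative only, and the numbers follow directly from the BitFields declared in this class:

    UnallocatedOperand op(UnallocatedOperand::FIXED_SLOT, 2);  // stack slot 2
    op.set_virtual_register(5);
    // value_ == (2 << 22) | (5 << 4) | 1: kind UNALLOCATED (001), BasicPolicy
    // bit 0 (FIXED_SLOT), vreg 5 in bits 4-21, slot index 2 from bit 22 up.
    DCHECK_EQ(2, op.fixed_slot_index());
    DCHECK_EQ(5, op.virtual_register());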
+
+
+class MoveOperands V8_FINAL {
+ public:
+ MoveOperands(InstructionOperand* source, InstructionOperand* destination)
+ : source_(source), destination_(destination) {}
+
+ InstructionOperand* source() const { return source_; }
+ void set_source(InstructionOperand* operand) { source_ = operand; }
+
+ InstructionOperand* destination() const { return destination_; }
+ void set_destination(InstructionOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+
+  // True if this move blocks the given operand, i.e. this move has not been
+  // eliminated and still reads the operand as its source.
+ bool Blocks(InstructionOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
+
+ // A move is redundant if it's been eliminated, if its source and
+ // destination are the same, or if its destination is unneeded or constant.
+ bool IsRedundant() const {
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+ (destination_ != NULL && destination_->IsConstant());
+ }
+
+ bool IsIgnored() const {
+ return destination_ != NULL && destination_->IsIgnored();
+ }
+
+ // We clear both operands to indicate a move that has been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ DCHECK(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
+
+ private:
+ InstructionOperand* source_;
+ InstructionOperand* destination_;
+};
+
+OStream& operator<<(OStream& os, const MoveOperands& mo);
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+class SubKindOperand V8_FINAL : public InstructionOperand {
+ public:
+ static SubKindOperand* Create(int index, Zone* zone) {
+ DCHECK(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new (zone) SubKindOperand(index);
+ }
+
+ static SubKindOperand* cast(InstructionOperand* op) {
+ DCHECK(op->kind() == kOperandKind);
+ return reinterpret_cast<SubKindOperand*>(op);
+ }
+
+ static void SetUpCache();
+ static void TearDownCache();
+
+ private:
+ static SubKindOperand* cache;
+
+ SubKindOperand() : InstructionOperand() {}
+ explicit SubKindOperand(int index)
+ : InstructionOperand(kOperandKind, index) {}
+};
+
+
+#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+ typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class ParallelMove V8_FINAL : public ZoneObject {
+ public:
+ explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
+
+ void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
+ move_operands_.Add(MoveOperands(from, to), zone);
+ }
+
+ bool IsRedundant() const;
+
+ ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
+ const ZoneList<MoveOperands>* move_operands() const {
+ return &move_operands_;
+ }
+
+ private:
+ ZoneList<MoveOperands> move_operands_;
+};
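+
+// A minimal usage sketch (assuming a Zone* |zone| and two zone-allocated
+// operands |src| and |dst| are available):
+//
+//   ParallelMove* moves = new (zone) ParallelMove(zone);
+//   moves->AddMove(src, dst, zone);
+//   if (!moves->IsRedundant()) {
+//     // The gap resolver will later perform these moves.
+//   }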
+
+OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+class PointerMap V8_FINAL : public ZoneObject {
+ public:
+ explicit PointerMap(Zone* zone)
+ : pointer_operands_(8, zone),
+ untagged_operands_(0, zone),
+ instruction_position_(-1) {}
+
+ const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
+ for (int i = 0; i < untagged_operands_.length(); ++i) {
+ RemovePointer(untagged_operands_[i]);
+ }
+ untagged_operands_.Clear();
+ return &pointer_operands_;
+ }
+ int instruction_position() const { return instruction_position_; }
+
+ void set_instruction_position(int pos) {
+ DCHECK(instruction_position_ == -1);
+ instruction_position_ = pos;
+ }
+
+ void RecordPointer(InstructionOperand* op, Zone* zone);
+ void RemovePointer(InstructionOperand* op);
+ void RecordUntagged(InstructionOperand* op, Zone* zone);
+
+ private:
+ friend OStream& operator<<(OStream& os, const PointerMap& pm);
+
+ ZoneList<InstructionOperand*> pointer_operands_;
+ ZoneList<InstructionOperand*> untagged_operands_;
+ int instruction_position_;
+};
+
+OStream& operator<<(OStream& os, const PointerMap& pm);
+
+// TODO(titzer): s/PointerMap/ReferenceMap/
+class Instruction : public ZoneObject {
+ public:
+ size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
+ InstructionOperand* Output() const { return OutputAt(0); }
+ InstructionOperand* OutputAt(size_t i) const {
+ DCHECK(i < OutputCount());
+ return operands_[i];
+ }
+
+ size_t InputCount() const { return InputCountField::decode(bit_field_); }
+ InstructionOperand* InputAt(size_t i) const {
+ DCHECK(i < InputCount());
+ return operands_[OutputCount() + i];
+ }
+
+ size_t TempCount() const { return TempCountField::decode(bit_field_); }
+ InstructionOperand* TempAt(size_t i) const {
+ DCHECK(i < TempCount());
+ return operands_[OutputCount() + InputCount() + i];
+ }
+
+ InstructionCode opcode() const { return opcode_; }
+ ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
+ AddressingMode addressing_mode() const {
+ return AddressingModeField::decode(opcode());
+ }
+ FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
+ FlagsCondition flags_condition() const {
+ return FlagsConditionField::decode(opcode());
+ }
+
+ // TODO(titzer): make control and call into flags.
+ static Instruction* New(Zone* zone, InstructionCode opcode) {
+ return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+ }
+
+ static Instruction* New(Zone* zone, InstructionCode opcode,
+ size_t output_count, InstructionOperand** outputs,
+ size_t input_count, InstructionOperand** inputs,
+ size_t temp_count, InstructionOperand** temps) {
+ DCHECK(opcode >= 0);
+ DCHECK(output_count == 0 || outputs != NULL);
+ DCHECK(input_count == 0 || inputs != NULL);
+ DCHECK(temp_count == 0 || temps != NULL);
+ InstructionOperand* none = NULL;
+ USE(none);
+ int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
+ (output_count + input_count + temp_count - 1) *
+ sizeof(none));
+ return new (zone->New(size)) Instruction(
+ opcode, output_count, outputs, input_count, inputs, temp_count, temps);
+ }
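+
+ // For example, given an InstructionCode |opcode| and zone-allocated
+ // operands |result|, |left| and |right|:
+ //
+ //   InstructionOperand* outputs[] = {result};
+ //   InstructionOperand* inputs[] = {left, right};
+ //   Instruction* instr =
+ //       Instruction::New(zone, opcode, 1, outputs, 2, inputs, 0, NULL);
+ //
+ // allocates room for two additional operand pointers beyond
+ // RoundUp(sizeof(Instruction), kPointerSize): the trailing operands_[1]
+ // array already provides the first of the three slots, hence the "- 1" in
+ // the size computation above.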
+
+ // TODO(titzer): another holdover from lithium days; register allocator
+ // should not need to know about control instructions.
+ Instruction* MarkAsControl() {
+ bit_field_ = IsControlField::update(bit_field_, true);
+ return this;
+ }
+ Instruction* MarkAsCall() {
+ bit_field_ = IsCallField::update(bit_field_, true);
+ return this;
+ }
+ bool IsControl() const { return IsControlField::decode(bit_field_); }
+ bool IsCall() const { return IsCallField::decode(bit_field_); }
+ bool NeedsPointerMap() const { return IsCall(); }
+ bool HasPointerMap() const { return pointer_map_ != NULL; }
+
+ bool IsGapMoves() const {
+ return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
+ }
+ bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+ bool IsSourcePosition() const {
+ return opcode() == kSourcePositionInstruction;
+ }
+
+ bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersDoubleRegisters() const { return IsCall(); }
+ PointerMap* pointer_map() const { return pointer_map_; }
+
+ void set_pointer_map(PointerMap* map) {
+ DCHECK(NeedsPointerMap());
+ DCHECK_EQ(NULL, pointer_map_);
+ pointer_map_ = map;
+ }
+
+ // Placement new operator so that we can smash instructions into
+ // zone-allocated memory.
+ void* operator new(size_t, void* location) { return location; }
+
+ void operator delete(void* pointer, void* location) { UNREACHABLE(); }
+
+ protected:
+ explicit Instruction(InstructionCode opcode)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+ TempCountField::encode(0) | IsCallField::encode(false) |
+ IsControlField::encode(false)),
+ pointer_map_(NULL) {}
+
+ Instruction(InstructionCode opcode, size_t output_count,
+ InstructionOperand** outputs, size_t input_count,
+ InstructionOperand** inputs, size_t temp_count,
+ InstructionOperand** temps)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(output_count) |
+ InputCountField::encode(input_count) |
+ TempCountField::encode(temp_count) |
+ IsCallField::encode(false) | IsControlField::encode(false)),
+ pointer_map_(NULL) {
+ for (size_t i = 0; i < output_count; ++i) {
+ operands_[i] = outputs[i];
+ }
+ for (size_t i = 0; i < input_count; ++i) {
+ operands_[output_count + i] = inputs[i];
+ }
+ for (size_t i = 0; i < temp_count; ++i) {
+ operands_[output_count + input_count + i] = temps[i];
+ }
+ }
+
+ protected:
+ typedef BitField<size_t, 0, 8> OutputCountField;
+ typedef BitField<size_t, 8, 16> InputCountField;
+ typedef BitField<size_t, 24, 6> TempCountField;
+ typedef BitField<bool, 30, 1> IsCallField;
+ typedef BitField<bool, 31, 1> IsControlField;
+
+ InstructionCode opcode_;
+ uint32_t bit_field_;
+ PointerMap* pointer_map_;
+ InstructionOperand* operands_[1];
+};
+
+OStream& operator<<(OStream& os, const Instruction& instr);
+
+// Represents moves inserted before an instruction due to register allocation.
+// TODO(titzer): squash GapInstruction back into Instruction, since essentially
+// every instruction can possibly have moves inserted before it.
+class GapInstruction : public Instruction {
+ public:
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new (zone) ParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ ParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ static GapInstruction* New(Zone* zone) {
+ void* buffer = zone->New(sizeof(GapInstruction));
+ return new (buffer) GapInstruction(kGapInstruction);
+ }
+
+ static GapInstruction* cast(Instruction* instr) {
+ DCHECK(instr->IsGapMoves());
+ return static_cast<GapInstruction*>(instr);
+ }
+
+ static const GapInstruction* cast(const Instruction* instr) {
+ DCHECK(instr->IsGapMoves());
+ return static_cast<const GapInstruction*>(instr);
+ }
+
+ protected:
+ explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ private:
+ friend OStream& operator<<(OStream& os, const Instruction& instr);
+ ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+};
+
+
+// This special kind of gap move instruction represents the beginning of a
+// block of code.
+// TODO(titzer): move code_start and code_end from BasicBlock to here.
+class BlockStartInstruction V8_FINAL : public GapInstruction {
+ public:
+ BasicBlock* block() const { return block_; }
+ Label* label() { return &label_; }
+
+ static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+ void* buffer = zone->New(sizeof(BlockStartInstruction));
+ return new (buffer) BlockStartInstruction(block);
+ }
+
+ static BlockStartInstruction* cast(Instruction* instr) {
+ DCHECK(instr->IsBlockStart());
+ return static_cast<BlockStartInstruction*>(instr);
+ }
+
+ private:
+ explicit BlockStartInstruction(BasicBlock* block)
+ : GapInstruction(kBlockStartInstruction), block_(block) {}
+
+ BasicBlock* block_;
+ Label label_;
+};
+
+
+class SourcePositionInstruction V8_FINAL : public Instruction {
+ public:
+ static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
+ void* buffer = zone->New(sizeof(SourcePositionInstruction));
+ return new (buffer) SourcePositionInstruction(position);
+ }
+
+ SourcePosition source_position() const { return source_position_; }
+
+ static SourcePositionInstruction* cast(Instruction* instr) {
+ DCHECK(instr->IsSourcePosition());
+ return static_cast<SourcePositionInstruction*>(instr);
+ }
+
+ static const SourcePositionInstruction* cast(const Instruction* instr) {
+ DCHECK(instr->IsSourcePosition());
+ return static_cast<const SourcePositionInstruction*>(instr);
+ }
+
+ private:
+ explicit SourcePositionInstruction(SourcePosition source_position)
+ : Instruction(kSourcePositionInstruction),
+ source_position_(source_position) {
+ DCHECK(!source_position_.IsInvalid());
+ DCHECK(!source_position_.IsUnknown());
+ }
+
+ SourcePosition source_position_;
+};
+
+
+class Constant V8_FINAL {
+ public:
+ enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+
+ explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+ explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+ explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
+ explicit Constant(ExternalReference ref)
+ : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
+ explicit Constant(Handle<HeapObject> obj)
+ : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}
+
+ Type type() const { return type_; }
+
+ int32_t ToInt32() const {
+ DCHECK_EQ(kInt32, type());
+ return static_cast<int32_t>(value_);
+ }
+
+ int64_t ToInt64() const {
+ if (type() == kInt32) return ToInt32();
+ DCHECK_EQ(kInt64, type());
+ return value_;
+ }
+
+ double ToFloat64() const {
+ if (type() == kInt32) return ToInt32();
+ DCHECK_EQ(kFloat64, type());
+ return BitCast<double>(value_);
+ }
+
+ ExternalReference ToExternalReference() const {
+ DCHECK_EQ(kExternalReference, type());
+ return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
+ }
+
+ Handle<HeapObject> ToHeapObject() const {
+ DCHECK_EQ(kHeapObject, type());
+ return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+ }
+
+ private:
+ Type type_;
+ int64_t value_;
+};
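+
+// For example, Constant(static_cast<int32_t>(-1)) has type() == kInt32, and
+// ToInt64() == -1, since ToInt64() transparently widens a kInt32 constant.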
+
+
+class FrameStateDescriptor : public ZoneObject {
+ public:
+ FrameStateDescriptor(BailoutId bailout_id, int parameters_count,
+ int locals_count, int stack_count)
+ : bailout_id_(bailout_id),
+ parameters_count_(parameters_count),
+ locals_count_(locals_count),
+ stack_count_(stack_count) {}
+
+ BailoutId bailout_id() const { return bailout_id_; }
+ int parameters_count() { return parameters_count_; }
+ int locals_count() { return locals_count_; }
+ int stack_count() { return stack_count_; }
+
+ int size() { return parameters_count_ + locals_count_ + stack_count_; }
+
+ private:
+ BailoutId bailout_id_;
+ int parameters_count_;
+ int locals_count_;
+ int stack_count_;
+};
+
+OStream& operator<<(OStream& os, const Constant& constant);
+
+typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque;
+typedef std::map<int, Constant, std::less<int>,
+ zone_allocator<std::pair<int, Constant> > > ConstantMap;
+
+
+typedef std::deque<Instruction*, zone_allocator<Instruction*> >
+ InstructionDeque;
+typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque;
+typedef std::vector<FrameStateDescriptor*,
+ zone_allocator<FrameStateDescriptor*> >
+ DeoptimizationVector;
+
+
+// Represents architecture-specific generated code before, during, and after
+// register allocation.
+// TODO(titzer): s/IsDouble/IsFloat64/
+class InstructionSequence V8_FINAL {
+ public:
+ InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
+ : graph_(graph),
+ linkage_(linkage),
+ schedule_(schedule),
+ constants_(ConstantMap::key_compare(),
+ ConstantMap::allocator_type(zone())),
+ immediates_(ConstantDeque::allocator_type(zone())),
+ instructions_(InstructionDeque::allocator_type(zone())),
+ next_virtual_register_(graph->NodeCount()),
+ pointer_maps_(PointerMapDeque::allocator_type(zone())),
+ doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+ references_(std::less<int>(),
+ VirtualRegisterSet::allocator_type(zone())),
+ deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {}
+
+ int NextVirtualRegister() { return next_virtual_register_++; }
+ int VirtualRegisterCount() const { return next_virtual_register_; }
+
+ int ValueCount() const { return graph_->NodeCount(); }
+
+ int BasicBlockCount() const {
+ return static_cast<int>(schedule_->rpo_order()->size());
+ }
+
+ BasicBlock* BlockAt(int rpo_number) const {
+ return (*schedule_->rpo_order())[rpo_number];
+ }
+
+ BasicBlock* GetContainingLoop(BasicBlock* block) {
+ return block->loop_header_;
+ }
+
+ int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+
+ BasicBlock* GetBasicBlock(int instruction_index);
+
+ int GetVirtualRegister(Node* node) const { return node->id(); }
+
+ bool IsReference(int virtual_register) const;
+ bool IsDouble(int virtual_register) const;
+
+ void MarkAsReference(int virtual_register);
+ void MarkAsDouble(int virtual_register);
+
+ void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
+
+ Label* GetLabel(BasicBlock* block);
+ BlockStartInstruction* GetBlockStart(BasicBlock* block);
+
+ typedef InstructionDeque::const_iterator const_iterator;
+ const_iterator begin() const { return instructions_.begin(); }
+ const_iterator end() const { return instructions_.end(); }
+
+ GapInstruction* GapAt(int index) const {
+ return GapInstruction::cast(InstructionAt(index));
+ }
+ bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+ Instruction* InstructionAt(int index) const {
+ DCHECK(index >= 0);
+ DCHECK(index < static_cast<int>(instructions_.size()));
+ return instructions_[index];
+ }
+
+ Frame* frame() { return &frame_; }
+ Graph* graph() const { return graph_; }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Linkage* linkage() const { return linkage_; }
+ Schedule* schedule() const { return schedule_; }
+ const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+ Zone* zone() const { return graph_->zone(); }
+
+ // Used by the code generator while adding instructions.
+ int AddInstruction(Instruction* instr, BasicBlock* block);
+ void StartBlock(BasicBlock* block);
+ void EndBlock(BasicBlock* block);
+
+ void AddConstant(int virtual_register, Constant constant) {
+ DCHECK(constants_.find(virtual_register) == constants_.end());
+ constants_.insert(std::make_pair(virtual_register, constant));
+ }
+ Constant GetConstant(int virtual_register) const {
+ ConstantMap::const_iterator it = constants_.find(virtual_register);
+ DCHECK(it != constants_.end());
+ DCHECK_EQ(virtual_register, it->first);
+ return it->second;
+ }
+
+ typedef ConstantDeque Immediates;
+ const Immediates& immediates() const { return immediates_; }
+
+ int AddImmediate(Constant constant) {
+ int index = static_cast<int>(immediates_.size());
+ immediates_.push_back(constant);
+ return index;
+ }
+ Constant GetImmediate(int index) const {
+ DCHECK(index >= 0);
+ DCHECK(index < static_cast<int>(immediates_.size()));
+ return immediates_[index];
+ }
+
+ int AddDeoptimizationEntry(FrameStateDescriptor* descriptor);
+ FrameStateDescriptor* GetDeoptimizationEntry(int deoptimization_id);
+ int GetDeoptimizationEntryCount();
+
+ private:
+ friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+ typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+
+ Graph* graph_;
+ Linkage* linkage_;
+ Schedule* schedule_;
+ ConstantMap constants_;
+ ConstantDeque immediates_;
+ InstructionDeque instructions_;
+ int next_virtual_register_;
+ PointerMapDeque pointer_maps_;
+ VirtualRegisterSet doubles_;
+ VirtualRegisterSet references_;
+ Frame frame_;
+ DeoptimizationVector deoptimization_entries_;
+};
+
+OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_H_
diff --git a/deps/v8/src/compiler/ir-operations.txt b/deps/v8/src/compiler/ir-operations.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/deps/v8/src/compiler/ir-operations.txt
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
new file mode 100644
index 000000000..bdf142763
--- /dev/null
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): factor this out to a common routine with js-typed-lowering.
+static void ReplaceEffectfulWithValue(Node* node, Node* value) {
+ Node* effect = NULL;
+ if (OperatorProperties::HasEffectInput(node->op())) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+
+ // Requires distinguishing between value and effect edges.
+ UseIter iter = node->uses().begin();
+ while (iter != node->uses().end()) {
+ if (NodeProperties::IsEffectEdge(iter.edge())) {
+ DCHECK_NE(NULL, effect);
+ iter = iter.UpdateToAndIncrement(effect);
+ } else {
+ iter = iter.UpdateToAndIncrement(value);
+ }
+ }
+}
+
+
+class ContextSpecializationVisitor : public NullNodeVisitor {
+ public:
+ explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
+ : spec_(spec) {}
+
+ GenericGraphVisit::Control Post(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadContext: {
+ Reduction r = spec_->ReduceJSLoadContext(node);
+ if (r.Changed() && r.replacement() != node) {
+ ReplaceEffectfulWithValue(node, r.replacement());
+ }
+ break;
+ }
+ case IrOpcode::kJSStoreContext: {
+ Reduction r = spec_->ReduceJSStoreContext(node);
+ if (r.Changed() && r.replacement() != node) {
+ ReplaceEffectfulWithValue(node, r.replacement());
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ private:
+ JSContextSpecializer* spec_;
+};
+
+
+void JSContextSpecializer::SpecializeToContext() {
+ ReplaceEffectfulWithValue(context_, jsgraph_->Constant(info_->context()));
+
+ ContextSpecializationVisitor visitor(this);
+ jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+
+ ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0));
+ // If the context is not constant, no reduction can occur.
+ if (!match.HasValue()) {
+ return Reducer::NoChange();
+ }
+
+ ContextAccess access = OpParameter<ContextAccess>(node);
+
+ // Find the right parent context.
+ Context* context = *match.Value();
+ for (int i = access.depth(); i > 0; --i) {
+ context = context->previous();
+ }
+
+ // If the access itself is mutable, only fold in the parent context lookup.
+ if (!access.immutable()) {
+ // The access does not have to look up a parent, nothing to fold.
+ if (access.depth() == 0) {
+ return Reducer::NoChange();
+ }
+ Operator* op = jsgraph_->javascript()->LoadContext(0, access.index(),
+ access.immutable());
+ node->set_op(op);
+ Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
+ node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+ return Reducer::Changed(node);
+ }
+ Handle<Object> value =
+ Handle<Object>(context->get(access.index()), info_->isolate());
+
+ // Even though the context slot is immutable, the context might have escaped
+ // before the function to which it belongs has initialized the slot.
+ // We must be conservative and check if the value in the slot is currently the
+ // hole or undefined. If it is neither of these, then it must be initialized.
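+ // For example (schematically), in
+ //
+ //   var f = function() { return c; };  // |c| is context-allocated
+ //   // calling f() here would still observe the uninitialized slot
+ //   const c = 1;
+ //
+ // the slot only becomes initialized once the assignment has run, so the
+ // load cannot be constant-folded before that point.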
+ if (value->IsUndefined() || value->IsTheHole()) {
+ return Reducer::NoChange();
+ }
+
+ // Success. The context load can be replaced with the constant.
+ // TODO(titzer): record the specialization for sharing code across multiple
+ // contexts that have the same value in the corresponding context slot.
+ return Reducer::Replace(jsgraph_->Constant(value));
+}
+
+
+Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+
+ ValueMatcher<Handle<Context> > match(NodeProperties::GetValueInput(node, 0));
+ // If the context is not constant, no reduction can occur.
+ if (!match.HasValue()) {
+ return Reducer::NoChange();
+ }
+
+ ContextAccess access = OpParameter<ContextAccess>(node);
+
+ // The access does not have to look up a parent, nothing to fold.
+ if (access.depth() == 0) {
+ return Reducer::NoChange();
+ }
+
+ // Find the right parent context.
+ Context* context = *match.Value();
+ for (int i = access.depth(); i > 0; --i) {
+ context = context->previous();
+ }
+
+ Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
+ node->set_op(op);
+ Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
+ node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
+
+ return Reducer::Changed(node);
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
new file mode 100644
index 000000000..b8b50ed6c
--- /dev/null
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/contexts.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Specializes a given JSGraph to a given context, potentially constant folding
+// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
+class JSContextSpecializer {
+ public:
+ JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
+ : info_(info), jsgraph_(jsgraph), context_(context) {}
+
+ void SpecializeToContext();
+ Reduction ReduceJSLoadContext(Node* node);
+ Reduction ReduceJSStoreContext(Node* node);
+
+ private:
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+ Node* context_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
new file mode 100644
index 000000000..68cc1cea9
--- /dev/null
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -0,0 +1,550 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stubs.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// TODO(mstarzinger): This is a temporary workaround for non-hydrogen stubs for
+// which we don't have an interface descriptor yet. Use ReplaceWithICStubCall
+// once these stub have been made into a HydrogenCodeStub.
+template <typename T>
+static CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate,
+ T* stub) {
+ CodeStub::Major key = static_cast<CodeStub*>(stub)->MajorKey();
+ CodeStubInterfaceDescriptor* d = isolate->code_stub_interface_descriptor(key);
+ stub->InitializeInterfaceDescriptor(d);
+ return d;
+}
+
+
+// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
+// which doesn't have an interface descriptor yet. It mimics a hydrogen code
+// stub for the underlying IC stub code.
+class LoadICStubShim : public HydrogenCodeStub {
+ public:
+ LoadICStubShim(Isolate* isolate, ContextualMode contextual_mode)
+ : HydrogenCodeStub(isolate), contextual_mode_(contextual_mode) {
+ i::compiler::GetInterfaceDescriptor(isolate, this);
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE {
+ ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode_);
+ return LoadIC::initialize_stub(isolate(), extra_state);
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
+ Register registers[] = { InterfaceDescriptor::ContextRegister(),
+ LoadIC::ReceiverRegister(),
+ LoadIC::NameRegister() };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+ }
+
+ private:
+ virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
+ virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
+ virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
+
+ ContextualMode contextual_mode_;
+};
+
+
+// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
+// which doesn't have an interface descriptor yet. It mimics a hydrogen code
+// stub for the underlying IC stub code.
+class KeyedLoadICStubShim : public HydrogenCodeStub {
+ public:
+ explicit KeyedLoadICStubShim(Isolate* isolate) : HydrogenCodeStub(isolate) {
+ i::compiler::GetInterfaceDescriptor(isolate, this);
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE {
+ return isolate()->builtins()->KeyedLoadIC_Initialize();
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
+ Register registers[] = { InterfaceDescriptor::ContextRegister(),
+ KeyedLoadIC::ReceiverRegister(),
+ KeyedLoadIC::NameRegister() };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+ }
+
+ private:
+ virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
+ virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
+ virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
+};
+
+
+// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
+// which doesn't have an interface descriptor yet. It mimics a hydrogen code
+// stub for the underlying IC stub code.
+class StoreICStubShim : public HydrogenCodeStub {
+ public:
+ StoreICStubShim(Isolate* isolate, StrictMode strict_mode)
+ : HydrogenCodeStub(isolate), strict_mode_(strict_mode) {
+ i::compiler::GetInterfaceDescriptor(isolate, this);
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE {
+ return StoreIC::initialize_stub(isolate(), strict_mode_);
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
+ Register registers[] = { InterfaceDescriptor::ContextRegister(),
+ StoreIC::ReceiverRegister(),
+ StoreIC::NameRegister(),
+ StoreIC::ValueRegister() };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+ }
+
+ private:
+ virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
+ virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
+ virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
+
+ StrictMode strict_mode_;
+};
+
+
+// TODO(mstarzinger): This is a temporary shim to be able to call an IC stub
+// which doesn't have an interface descriptor yet. It mimics a hydrogen code
+// stub for the underlying IC stub code.
+class KeyedStoreICStubShim : public HydrogenCodeStub {
+ public:
+ KeyedStoreICStubShim(Isolate* isolate, StrictMode strict_mode)
+ : HydrogenCodeStub(isolate), strict_mode_(strict_mode) {
+ i::compiler::GetInterfaceDescriptor(isolate, this);
+ }
+
+ virtual Handle<Code> GenerateCode() V8_OVERRIDE {
+ return strict_mode_ == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE {
+ Register registers[] = { InterfaceDescriptor::ContextRegister(),
+ KeyedStoreIC::ReceiverRegister(),
+ KeyedStoreIC::NameRegister(),
+ KeyedStoreIC::ValueRegister() };
+ descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+ }
+
+ private:
+ virtual Major MajorKey() const V8_OVERRIDE { return NoCache; }
+ virtual int NotMissMinorKey() const V8_OVERRIDE { return 0; }
+ virtual bool UseSpecialCache() V8_OVERRIDE { return true; }
+
+ StrictMode strict_mode_;
+};
+
+
+JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph,
+ MachineOperatorBuilder* machine,
+ SourcePositionTable* source_positions)
+ : LoweringBuilder(jsgraph->graph(), source_positions),
+ info_(info),
+ jsgraph_(jsgraph),
+ linkage_(new (jsgraph->zone()) Linkage(info)),
+ machine_(machine) {}
+
+
+void JSGenericLowering::PatchOperator(Node* node, Operator* op) {
+ node->set_op(op);
+}
+
+
+void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
+ node->InsertInput(zone(), index, input);
+}
+
+
+Node* JSGenericLowering::SmiConstant(int32_t immediate) {
+ return jsgraph()->SmiConstant(immediate);
+}
+
+
+Node* JSGenericLowering::Int32Constant(int immediate) {
+ return jsgraph()->Int32Constant(immediate);
+}
+
+
+Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
+ return jsgraph()->HeapConstant(code);
+}
+
+
+Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
+ return jsgraph()->HeapConstant(function);
+}
+
+
+Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
+ return jsgraph()->ExternalConstant(ref);
+}
+
+
+void JSGenericLowering::Lower(Node* node) {
+ Node* replacement = NULL;
+ // Dispatch according to the opcode.
+ switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ replacement = Lower##x(node); \
+ break;
+ DECLARE_CASE(Branch)
+ JS_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+ default:
+ // Nothing to see.
+ return;
+ }
+
+ // Nothing to do if lowering was done by patching the existing node.
+ if (replacement == node) return;
+
+ // Iterate through uses of the original node and replace uses accordingly.
+ UNIMPLEMENTED();
+}
+
+
+#define REPLACE_IC_STUB_CALL(op, StubDeclaration) \
+ Node* JSGenericLowering::Lower##op(Node* node) { \
+ StubDeclaration; \
+ ReplaceWithICStubCall(node, &stub); \
+ return node; \
+ }
+REPLACE_IC_STUB_CALL(JSBitwiseOr, BinaryOpICStub stub(isolate(), Token::BIT_OR))
+REPLACE_IC_STUB_CALL(JSBitwiseXor,
+ BinaryOpICStub stub(isolate(), Token::BIT_XOR))
+REPLACE_IC_STUB_CALL(JSBitwiseAnd,
+ BinaryOpICStub stub(isolate(), Token::BIT_AND))
+REPLACE_IC_STUB_CALL(JSShiftLeft, BinaryOpICStub stub(isolate(), Token::SHL))
+REPLACE_IC_STUB_CALL(JSShiftRight, BinaryOpICStub stub(isolate(), Token::SAR))
+REPLACE_IC_STUB_CALL(JSShiftRightLogical,
+ BinaryOpICStub stub(isolate(), Token::SHR))
+REPLACE_IC_STUB_CALL(JSAdd, BinaryOpICStub stub(isolate(), Token::ADD))
+REPLACE_IC_STUB_CALL(JSSubtract, BinaryOpICStub stub(isolate(), Token::SUB))
+REPLACE_IC_STUB_CALL(JSMultiply, BinaryOpICStub stub(isolate(), Token::MUL))
+REPLACE_IC_STUB_CALL(JSDivide, BinaryOpICStub stub(isolate(), Token::DIV))
+REPLACE_IC_STUB_CALL(JSModulus, BinaryOpICStub stub(isolate(), Token::MOD))
+REPLACE_IC_STUB_CALL(JSToNumber, ToNumberStub stub(isolate()))
+#undef REPLACE_IC_STUB_CALL
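+
+// As an illustration, the JSAdd entry above expands into a LowerJSAdd() that
+// builds a BinaryOpICStub for Token::ADD and rewrites the node in place into a
+// call to that stub via ReplaceWithICStubCall().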
+
+
+#define REPLACE_COMPARE_IC_CALL(op, token, pure) \
+ Node* JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, pure); \
+ return node; \
+ }
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+#undef REPLACE_COMPARE_IC_CALL
+
+
+#define REPLACE_RUNTIME_CALL(op, fun) \
+ Node* JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithRuntimeCall(node, fun); \
+ return node; \
+ }
+REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
+REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
+REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
+REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
+REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+#undef REPLACE_RUNTIME_CALL
+
+
+#define REPLACE_UNIMPLEMENTED(op) \
+ Node* JSGenericLowering::Lower##op(Node* node) { \
+ UNIMPLEMENTED(); \
+ return node; \
+ }
+REPLACE_UNIMPLEMENTED(JSToString)
+REPLACE_UNIMPLEMENTED(JSToName)
+REPLACE_UNIMPLEMENTED(JSYield)
+REPLACE_UNIMPLEMENTED(JSDebugger)
+#undef REPLACE_UNIMPLEMENTED
+
+
+static CallDescriptor::DeoptimizationSupport DeoptimizationSupportForNode(
+ Node* node) {
+ return OperatorProperties::CanLazilyDeoptimize(node->op())
+ ? CallDescriptor::kCanDeoptimize
+ : CallDescriptor::kCannotDeoptimize;
+}
+
+
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+ bool pure) {
+ BinaryOpICStub stub(isolate(), Token::ADD); // TODO(mstarzinger): Hack.
+ CodeStubInterfaceDescriptor* d = stub.GetInterfaceDescriptor();
+ CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(d);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), token);
+ Node* compare;
+ if (pure) {
+ // A pure (strict) comparison doesn't have an effect or control.
+ // But for the graph, we need to add these inputs.
+ compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
+ NodeProperties::GetValueInput(node, 0),
+ NodeProperties::GetValueInput(node, 1),
+ NodeProperties::GetContextInput(node),
+ graph()->start(), graph()->start());
+ } else {
+ compare = graph()->NewNode(common()->Call(desc_compare), CodeConstant(ic),
+ NodeProperties::GetValueInput(node, 0),
+ NodeProperties::GetValueInput(node, 1),
+ NodeProperties::GetContextInput(node),
+ NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
+ }
+ node->ReplaceInput(0, compare);
+ node->ReplaceInput(1, SmiConstant(token));
+ ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+}
+
+
+void JSGenericLowering::ReplaceWithICStubCall(Node* node,
+ HydrogenCodeStub* stub) {
+ CodeStubInterfaceDescriptor* d = stub->GetInterfaceDescriptor();
+ CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+ d, 0, DeoptimizationSupportForNode(node));
+ Node* stub_code = CodeConstant(stub->GetCode());
+ PatchInsertInput(node, 0, stub_code);
+ PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+ Builtins::JavaScript id,
+ int nargs) {
+ CallFunctionStub stub(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
+ CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+ CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, nargs);
+ // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
+ // of code across native contexts. Fix this by loading from given context.
+ Handle<JSFunction> function(
+ JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
+ Node* stub_code = CodeConstant(stub.GetCode());
+ Node* function_node = FunctionConstant(function);
+ PatchInsertInput(node, 0, stub_code);
+ PatchInsertInput(node, 1, function_node);
+ PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
+ Runtime::FunctionId f,
+ int nargs_override) {
+ Operator::Property props = node->op()->properties();
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
+ CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+ f, nargs, props, DeoptimizationSupportForNode(node));
+ Node* ref = ExternalConstant(ExternalReference(f, isolate()));
+ Node* arity = Int32Constant(nargs);
+ if (!centrystub_constant_.is_set()) {
+ centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
+ }
+ PatchInsertInput(node, 0, centrystub_constant_.get());
+ PatchInsertInput(node, nargs + 1, ref);
+ PatchInsertInput(node, nargs + 2, arity);
+ PatchOperator(node, common()->Call(desc));
+}
+
+
+Node* JSGenericLowering::LowerBranch(Node* node) {
+ Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+ jsgraph()->TrueConstant());
+ node->ReplaceInput(0, test);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSUnaryNot(Node* node) {
+ ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSToBoolean(Node* node) {
+ ToBooleanStub stub(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSToObject(Node* node) {
+ ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadProperty(Node* node) {
+ KeyedLoadICStubShim stub(isolate());
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadNamed(Node* node) {
+ LoadNamedParameters p = OpParameter<LoadNamedParameters>(node);
+ LoadICStubShim stub(isolate(), p.contextual_mode);
+ PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name));
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreProperty(Node* node) {
+ // TODO(mstarzinger): The strict_mode needs to be carried along in the
+ // operator so that graphs are fully compositional for inlining.
+ StrictMode strict_mode = info()->strict_mode();
+ KeyedStoreICStubShim stub(isolate(), strict_mode);
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreNamed(Node* node) {
+ PrintableUnique<Name> key = OpParameter<PrintableUnique<Name> >(node);
+ // TODO(mstarzinger): The strict_mode needs to be carried along in the
+ // operator so that graphs are fully compositional for inlining.
+ StrictMode strict_mode = info()->strict_mode();
+ StoreICStubShim stub(isolate(), strict_mode);
+ PatchInsertInput(node, 1, jsgraph()->HeapConstant(key));
+ ReplaceWithICStubCall(node, &stub);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSDeleteProperty(Node* node) {
+ StrictMode strict_mode = OpParameter<StrictMode>(node);
+ PatchInsertInput(node, 2, SmiConstant(strict_mode));
+ ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSHasProperty(Node* node) {
+ ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSInstanceOf(Node* node) {
+ InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+ InstanceofStub::kReturnTrueFalseObject |
+ InstanceofStub::kArgsInRegisters);
+ InstanceofStub stub(isolate(), flags);
+ CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+ CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
+ Node* stub_code = CodeConstant(stub.GetCode());
+ PatchInsertInput(node, 0, stub_code);
+ PatchOperator(node, common()->Call(desc));
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSLoadContext(Node* node) {
+ ContextAccess access = OpParameter<ContextAccess>(node);
+ // TODO(mstarzinger): Use simplified operators instead of machine operators
+ // here so that load/store optimization can be applied afterwards.
+ for (int i = 0; i < access.depth(); ++i) {
+ node->ReplaceInput(
+ 0, graph()->NewNode(
+ machine()->Load(kMachineTagged),
+ NodeProperties::GetValueInput(node, 0),
+ Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetEffectInput(node)));
+ }
+ node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+ PatchOperator(node, machine()->Load(kMachineTagged));
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSStoreContext(Node* node) {
+ ContextAccess access = OpParameter<ContextAccess>(node);
+ // TODO(mstarzinger): Use simplified operators instead of machine operators
+ // here so that load/store optimization can be applied afterwards.
+ for (int i = 0; i < access.depth(); ++i) {
+ node->ReplaceInput(
+ 0, graph()->NewNode(
+ machine()->Load(kMachineTagged),
+ NodeProperties::GetValueInput(node, 0),
+ Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetEffectInput(node)));
+ }
+ node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
+ node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+ PatchOperator(node, machine()->Store(kMachineTagged, kFullWriteBarrier));
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallConstruct(Node* node) {
+ int arity = OpParameter<int>(node);
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+ CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+ d, arity, DeoptimizationSupportForNode(node));
+ Node* stub_code = CodeConstant(stub.GetCode());
+ Node* construct = NodeProperties::GetValueInput(node, 0);
+ PatchInsertInput(node, 0, stub_code);
+ PatchInsertInput(node, 1, Int32Constant(arity - 1));
+ PatchInsertInput(node, 2, construct);
+ PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
+ PatchOperator(node, common()->Call(desc));
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallFunction(Node* node) {
+ CallParameters p = OpParameter<CallParameters>(node);
+ CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
+ CodeStubInterfaceDescriptor* d = GetInterfaceDescriptor(isolate(), &stub);
+ CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+ d, p.arity - 1, DeoptimizationSupportForNode(node));
+ Node* stub_code = CodeConstant(stub.GetCode());
+ PatchInsertInput(node, 0, stub_code);
+ PatchOperator(node, common()->Call(desc));
+ return node;
+}
+
+
+Node* JSGenericLowering::LowerJSCallRuntime(Node* node) {
+ Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
+ int arity = OperatorProperties::GetValueInputCount(node->op());
+ ReplaceWithRuntimeCall(node, function, arity);
+ return node;
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
new file mode 100644
index 000000000..e3113e541
--- /dev/null
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -0,0 +1,83 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
+#define V8_COMPILER_JS_GENERIC_LOWERING_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/opcodes.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HydrogenCodeStub;
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class MachineOperatorBuilder;
+class Linkage;
+
+// Lowers JS-level operators to runtime and IC calls in the "generic" case.
+class JSGenericLowering : public LoweringBuilder {
+ public:
+ JSGenericLowering(CompilationInfo* info, JSGraph* graph,
+ MachineOperatorBuilder* machine,
+ SourcePositionTable* source_positions);
+ virtual ~JSGenericLowering() {}
+
+ virtual void Lower(Node* node);
+
+ protected:
+// Dispatched depending on opcode.
+#define DECLARE_LOWER(x) Node* Lower##x(Node* node);
+ ALL_OP_LIST(DECLARE_LOWER)
+#undef DECLARE_LOWER
+
+ // Helpers to create new constant nodes.
+ Node* SmiConstant(int32_t immediate);
+ Node* Int32Constant(int immediate);
+ Node* CodeConstant(Handle<Code> code);
+ Node* FunctionConstant(Handle<JSFunction> function);
+ Node* ExternalConstant(ExternalReference ref);
+
+ // Helpers to patch existing nodes in the graph.
+ void PatchOperator(Node* node, Operator* new_op);
+ void PatchInsertInput(Node* node, int index, Node* input);
+
+ // Helpers to replace existing nodes with a generic call.
+ void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+ void ReplaceWithICStubCall(Node* node, HydrogenCodeStub* stub);
+ void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
+ void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph()->graph(); }
+ Linkage* linkage() const { return linkage_; }
+ CompilationInfo* info() const { return info_; }
+ CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+ MachineOperatorBuilder* machine() const { return machine_; }
+
+ private:
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+ Linkage* linkage_;
+ MachineOperatorBuilder* machine_;
+ SetOncePointer<Node> centrystub_constant_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
new file mode 100644
index 000000000..2cebbc784
--- /dev/null
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
+ PrintableUnique<Object> unique =
+ PrintableUnique<Object>::CreateImmovable(zone(), object);
+ return NewNode(common()->HeapConstant(unique));
+}
+
+
+Node* JSGraph::NewNode(Operator* op) {
+ Node* node = graph()->NewNode(op);
+ typer_->Init(node);
+ return node;
+}
+
+
+Node* JSGraph::UndefinedConstant() {
+ if (!undefined_constant_.is_set()) {
+ undefined_constant_.set(
+ ImmovableHeapConstant(factory()->undefined_value()));
+ }
+ return undefined_constant_.get();
+}
+
+
+Node* JSGraph::TheHoleConstant() {
+ if (!the_hole_constant_.is_set()) {
+ the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
+ }
+ return the_hole_constant_.get();
+}
+
+
+Node* JSGraph::TrueConstant() {
+ if (!true_constant_.is_set()) {
+ true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
+ }
+ return true_constant_.get();
+}
+
+
+Node* JSGraph::FalseConstant() {
+ if (!false_constant_.is_set()) {
+ false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
+ }
+ return false_constant_.get();
+}
+
+
+Node* JSGraph::NullConstant() {
+ if (!null_constant_.is_set()) {
+ null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
+ }
+ return null_constant_.get();
+}
+
+
+Node* JSGraph::ZeroConstant() {
+ if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
+ return zero_constant_.get();
+}
+
+
+Node* JSGraph::OneConstant() {
+ if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
+ return one_constant_.get();
+}
+
+
+Node* JSGraph::NaNConstant() {
+ if (!nan_constant_.is_set()) {
+ nan_constant_.set(NumberConstant(base::OS::nan_value()));
+ }
+ return nan_constant_.get();
+}
+
+
+Node* JSGraph::HeapConstant(PrintableUnique<Object> value) {
+ // TODO(turbofan): canonicalize heap constants using Unique<T>
+ return NewNode(common()->HeapConstant(value));
+}
+
+
+Node* JSGraph::HeapConstant(Handle<Object> value) {
+ // TODO(titzer): We could also match against the addresses of immortal
+ // immovables here, even without access to the heap, thus always
+ // canonicalizing references to them.
+ return HeapConstant(
+ PrintableUnique<Object>::CreateUninitialized(zone(), value));
+}
+
+
+Node* JSGraph::Constant(Handle<Object> value) {
+ // Dereference the handle to determine if a number constant or other
+ // canonicalized node can be used.
+ if (value->IsNumber()) {
+ return Constant(value->Number());
+ } else if (value->IsUndefined()) {
+ return UndefinedConstant();
+ } else if (value->IsTrue()) {
+ return TrueConstant();
+ } else if (value->IsFalse()) {
+ return FalseConstant();
+ } else if (value->IsNull()) {
+ return NullConstant();
+ } else if (value->IsTheHole()) {
+ return TheHoleConstant();
+ } else {
+ return HeapConstant(value);
+ }
+}
+
+
+Node* JSGraph::Constant(double value) {
+ if (BitCast<int64_t>(value) == BitCast<int64_t>(0.0)) return ZeroConstant();
+ if (BitCast<int64_t>(value) == BitCast<int64_t>(1.0)) return OneConstant();
+ return NumberConstant(value);
+}
+
+
+Node* JSGraph::Constant(int32_t value) {
+ if (value == 0) return ZeroConstant();
+ if (value == 1) return OneConstant();
+ return NumberConstant(value);
+}
+
+
+Node* JSGraph::Int32Constant(int32_t value) {
+ Node** loc = cache_.FindInt32Constant(value);
+ if (*loc == NULL) {
+ *loc = NewNode(common()->Int32Constant(value));
+ }
+ return *loc;
+}
+
+
+Node* JSGraph::NumberConstant(double value) {
+ Node** loc = cache_.FindNumberConstant(value);
+ if (*loc == NULL) {
+ *loc = NewNode(common()->NumberConstant(value));
+ }
+ return *loc;
+}
+
+
+Node* JSGraph::Float64Constant(double value) {
+ Node** loc = cache_.FindFloat64Constant(value);
+ if (*loc == NULL) {
+ *loc = NewNode(common()->Float64Constant(value));
+ }
+ return *loc;
+}
+
+
+Node* JSGraph::ExternalConstant(ExternalReference reference) {
+ Node** loc = cache_.FindExternalConstant(reference);
+ if (*loc == NULL) {
+ *loc = NewNode(common()->ExternalConstant(reference));
+ }
+ return *loc;
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
new file mode 100644
index 000000000..59a6b845e
--- /dev/null
+++ b/deps/v8/src/compiler/js-graph.h
@@ -0,0 +1,107 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GRAPH_H_
+#define V8_COMPILER_JS_GRAPH_H_
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer;
+
+// Implements a facade on a Graph, enhancing the graph with JS-specific
+// notions, including a builder for JS* operators, canonicalized global
+// constants, and various helper methods.
+class JSGraph : public ZoneObject {
+ public:
+ JSGraph(Graph* graph, CommonOperatorBuilder* common, Typer* typer)
+ : graph_(graph),
+ common_(common),
+ javascript_(zone()),
+ typer_(typer),
+ cache_(zone()) {}
+
+ // Canonicalized global constants.
+ Node* UndefinedConstant();
+ Node* TheHoleConstant();
+ Node* TrueConstant();
+ Node* FalseConstant();
+ Node* NullConstant();
+ Node* ZeroConstant();
+ Node* OneConstant();
+ Node* NaNConstant();
+
+ // Creates a HeapConstant node, possibly canonicalized, without inspecting the
+ // object.
+ Node* HeapConstant(PrintableUnique<Object> value);
+
+ // Creates a HeapConstant node, possibly canonicalized, and may access the
+ // heap to inspect the object.
+ Node* HeapConstant(Handle<Object> value);
+
+ // Creates a Constant node of the appropriate type for the given object.
+ // Accesses the heap to inspect the object and determine whether one of the
+ // canonicalized globals or a number constant should be returned.
+ Node* Constant(Handle<Object> value);
+
+ // Creates a NumberConstant node, usually canonicalized.
+ Node* Constant(double value);
+
+ // Creates a NumberConstant node, usually canonicalized.
+ Node* Constant(int32_t value);
+
+ // Creates an Int32Constant node, usually canonicalized.
+ Node* Int32Constant(int32_t value);
+
+ // Creates a Float64Constant node, usually canonicalized.
+ Node* Float64Constant(double value);
+
+ // Creates an ExternalConstant node, usually canonicalized.
+ Node* ExternalConstant(ExternalReference ref);
+
+ Node* SmiConstant(int32_t immediate) {
+ DCHECK(Smi::IsValid(immediate));
+ return Constant(immediate);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+ CommonOperatorBuilder* common() { return common_; }
+ Graph* graph() { return graph_; }
+ Zone* zone() { return graph()->zone(); }
+
+ private:
+ Graph* graph_;
+ CommonOperatorBuilder* common_;
+ JSOperatorBuilder javascript_;
+ Typer* typer_;
+
+ SetOncePointer<Node> undefined_constant_;
+ SetOncePointer<Node> the_hole_constant_;
+ SetOncePointer<Node> true_constant_;
+ SetOncePointer<Node> false_constant_;
+ SetOncePointer<Node> null_constant_;
+ SetOncePointer<Node> zero_constant_;
+ SetOncePointer<Node> one_constant_;
+ SetOncePointer<Node> nan_constant_;
+
+ CommonNodeCache cache_;
+
+ Node* ImmovableHeapConstant(Handle<Object> value);
+ Node* NumberConstant(double value);
+ Node* NewNode(Operator* op);
+
+ Factory* factory() { return zone()->isolate()->factory(); }
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_JS_GRAPH_H_
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
new file mode 100644
index 000000000..fd9547d94
--- /dev/null
+++ b/deps/v8/src/compiler/js-operator.h
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_OPERATOR_H_
+#define V8_COMPILER_JS_OPERATOR_H_
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Defines the location of a context slot relative to a specific scope. This is
+// used as a parameter by JSLoadContext and JSStoreContext operators and allows
+// accessing a context-allocated variable without keeping track of the scope.
+class ContextAccess {
+ public:
+ ContextAccess(int depth, int index, bool immutable)
+ : immutable_(immutable), depth_(depth), index_(index) {
+ DCHECK(0 <= depth && depth <= kMaxUInt16);
+ DCHECK(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
+ }
+ int depth() const { return depth_; }
+ int index() const { return index_; }
+ bool immutable() const { return immutable_; }
+
+ private:
+  // For space reasons this is kept tightly packed; otherwise we could just
+  // use a simple int/int/bool POD.
+ const bool immutable_;
+ const uint16_t depth_;
+ const uint32_t index_;
+};
+
+// Defines the property being loaded from an object by a named load. This is
+// used as a parameter by JSLoadNamed operators.
+struct LoadNamedParameters {
+ PrintableUnique<Name> name;
+ ContextualMode contextual_mode;
+};
+
+// Defines the arity and the call flags for a JavaScript function call. This is
+// used as a parameter by JSCall operators.
+struct CallParameters {
+ int arity;
+ CallFunctionFlags flags;
+};
+
+// Interface for building JavaScript-level operators, e.g. directly from the
+// AST. Most operators have no parameters, thus can be globally shared for all
+// graphs.
+class JSOperatorBuilder {
+ public:
+ explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}
+
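+// Helper macros for the builder methods below: SIMPLE returns a newly
+// allocated SimpleOperator, NOPROPS is SIMPLE with kNoProperties, OP1 returns
+// an Operator1 carrying a single static parameter, and BINOP/UNOP fix the
+// input/output counts for binary and unary operators.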
+#define SIMPLE(name, properties, inputs, outputs) \
+ return new (zone_) \
+ SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define NOPROPS(name, inputs, outputs) \
+ SIMPLE(name, Operator::kNoProperties, inputs, outputs)
+
+#define OP1(name, ptype, pname, properties, inputs, outputs) \
+ return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
+ outputs, #name, pname)
+
+#define BINOP(name) NOPROPS(name, 2, 1)
+#define UNOP(name) NOPROPS(name, 1, 1)
+
+#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+
+ Operator* Equal() { BINOP(JSEqual); }
+ Operator* NotEqual() { BINOP(JSNotEqual); }
+ Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
+ Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
+ Operator* LessThan() { BINOP(JSLessThan); }
+ Operator* GreaterThan() { BINOP(JSGreaterThan); }
+ Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
+ Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
+ Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
+ Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
+ Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
+ Operator* ShiftLeft() { BINOP(JSShiftLeft); }
+ Operator* ShiftRight() { BINOP(JSShiftRight); }
+ Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
+ Operator* Add() { BINOP(JSAdd); }
+ Operator* Subtract() { BINOP(JSSubtract); }
+ Operator* Multiply() { BINOP(JSMultiply); }
+ Operator* Divide() { BINOP(JSDivide); }
+ Operator* Modulus() { BINOP(JSModulus); }
+
+ Operator* UnaryNot() { UNOP(JSUnaryNot); }
+ Operator* ToBoolean() { UNOP(JSToBoolean); }
+ Operator* ToNumber() { UNOP(JSToNumber); }
+ Operator* ToString() { UNOP(JSToString); }
+ Operator* ToName() { UNOP(JSToName); }
+ Operator* ToObject() { UNOP(JSToObject); }
+ Operator* Yield() { UNOP(JSYield); }
+
+ Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+
+ Operator* Call(int arguments, CallFunctionFlags flags) {
+ CallParameters parameters = {arguments, flags};
+ OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
+ arguments, 1);
+ }
+
+ Operator* CallNew(int arguments) {
+ return new (zone_)
+ Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
+ arguments, 1, "JSCallConstruct", arguments);
+ }
+
+ Operator* LoadProperty() { BINOP(JSLoadProperty); }
+ Operator* LoadNamed(PrintableUnique<Name> name,
+ ContextualMode contextual_mode = NOT_CONTEXTUAL) {
+ LoadNamedParameters parameters = {name, contextual_mode};
+ OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties,
+ 1, 1);
+ }
+
+ Operator* StoreProperty() { NOPROPS(JSStoreProperty, 3, 0); }
+ Operator* StoreNamed(PrintableUnique<Name> name) {
+ OP1(JSStoreNamed, PrintableUnique<Name>, name, Operator::kNoProperties, 2,
+ 0);
+ }
+
+ Operator* DeleteProperty(StrictMode strict_mode) {
+ OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
+ 1);
+ }
+
+ Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
+
+ Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
+ ContextAccess access(depth, index, immutable);
+ OP1(JSLoadContext, ContextAccess, access,
+ Operator::kEliminatable | Operator::kNoWrite, 1, 1);
+ }
+ Operator* StoreContext(uint16_t depth, uint32_t index) {
+ ContextAccess access(depth, index, false);
+ OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 1);
+ }
+
+ Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
+ Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
+ Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+
+ // TODO(titzer): nail down the static parts of each of these context flavors.
+ Operator* CreateFunctionContext() { NOPROPS(JSCreateFunctionContext, 1, 1); }
+ Operator* CreateCatchContext(PrintableUnique<String> name) {
+ OP1(JSCreateCatchContext, PrintableUnique<String>, name,
+ Operator::kNoProperties, 1, 1);
+ }
+ Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
+ Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
+ Operator* CreateModuleContext() { NOPROPS(JSCreateModuleContext, 2, 1); }
+ Operator* CreateGlobalContext() { NOPROPS(JSCreateGlobalContext, 2, 1); }
+
+ Operator* Runtime(Runtime::FunctionId function, int arguments) {
+ const Runtime::Function* f = Runtime::FunctionForId(function);
+ DCHECK(f->nargs == -1 || f->nargs == arguments);
+ OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
+ arguments, f->result_size);
+ }
+
+#undef SIMPLE
+#undef NOPROPS
+#undef OP1
+#undef BINOP
+#undef UNOP
+
+ private:
+ Zone* zone_;
+};
+
+// Specialization for static parameters of type {ContextAccess}.
+template <>
+struct StaticParameterTraits<ContextAccess> {
+ static OStream& PrintTo(OStream& os, ContextAccess val) { // NOLINT
+ return os << val.depth() << "," << val.index()
+ << (val.immutable() ? ",imm" : "");
+ }
+ static int HashCode(ContextAccess val) {
+ return (val.depth() << 16) | (val.index() & 0xffff);
+ }
+ static bool Equals(ContextAccess a, ContextAccess b) {
+ return a.immutable() == b.immutable() && a.depth() == b.depth() &&
+ a.index() == b.index();
+ }
+};
+
+// Specialization for static parameters of type {Runtime::FunctionId}.
+template <>
+struct StaticParameterTraits<Runtime::FunctionId> {
+ static OStream& PrintTo(OStream& os, Runtime::FunctionId val) { // NOLINT
+ const Runtime::Function* f = Runtime::FunctionForId(val);
+ return os << (f->name ? f->name : "?Runtime?");
+ }
+ static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
+ static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
+ return a == b;
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_JS_OPERATOR_H_
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
new file mode 100644
index 000000000..361cb94f0
--- /dev/null
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -0,0 +1,604 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+// - relax effects for ToNumber(mixed)
+
+// Replace value uses of {node} with {value} and effect uses of {node} with
+// {effect}. If {effect == NULL}, then use the effect input to {node}.
+// TODO(titzer): move into a GraphEditor?
+static void ReplaceUses(Node* node, Node* value, Node* effect) {
+ if (value == effect) {
+ // Effect and value updates are the same; no special iteration needed.
+ if (value != node) node->ReplaceUses(value);
+ return;
+ }
+
+ if (effect == NULL) effect = NodeProperties::GetEffectInput(node);
+
+ // The iteration requires distinguishing between value and effect edges.
+ UseIter iter = node->uses().begin();
+ while (iter != node->uses().end()) {
+ if (NodeProperties::IsEffectEdge(iter.edge())) {
+ iter = iter.UpdateToAndIncrement(effect);
+ } else {
+ iter = iter.UpdateToAndIncrement(value);
+ }
+ }
+}
+
+
+// Relax the effects of {node} by immediately replacing effect uses of {node}
+// with the effect input to {node}.
+// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+// TODO(titzer): move into a GraphEditor?
+static void RelaxEffects(Node* node) { ReplaceUses(node, node, NULL); }
+
+
+Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
+ ReplaceUses(old, node, node);
+ return Reducer::Changed(node);
+}
+
+
+// A helper class to simplify the process of reducing a single binop node with a
+// JSOperator. This class manages the rewriting of context, control, and effect
+// dependencies during lowering of a binop and contains numerous helper
+// functions for matching the types of inputs to an operation.
+class JSBinopReduction {
+ public:
+ JSBinopReduction(JSTypedLowering* lowering, Node* node)
+ : lowering_(lowering),
+ node_(node),
+ left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
+ right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+
+ void ConvertInputsToNumber() {
+ node_->ReplaceInput(0, ConvertToNumber(left()));
+ node_->ReplaceInput(1, ConvertToNumber(right()));
+ }
+
+ void ConvertInputsToInt32(bool left_signed, bool right_signed) {
+ node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+ node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+ }
+
+ void ConvertInputsToString() {
+ node_->ReplaceInput(0, ConvertToString(left()));
+ node_->ReplaceInput(1, ConvertToString(right()));
+ }
+
+ // Convert inputs for bitwise shift operation (ES5 spec 11.7).
+ void ConvertInputsForShift(bool left_signed) {
+ node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+ Node* rnum = ConvertToI32(false, right());
+ node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
+ jsgraph()->Int32Constant(0x1F)));
+ }
+
+ void SwapInputs() {
+ Node* l = left();
+ Node* r = right();
+ node_->ReplaceInput(0, r);
+ node_->ReplaceInput(1, l);
+ std::swap(left_type_, right_type_);
+ }
+
+ // Remove all effect and control inputs and outputs to this node and change
+ // to the pure operator {op}, possibly inserting a boolean inversion.
+ Reduction ChangeToPureOperator(Operator* op, bool invert = false) {
+ DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op));
+ DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+ DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op));
+ DCHECK_EQ(2, OperatorProperties::GetValueInputCount(op));
+
+ // Remove the effects from the node, if any, and update its effect usages.
+ if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
+ RelaxEffects(node_);
+ }
+ // Remove the inputs corresponding to context, effect, and control.
+ NodeProperties::RemoveNonValueInputs(node_);
+ // Finally, update the operator to the new one.
+ node_->set_op(op);
+
+ if (invert) {
+      // Insert a boolean not to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+ node_->ReplaceUses(value);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node_);
+ return lowering_->ReplaceWith(value);
+ }
+ return lowering_->Changed(node_);
+ }
+
+ bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
+
+ bool BothInputsAre(Type* t) {
+ return left_type_->Is(t) && right_type_->Is(t);
+ }
+
+ bool OneInputCannotBe(Type* t) {
+ return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+ }
+
+ bool NeitherInputCanBe(Type* t) {
+ return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+ }
+
+ Node* effect() { return NodeProperties::GetEffectInput(node_); }
+ Node* control() { return NodeProperties::GetControlInput(node_); }
+ Node* context() { return NodeProperties::GetContextInput(node_); }
+ Node* left() { return NodeProperties::GetValueInput(node_, 0); }
+ Node* right() { return NodeProperties::GetValueInput(node_, 1); }
+ Type* left_type() { return left_type_; }
+ Type* right_type() { return right_type_; }
+
+ SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
+ Graph* graph() { return lowering_->graph(); }
+ JSGraph* jsgraph() { return lowering_->jsgraph(); }
+ JSOperatorBuilder* javascript() { return lowering_->javascript(); }
+ MachineOperatorBuilder* machine() { return lowering_->machine(); }
+
+ private:
+ JSTypedLowering* lowering_; // The containing lowering instance.
+ Node* node_; // The original node.
+ Type* left_type_; // Cache of the left input's type.
+ Type* right_type_; // Cache of the right input's type.
+
+ Node* ConvertToString(Node* node) {
+ // Avoid introducing too many eager ToString() operations.
+ Reduction reduced = lowering_->ReduceJSToStringInput(node);
+ if (reduced.Changed()) return reduced.replacement();
+ Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
+ effect(), control());
+ update_effect(n);
+ return n;
+ }
+
+ Node* ConvertToNumber(Node* node) {
+ // Avoid introducing too many eager ToNumber() operations.
+ Reduction reduced = lowering_->ReduceJSToNumberInput(node);
+ if (reduced.Changed()) return reduced.replacement();
+ Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+ effect(), control());
+ update_effect(n);
+ return n;
+ }
+
+  // Try narrowing a double or number operation to an Int32 operation.
+ bool TryNarrowingToI32(Type* type, Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Add:
+ case IrOpcode::kNumberAdd: {
+ JSBinopReduction r(lowering_, node);
+ if (r.BothInputsAre(Type::Integral32())) {
+ node->set_op(lowering_->machine()->Int32Add());
+ // TODO(titzer): narrow bounds instead of overwriting.
+ NodeProperties::SetBounds(node, Bounds(type));
+ return true;
+ }
+ }
+ case IrOpcode::kFloat64Sub:
+ case IrOpcode::kNumberSubtract: {
+ JSBinopReduction r(lowering_, node);
+ if (r.BothInputsAre(Type::Integral32())) {
+ node->set_op(lowering_->machine()->Int32Sub());
+ // TODO(titzer): narrow bounds instead of overwriting.
+ NodeProperties::SetBounds(node, Bounds(type));
+ return true;
+ }
+ }
+ default:
+ return false;
+ }
+ }
+
+ Node* ConvertToI32(bool is_signed, Node* node) {
+ Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
+ if (node->OwnedBy(node_)) {
+      // If {node_} is the only user of {node}, try narrowing its operation
+      // to an Int32 add or subtract.
+ if (TryNarrowingToI32(type, node)) return node;
+ } else {
+ // Otherwise, {node} has multiple uses. Leave it as is and let the
+ // further lowering passes deal with it, which use a full backwards
+ // fixpoint.
+ }
+
+    // Avoid introducing too many eager NumberTo(U)Int32() operations.
+ node = ConvertToNumber(node);
+ Type* input_type = NodeProperties::GetBounds(node).upper;
+
+ if (input_type->Is(type)) return node; // already in the value range.
+
+ Operator* op = is_signed ? simplified()->NumberToInt32()
+ : simplified()->NumberToUint32();
+ Node* n = graph()->NewNode(op, node);
+ return n;
+ }
+
+ void update_effect(Node* effect) {
+ NodeProperties::ReplaceEffectInput(node_, effect);
+ }
+};
+
+
+Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+ JSBinopReduction r(this, node);
+ if (r.OneInputIs(Type::String())) {
+ r.ConvertInputsToString();
+ return r.ChangeToPureOperator(simplified()->StringAdd());
+ } else if (r.NeitherInputCanBe(Type::String())) {
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(simplified()->NumberAdd());
+ }
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node, Operator* numberOp) {
+ JSBinopReduction r(this, node);
+ if (r.OneInputIs(Type::Primitive())) {
+ // If at least one input is a primitive, then insert appropriate conversions
+ // to number and reduce this operator to the given numeric one.
+ // TODO(turbofan): make this heuristic configurable for code size.
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(numberOp);
+ }
+ // TODO(turbofan): relax/remove the effects of this operator in other cases.
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
+ bool right_signed, Operator* intOp) {
+ JSBinopReduction r(this, node);
+ // TODO(titzer): some Smi bitwise operations don't really require going
+ // all the way to int32, which can save tagging/untagging for some operations
+ // on some platforms.
+ // TODO(turbofan): make this heuristic configurable for code size.
+ r.ConvertInputsToInt32(left_signed, right_signed);
+ return r.ChangeToPureOperator(intOp);
+}
+
+
+Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
+ Operator* shift_op) {
+ JSBinopReduction r(this, node);
+ r.ConvertInputsForShift(left_signed);
+ return r.ChangeToPureOperator(shift_op);
+}
+
+
+Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+ JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::String())) {
+ // If both inputs are definitely strings, perform a string comparison.
+ Operator* stringOp;
+ switch (node->opcode()) {
+ case IrOpcode::kJSLessThan:
+ stringOp = simplified()->StringLessThan();
+ break;
+ case IrOpcode::kJSGreaterThan:
+ stringOp = simplified()->StringLessThan();
+ r.SwapInputs(); // a > b => b < a
+ break;
+ case IrOpcode::kJSLessThanOrEqual:
+ stringOp = simplified()->StringLessThanOrEqual();
+ break;
+ case IrOpcode::kJSGreaterThanOrEqual:
+ stringOp = simplified()->StringLessThanOrEqual();
+ r.SwapInputs(); // a >= b => b <= a
+ break;
+ default:
+ return NoChange();
+ }
+ return r.ChangeToPureOperator(stringOp);
+ } else if (r.OneInputCannotBe(Type::String())) {
+ // If one input cannot be a string, then emit a number comparison.
+ Operator* less_than;
+ Operator* less_than_or_equal;
+ if (r.BothInputsAre(Type::Unsigned32())) {
+ less_than = machine()->Uint32LessThan();
+ less_than_or_equal = machine()->Uint32LessThanOrEqual();
+ } else if (r.BothInputsAre(Type::Signed32())) {
+ less_than = machine()->Int32LessThan();
+ less_than_or_equal = machine()->Int32LessThanOrEqual();
+ } else {
+ // TODO(turbofan): mixed signed/unsigned int32 comparisons.
+ r.ConvertInputsToNumber();
+ less_than = simplified()->NumberLessThan();
+ less_than_or_equal = simplified()->NumberLessThanOrEqual();
+ }
+ Operator* comparison;
+ switch (node->opcode()) {
+ case IrOpcode::kJSLessThan:
+ comparison = less_than;
+ break;
+ case IrOpcode::kJSGreaterThan:
+ comparison = less_than;
+ r.SwapInputs(); // a > b => b < a
+ break;
+ case IrOpcode::kJSLessThanOrEqual:
+ comparison = less_than_or_equal;
+ break;
+ case IrOpcode::kJSGreaterThanOrEqual:
+ comparison = less_than_or_equal;
+ r.SwapInputs(); // a >= b => b <= a
+ break;
+ default:
+ return NoChange();
+ }
+ return r.ChangeToPureOperator(comparison);
+ }
+ // TODO(turbofan): relax/remove effects of this operator in other cases.
+ return NoChange(); // Keep a generic comparison.
+}
+
+
+Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+ JSBinopReduction r(this, node);
+
+ if (r.BothInputsAre(Type::Number())) {
+ return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ }
+ if (r.BothInputsAre(Type::String())) {
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ }
+ if (r.BothInputsAre(Type::Receiver())) {
+ return r.ChangeToPureOperator(
+ simplified()->ReferenceEqual(Type::Receiver()), invert);
+ }
+ // TODO(turbofan): js-typed-lowering of Equal(undefined)
+ // TODO(turbofan): js-typed-lowering of Equal(null)
+ // TODO(turbofan): js-typed-lowering of Equal(boolean)
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+ JSBinopReduction r(this, node);
+ if (r.left() == r.right()) {
+    // x === x is always true, provided x cannot be NaN.
+ if (!r.left_type()->Maybe(Type::NaN())) {
+ return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant());
+ }
+ }
+ if (!r.left_type()->Maybe(r.right_type())) {
+ // Type intersection is empty; === is always false unless both
+ // inputs could be strings (one internalized and one not).
+ if (r.OneInputCannotBe(Type::String())) {
+ return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
+ : jsgraph()->FalseConstant());
+ }
+ }
+ if (r.OneInputIs(Type::Undefined())) {
+ return r.ChangeToPureOperator(
+ simplified()->ReferenceEqual(Type::Undefined()), invert);
+ }
+ if (r.OneInputIs(Type::Null())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
+ invert);
+ }
+ if (r.OneInputIs(Type::Boolean())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+ invert);
+ }
+ if (r.OneInputIs(Type::Object())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
+ invert);
+ }
+ if (r.OneInputIs(Type::Receiver())) {
+ return r.ChangeToPureOperator(
+ simplified()->ReferenceEqual(Type::Receiver()), invert);
+ }
+ if (r.BothInputsAre(Type::String())) {
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ }
+ if (r.BothInputsAre(Type::Number())) {
+ return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ }
+ // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
+ if (input->opcode() == IrOpcode::kJSToNumber) {
+ // Recursively try to reduce the input first.
+ Reduction result = ReduceJSToNumberInput(input->InputAt(0));
+ if (result.Changed()) {
+ RelaxEffects(input);
+ return result;
+ }
+ return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
+ }
+ Type* input_type = NodeProperties::GetBounds(input).upper;
+ if (input_type->Is(Type::Number())) {
+ // JSToNumber(number) => x
+ return Changed(input);
+ }
+ if (input_type->Is(Type::Undefined())) {
+ // JSToNumber(undefined) => #NaN
+ return ReplaceWith(jsgraph()->NaNConstant());
+ }
+ if (input_type->Is(Type::Null())) {
+ // JSToNumber(null) => #0
+ return ReplaceWith(jsgraph()->ZeroConstant());
+ }
+ // TODO(turbofan): js-typed-lowering of ToNumber(boolean)
+ // TODO(turbofan): js-typed-lowering of ToNumber(string)
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
+ if (input->opcode() == IrOpcode::kJSToString) {
+ // Recursively try to reduce the input first.
+ Reduction result = ReduceJSToStringInput(input->InputAt(0));
+ if (result.Changed()) {
+ RelaxEffects(input);
+ return result;
+ }
+ return Changed(input); // JSToString(JSToString(x)) => JSToString(x)
+ }
+ Type* input_type = NodeProperties::GetBounds(input).upper;
+ if (input_type->Is(Type::String())) {
+ return Changed(input); // JSToString(string) => x
+ }
+ if (input_type->Is(Type::Undefined())) {
+ return ReplaceWith(jsgraph()->HeapConstant(
+ graph()->zone()->isolate()->factory()->undefined_string()));
+ }
+ if (input_type->Is(Type::Null())) {
+ return ReplaceWith(jsgraph()->HeapConstant(
+ graph()->zone()->isolate()->factory()->null_string()));
+ }
+ // TODO(turbofan): js-typed-lowering of ToString(boolean)
+ // TODO(turbofan): js-typed-lowering of ToString(number)
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
+ if (input->opcode() == IrOpcode::kJSToBoolean) {
+ // Recursively try to reduce the input first.
+ Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
+ if (result.Changed()) {
+ RelaxEffects(input);
+ return result;
+ }
+ return Changed(input); // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+ }
+ Type* input_type = NodeProperties::GetBounds(input).upper;
+ if (input_type->Is(Type::Boolean())) {
+ return Changed(input); // JSToBoolean(boolean) => x
+ }
+ if (input_type->Is(Type::Undefined())) {
+ // JSToBoolean(undefined) => #false
+ return ReplaceWith(jsgraph()->FalseConstant());
+ }
+ if (input_type->Is(Type::Null())) {
+ // JSToBoolean(null) => #false
+ return ReplaceWith(jsgraph()->FalseConstant());
+ }
+ if (input_type->Is(Type::DetectableReceiver())) {
+ // JSToBoolean(detectable) => #true
+ return ReplaceWith(jsgraph()->TrueConstant());
+ }
+ if (input_type->Is(Type::Undetectable())) {
+ // JSToBoolean(undetectable) => #false
+ return ReplaceWith(jsgraph()->FalseConstant());
+ }
+ if (input_type->Is(Type::Number())) {
+ // JSToBoolean(number) => BooleanNot(NumberEqual(x, #0))
+ Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
+ jsgraph()->ZeroConstant());
+ Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+ ReplaceEagerly(input, inv);
+ // TODO(titzer): Ugly. ReplaceEagerly smashes all uses. Smash it back here.
+ cmp->ReplaceInput(0, input);
+ return Changed(inv);
+ }
+ // TODO(turbofan): js-typed-lowering of ToBoolean(string)
+ return NoChange();
+}
+
+
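+// If {reduction} changed the node, redirect value uses of {node} to the
+// replacement (and effect uses to {node}'s effect input); otherwise report
+// no change.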
+static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
+ if (reduction.Changed()) {
+ ReplaceUses(node, reduction.replacement(), NULL);
+ return reduction;
+ }
+ return Reducer::NoChange();
+}
+
+
+Reduction JSTypedLowering::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSEqual:
+ return ReduceJSEqual(node, false);
+ case IrOpcode::kJSNotEqual:
+ return ReduceJSEqual(node, true);
+ case IrOpcode::kJSStrictEqual:
+ return ReduceJSStrictEqual(node, false);
+ case IrOpcode::kJSStrictNotEqual:
+ return ReduceJSStrictEqual(node, true);
+ case IrOpcode::kJSLessThan: // fall through
+ case IrOpcode::kJSGreaterThan: // fall through
+ case IrOpcode::kJSLessThanOrEqual: // fall through
+ case IrOpcode::kJSGreaterThanOrEqual:
+ return ReduceJSComparison(node);
+ case IrOpcode::kJSBitwiseOr:
+ return ReduceI32Binop(node, true, true, machine()->Word32Or());
+ case IrOpcode::kJSBitwiseXor:
+ return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+ case IrOpcode::kJSBitwiseAnd:
+ return ReduceI32Binop(node, true, true, machine()->Word32And());
+ case IrOpcode::kJSShiftLeft:
+ return ReduceI32Shift(node, true, machine()->Word32Shl());
+ case IrOpcode::kJSShiftRight:
+ return ReduceI32Shift(node, true, machine()->Word32Sar());
+ case IrOpcode::kJSShiftRightLogical:
+ return ReduceI32Shift(node, false, machine()->Word32Shr());
+ case IrOpcode::kJSAdd:
+ return ReduceJSAdd(node);
+ case IrOpcode::kJSSubtract:
+ return ReduceNumberBinop(node, simplified()->NumberSubtract());
+ case IrOpcode::kJSMultiply:
+ return ReduceNumberBinop(node, simplified()->NumberMultiply());
+ case IrOpcode::kJSDivide:
+ return ReduceNumberBinop(node, simplified()->NumberDivide());
+ case IrOpcode::kJSModulus:
+ return ReduceNumberBinop(node, simplified()->NumberModulus());
+ case IrOpcode::kJSUnaryNot: {
+ Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
+ Node* value;
+ if (result.Changed()) {
+ // !x => BooleanNot(x)
+ value =
+ graph()->NewNode(simplified()->BooleanNot(), result.replacement());
+ ReplaceUses(node, value, NULL);
+ return Changed(value);
+ } else {
+ // !x => BooleanNot(JSToBoolean(x))
+ value = graph()->NewNode(simplified()->BooleanNot(), node);
+ node->set_op(javascript()->ToBoolean());
+ ReplaceUses(node, value, node);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node);
+ return ReplaceWith(value);
+ }
+ }
+ case IrOpcode::kJSToBoolean:
+ return ReplaceWithReduction(node,
+ ReduceJSToBooleanInput(node->InputAt(0)));
+ case IrOpcode::kJSToNumber:
+ return ReplaceWithReduction(node,
+ ReduceJSToNumberInput(node->InputAt(0)));
+ case IrOpcode::kJSToString:
+ return ReplaceWithReduction(node,
+ ReduceJSToStringInput(node->InputAt(0)));
+ default:
+ break;
+ }
+ return NoChange();
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
new file mode 100644
index 000000000..c69fc2736
--- /dev/null
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_REDUCERS_H_
+#define V8_COMPILER_OPERATOR_REDUCERS_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Lowers JS-level operators to simplified operators based on types.
+class JSTypedLowering : public LoweringBuilder {
+ public:
+ explicit JSTypedLowering(JSGraph* jsgraph,
+ SourcePositionTable* source_positions)
+ : LoweringBuilder(jsgraph->graph(), source_positions),
+ jsgraph_(jsgraph),
+ simplified_(jsgraph->zone()),
+ machine_(jsgraph->zone()) {}
+ virtual ~JSTypedLowering() {}
+
+ Reduction Reduce(Node* node);
+ virtual void Lower(Node* node) { Reduce(node); }
+
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+
+ private:
+ friend class JSBinopReduction;
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
+ MachineOperatorBuilder machine_;
+
+ Reduction ReplaceEagerly(Node* old, Node* node);
+ Reduction NoChange() { return Reducer::NoChange(); }
+ Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
+ Reduction Changed(Node* node) { return Reducer::Changed(node); }
+ Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSComparison(Node* node);
+ Reduction ReduceJSEqual(Node* node, bool invert);
+ Reduction ReduceJSStrictEqual(Node* node, bool invert);
+ Reduction ReduceJSToNumberInput(Node* input);
+ Reduction ReduceJSToStringInput(Node* input);
+ Reduction ReduceJSToBooleanInput(Node* input);
+ Reduction ReduceNumberBinop(Node* node, Operator* numberOp);
+ Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
+ Operator* intOp);
+ Reduction ReduceI32Shift(Node* node, bool left_signed, Operator* shift_op);
+
+ JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_OPERATOR_REDUCERS_H_
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
new file mode 100644
index 000000000..e7aafc388
--- /dev/null
+++ b/deps/v8/src/compiler/linkage-impl.h
@@ -0,0 +1,206 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_IMPL_H_
+#define V8_COMPILER_LINKAGE_IMPL_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
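+// Architecture-independent helpers used by the per-platform linkage
+// implementations to build LinkageLocations and CallDescriptors.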
+class LinkageHelper {
+ public:
+ static LinkageLocation TaggedStackSlot(int index) {
+ DCHECK(index < 0);
+ return LinkageLocation(kMachineTagged, index);
+ }
+
+ static LinkageLocation TaggedRegisterLocation(Register reg) {
+ return LinkageLocation(kMachineTagged, Register::ToAllocationIndex(reg));
+ }
+
+ static inline LinkageLocation WordRegisterLocation(Register reg) {
+ return LinkageLocation(MachineOperatorBuilder::pointer_rep(),
+ Register::ToAllocationIndex(reg));
+ }
+
+ static LinkageLocation UnconstrainedRegister(MachineType rep) {
+ return LinkageLocation(rep, LinkageLocation::ANY_REGISTER);
+ }
+
+ static const RegList kNoCalleeSaved = 0;
+
+ // TODO(turbofan): cache call descriptors for JSFunction calls.
+ template <typename LinkageTraits>
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone, int parameter_count) {
+ const int jsfunction_count = 1;
+ const int context_count = 1;
+ int input_count = jsfunction_count + parameter_count + context_count;
+
+ const int return_count = 1;
+ LinkageLocation* locations =
+ zone->NewArray<LinkageLocation>(return_count + input_count);
+
+ int index = 0;
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::JSCallFunctionReg());
+
+ for (int i = 0; i < parameter_count; i++) {
+ // All parameters to JS calls go on the stack.
+ int spill_slot_index = i - parameter_count;
+ locations[index++] = TaggedStackSlot(spill_slot_index);
+ }
+ locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+ // TODO(titzer): refactor TurboFan graph to consider context a value input.
+ return new (zone)
+ CallDescriptor(CallDescriptor::kCallJSFunction, // kind
+ return_count, // return_count
+ parameter_count, // parameter_count
+ input_count - context_count, // input_count
+ locations, // locations
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ CallDescriptor::kCanDeoptimize); // deoptimization
+ }
+
+
+ // TODO(turbofan): cache call descriptors for runtime calls.
+ template <typename LinkageTraits>
+ static CallDescriptor* GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function_id, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize) {
+ const int code_count = 1;
+ const int function_count = 1;
+ const int num_args_count = 1;
+ const int context_count = 1;
+ const int input_count = code_count + parameter_count + function_count +
+ num_args_count + context_count;
+
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ const int return_count = function->result_size;
+ LinkageLocation* locations =
+ zone->NewArray<LinkageLocation>(return_count + input_count);
+
+ int index = 0;
+ if (return_count > 0) {
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+ }
+ if (return_count > 1) {
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::ReturnValue2Reg());
+ }
+
+ DCHECK_LE(return_count, 2);
+
+ locations[index++] = UnconstrainedRegister(kMachineTagged); // CEntryStub
+
+ for (int i = 0; i < parameter_count; i++) {
+ // All parameters to runtime calls go on the stack.
+ int spill_slot_index = i - parameter_count;
+ locations[index++] = TaggedStackSlot(spill_slot_index);
+ }
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::RuntimeCallFunctionReg());
+ locations[index++] =
+ WordRegisterLocation(LinkageTraits::RuntimeCallArgCountReg());
+ locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+ // TODO(titzer): refactor TurboFan graph to consider context a value input.
+ return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ return_count, // return_count
+ parameter_count, // parameter_count
+ input_count, // input_count
+ locations, // locations
+ properties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ can_deoptimize, // deoptimization
+ function->name);
+ }
+
+
+ // TODO(turbofan): cache call descriptors for code stub calls.
+ template <typename LinkageTraits>
+ static CallDescriptor* GetStubCallDescriptor(
+ Zone* zone, CodeStubInterfaceDescriptor* descriptor,
+ int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize) {
+ int register_parameter_count = descriptor->GetEnvironmentParameterCount();
+ int parameter_count = register_parameter_count + stack_parameter_count;
+ const int code_count = 1;
+ const int context_count = 1;
+ int input_count = code_count + parameter_count + context_count;
+
+ const int return_count = 1;
+ LinkageLocation* locations =
+ zone->NewArray<LinkageLocation>(return_count + input_count);
+
+ int index = 0;
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+ locations[index++] = UnconstrainedRegister(kMachineTagged); // code
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < register_parameter_count) {
+ // The first parameters to code stub calls go in registers.
+ Register reg = descriptor->GetEnvironmentParameterRegister(i);
+ locations[index++] = TaggedRegisterLocation(reg);
+ } else {
+ // The rest of the parameters go on the stack.
+ int stack_slot = i - register_parameter_count - stack_parameter_count;
+ locations[index++] = TaggedStackSlot(stack_slot);
+ }
+ }
+ locations[index++] = TaggedRegisterLocation(LinkageTraits::ContextReg());
+
+ // TODO(titzer): refactor TurboFan graph to consider context a value input.
+ return new (zone)
+ CallDescriptor(CallDescriptor::kCallCodeObject, // kind
+ return_count, // return_count
+ parameter_count, // parameter_count
+ input_count, // input_count
+ locations, // locations
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ can_deoptimize, // deoptimization
+ CodeStub::MajorName(descriptor->MajorKey(), false));
+ }
+
+
+ template <typename LinkageTraits>
+ static CallDescriptor* GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ LinkageLocation* locations =
+ zone->NewArray<LinkageLocation>(num_params + 2);
+ int index = 0;
+ locations[index++] =
+ TaggedRegisterLocation(LinkageTraits::ReturnValueReg());
+ locations[index++] = LinkageHelper::UnconstrainedRegister(
+ MachineOperatorBuilder::pointer_rep());
+ // TODO(dcarney): test with lots of parameters.
+ int i = 0;
+ for (; i < LinkageTraits::CRegisterParametersLength() && i < num_params;
+ i++) {
+ locations[index++] = LinkageLocation(
+ param_types[i],
+ Register::ToAllocationIndex(LinkageTraits::CRegisterParameter(i)));
+ }
+ for (; i < num_params; i++) {
+ locations[index++] = LinkageLocation(param_types[i], -1 - i);
+ }
+ return new (zone) CallDescriptor(
+ CallDescriptor::kCallAddress, 1, num_params, num_params + 1, locations,
+ Operator::kNoProperties, LinkageTraits::CCalleeSaveRegisters(),
+ CallDescriptor::kCannotDeoptimize); // TODO(jarin) should deoptimize!
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
new file mode 100644
index 000000000..26a3dccc4
--- /dev/null
+++ b/deps/v8/src/compiler/linkage.cc
@@ -0,0 +1,149 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/compiler/node.h"
+#include "src/compiler/pipeline.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
+ switch (k) {
+ case CallDescriptor::kCallCodeObject:
+ os << "Code";
+ break;
+ case CallDescriptor::kCallJSFunction:
+ os << "JS";
+ break;
+ case CallDescriptor::kCallAddress:
+ os << "Addr";
+ break;
+ }
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const CallDescriptor& d) {
+ // TODO(svenpanne) Output properties etc. and be less cryptic.
+ return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
+ << "p" << d.ParameterCount() << "i" << d.InputCount()
+ << (d.CanLazilyDeoptimize() ? "deopt" : "");
+}
+
+
+Linkage::Linkage(CompilationInfo* info) : info_(info) {
+ if (info->function() != NULL) {
+ // If we already have the function literal, use the number of parameters
+ // plus the receiver.
+ incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
+ } else if (!info->closure().is_null()) {
+    // If we are compiling a JS function without its literal, use the formal
+    // parameter count from the SharedFunctionInfo, plus the receiver.
+ SharedFunctionInfo* shared = info->closure()->shared();
+ incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
+ } else if (info->code_stub() != NULL) {
+ // Use the code stub interface descriptor.
+ HydrogenCodeStub* stub = info->code_stub();
+ CodeStubInterfaceDescriptor* descriptor =
+ info_->isolate()->code_stub_interface_descriptor(stub->MajorKey());
+ incoming_ = GetStubCallDescriptor(descriptor);
+ } else {
+ incoming_ = NULL; // TODO(titzer): ?
+ }
+}
+
+
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+ if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
+ incoming_->kind() == CallDescriptor::kCallAddress) {
+ int offset;
+ int register_save_area_size = frame->GetRegisterSaveAreaSize();
+ if (spill_slot >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ offset =
+ -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
+ } else {
+ // Incoming parameter. Skip the return address.
+ offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
+ kPCOnStackSize + extra;
+ }
+ return FrameOffset::FromFramePointer(offset);
+ } else {
+ // No frame. Retrieve all parameters relative to stack pointer.
+ DCHECK(spill_slot < 0); // Must be a parameter.
+ int register_save_area_size = frame->GetRegisterSaveAreaSize();
+ int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
+ kPCOnStackSize + extra;
+ return FrameOffset::FromStackPointer(offset);
+ }
+}
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
+ return GetJSCallDescriptor(parameter_count, this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize) {
+ return GetRuntimeCallDescriptor(function, parameter_count, properties,
+ can_deoptimize, this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize) {
+ return GetStubCallDescriptor(descriptor, stack_parameter_count,
+ can_deoptimize, this->info_->zone());
+}
+
+
+//==============================================================================
+// Provide unimplemented methods on unsupported architectures, to at least link.
+//==============================================================================
+#if !V8_TURBOFAN_BACKEND
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+#endif // !V8_TURBOFAN_BACKEND
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
new file mode 100644
index 000000000..9fe02183e
--- /dev/null
+++ b/deps/v8/src/compiler/linkage.h
@@ -0,0 +1,193 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_H_
+#define V8_COMPILER_LINKAGE_H_
+
+#include "src/v8.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Describes the location of a parameter or a return value of a call.
+// TODO(titzer): replace with Radium locations when they are ready.
+class LinkageLocation {
+ public:
+ LinkageLocation(MachineType rep, int location)
+ : rep_(rep), location_(location) {}
+
+ inline MachineType representation() const { return rep_; }
+
+ static const int16_t ANY_REGISTER = 32767;
+
+ private:
+ friend class CallDescriptor;
+ friend class OperandGenerator;
+ MachineType rep_;
+ int16_t location_; // >= 0 implies register, otherwise stack slot.
+};
+
+
+class CallDescriptor : public ZoneObject {
+ public:
+ // Describes whether the first parameter is a code object, a JSFunction,
+ // or an address--all of which require different machine sequences to call.
+ enum Kind { kCallCodeObject, kCallJSFunction, kCallAddress };
+
+ enum DeoptimizationSupport { kCanDeoptimize, kCannotDeoptimize };
+
+ CallDescriptor(Kind kind, int8_t return_count, int16_t parameter_count,
+ int16_t input_count, LinkageLocation* locations,
+ Operator::Property properties, RegList callee_saved_registers,
+ DeoptimizationSupport deoptimization_support,
+ const char* debug_name = "")
+ : kind_(kind),
+ return_count_(return_count),
+ parameter_count_(parameter_count),
+ input_count_(input_count),
+ locations_(locations),
+ properties_(properties),
+ callee_saved_registers_(callee_saved_registers),
+ deoptimization_support_(deoptimization_support),
+ debug_name_(debug_name) {}
+ // Returns the kind of this call.
+ Kind kind() const { return kind_; }
+
+ // Returns {true} if this descriptor is a call to a JSFunction.
+ bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+
+ // The number of return values from this call, usually 0 or 1.
+ int ReturnCount() const { return return_count_; }
+
+ // The number of JavaScript parameters to this call, including receiver,
+ // but not the context.
+ int ParameterCount() const { return parameter_count_; }
+
+ int InputCount() const { return input_count_; }
+
+ bool CanLazilyDeoptimize() const {
+ return deoptimization_support_ == kCanDeoptimize;
+ }
+
+ LinkageLocation GetReturnLocation(int index) {
+ DCHECK(index < return_count_);
+ return locations_[0 + index]; // return locations start at 0.
+ }
+
+ LinkageLocation GetInputLocation(int index) {
+ DCHECK(index < input_count_ + 1); // input_count + 1 is the context.
+ return locations_[return_count_ + index]; // inputs start after returns.
+ }
+
+ // Operator properties describe how this call can be optimized, if at all.
+ Operator::Property properties() const { return properties_; }
+
+ // Get the callee-saved registers, if any, across this call.
+ RegList CalleeSavedRegisters() { return callee_saved_registers_; }
+
+ const char* debug_name() const { return debug_name_; }
+
+ private:
+ friend class Linkage;
+
+ Kind kind_;
+ int8_t return_count_;
+ int16_t parameter_count_;
+ int16_t input_count_;
+ LinkageLocation* locations_;
+ Operator::Property properties_;
+ RegList callee_saved_registers_;
+ DeoptimizationSupport deoptimization_support_;
+ const char* debug_name_;
+};
+
+OStream& operator<<(OStream& os, const CallDescriptor& d);
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
+
+// Defines the linkage for a compilation, including the calling conventions
+// for incoming parameters and return value(s) as well as the outgoing calling
+// convention for any kind of call. Linkage is generally architecture-specific.
+//
+// Can be used to translate {arg_index} (i.e. index of the call node input) as
+// well as {param_index} (i.e. as stored in parameter nodes) into an operator
+// representing the architecture-specific location. The following call node
+// layouts are supported (where {n} is the number of value inputs):
+//
+// #0 #1 #2 #3 [...] #n
+// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
+// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], context
+// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
+class Linkage : public ZoneObject {
+ public:
+ explicit Linkage(CompilationInfo* info);
+ explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
+ : info_(info), incoming_(incoming) {}
+
+ // The call descriptor for this compilation unit describes the locations
+ // of incoming parameters and the outgoing return value(s).
+ CallDescriptor* GetIncomingDescriptor() { return incoming_; }
+ CallDescriptor* GetJSCallDescriptor(int parameter_count);
+ static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
+ CallDescriptor* GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize =
+ CallDescriptor::kCannotDeoptimize);
+ static CallDescriptor* GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
+
+ CallDescriptor* GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count = 0,
+ CallDescriptor::DeoptimizationSupport can_deoptimize =
+ CallDescriptor::kCannotDeoptimize);
+ static CallDescriptor* GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone);
+
+ // Creates a call descriptor for simplified C calls that is appropriate
+ // for the host platform. This simplified calling convention only supports
+ // integers and pointers of one word size each, i.e. no floating point,
+ // structs, pointers to members, etc.
+ static CallDescriptor* GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types);
+
+ // Get the location of an (incoming) parameter to this function.
+ LinkageLocation GetParameterLocation(int index) {
+ return incoming_->GetInputLocation(index + 1);
+ }
+
+ // Get the location where this function should place its return value.
+ LinkageLocation GetReturnLocation() {
+ return incoming_->GetReturnLocation(0);
+ }
+
+ // Get the frame offset for a given spill slot. The location depends on the
+ // calling convention and the specific frame layout, and may thus be
+ // architecture-specific. Negative spill slots indicate arguments on the
+ // caller's frame. The {extra} parameter indicates an additional offset from
+ // the frame offset, e.g. to index into part of a double slot.
+ FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
+
+ CompilationInfo* info() const { return info_; }
+
+ private:
+ CompilationInfo* info_;
+ CallDescriptor* incoming_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_LINKAGE_H_
diff --git a/deps/v8/src/compiler/lowering-builder.cc b/deps/v8/src/compiler/lowering-builder.cc
new file mode 100644
index 000000000..1246f54f1
--- /dev/null
+++ b/deps/v8/src/compiler/lowering-builder.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
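+// Calls the Lower() hook for every visited node, wrapping the call in a
+// SourcePositionTable scope when source positions are being tracked.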
+class LoweringBuilder::NodeVisitor : public NullNodeVisitor {
+ public:
+ explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {}
+
+ GenericGraphVisit::Control Post(Node* node) {
+ if (lowering_->source_positions_ != NULL) {
+ SourcePositionTable::Scope pos(lowering_->source_positions_, node);
+ lowering_->Lower(node);
+ } else {
+ lowering_->Lower(node);
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ private:
+ LoweringBuilder* lowering_;
+};
+
+
+LoweringBuilder::LoweringBuilder(Graph* graph,
+ SourcePositionTable* source_positions)
+ : graph_(graph), source_positions_(source_positions) {}
+
+
+void LoweringBuilder::LowerAllNodes() {
+ NodeVisitor visitor(this);
+ graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/lowering-builder.h b/deps/v8/src/compiler/lowering-builder.h
new file mode 100644
index 000000000..aeaaaacfd
--- /dev/null
+++ b/deps/v8/src/compiler/lowering-builder.h
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOWERING_BUILDER_H_
+#define V8_COMPILER_LOWERING_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(dcarney): rename this class.
+class LoweringBuilder {
+ public:
+ explicit LoweringBuilder(Graph* graph, SourcePositionTable* source_positions);
+ virtual ~LoweringBuilder() {}
+
+ void LowerAllNodes();
+ virtual void Lower(Node* node) = 0; // Exposed for testing.
+
+ Graph* graph() const { return graph_; }
+
+ private:
+ class NodeVisitor;
+ Graph* graph_;
+ SourcePositionTable* source_positions_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_LOWERING_BUILDER_H_
diff --git a/deps/v8/src/compiler/machine-node-factory.h b/deps/v8/src/compiler/machine-node-factory.h
new file mode 100644
index 000000000..faee93ebb
--- /dev/null
+++ b/deps/v8/src/compiler/machine-node-factory.h
@@ -0,0 +1,381 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_NODE_FACTORY_H_
+#define V8_COMPILER_MACHINE_NODE_FACTORY_H_
+
+#ifdef USE_SIMULATOR
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
+#else
+#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
+#endif
+
+#include "src/v8.h"
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
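+// Carries the signature (return type and parameter types) needed to build a
+// simplified C call descriptor in a given zone.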
+class MachineCallDescriptorBuilder : public ZoneObject {
+ public:
+ MachineCallDescriptorBuilder(MachineType return_type, int parameter_count,
+ const MachineType* parameter_types)
+ : return_type_(return_type),
+ parameter_count_(parameter_count),
+ parameter_types_(parameter_types) {}
+
+ int parameter_count() const { return parameter_count_; }
+ const MachineType* parameter_types() const { return parameter_types_; }
+
+ CallDescriptor* BuildCallDescriptor(Zone* zone) {
+ return Linkage::GetSimplifiedCDescriptor(zone, parameter_count_,
+ return_type_, parameter_types_);
+ }
+
+ private:
+ const MachineType return_type_;
+ const int parameter_count_;
+ const MachineType* const parameter_types_;
+};
+
+
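+// These macros dispatch to the embedding NodeFactory (CRTP), which supplies
+// zone(), common(), machine() and the NewNode() overloads.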
+#define ZONE() static_cast<NodeFactory*>(this)->zone()
+#define COMMON() static_cast<NodeFactory*>(this)->common()
+#define MACHINE() static_cast<NodeFactory*>(this)->machine()
+#define NEW_NODE_0(op) static_cast<NodeFactory*>(this)->NewNode(op)
+#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
+#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
+#define NEW_NODE_3(op, a, b, c) \
+ static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
+
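+// Mixin that adds typed convenience methods for building machine-level
+// constants, memory accesses, and arithmetic on top of a NodeFactory.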
+template <typename NodeFactory>
+class MachineNodeFactory {
+ public:
+ // Constants.
+ Node* PointerConstant(void* value) {
+ return IntPtrConstant(reinterpret_cast<intptr_t>(value));
+ }
+ Node* IntPtrConstant(intptr_t value) {
+ // TODO(dcarney): mark generated code as unserializable if value != 0.
+ return kPointerSize == 8 ? Int64Constant(value)
+ : Int32Constant(static_cast<int>(value));
+ }
+ Node* Int32Constant(int32_t value) {
+ return NEW_NODE_0(COMMON()->Int32Constant(value));
+ }
+ Node* Int64Constant(int64_t value) {
+ return NEW_NODE_0(COMMON()->Int64Constant(value));
+ }
+ Node* NumberConstant(double value) {
+ return NEW_NODE_0(COMMON()->NumberConstant(value));
+ }
+ Node* Float64Constant(double value) {
+ return NEW_NODE_0(COMMON()->Float64Constant(value));
+ }
+ Node* HeapConstant(Handle<Object> object) {
+ PrintableUnique<Object> val =
+ PrintableUnique<Object>::CreateUninitialized(ZONE(), object);
+ return NEW_NODE_0(COMMON()->HeapConstant(val));
+ }
+
+ Node* Projection(int index, Node* a) {
+ return NEW_NODE_1(COMMON()->Projection(index), a);
+ }
+
+ // Memory Operations.
+ Node* Load(MachineType rep, Node* base) {
+ return Load(rep, base, Int32Constant(0));
+ }
+ Node* Load(MachineType rep, Node* base, Node* index) {
+ return NEW_NODE_2(MACHINE()->Load(rep), base, index);
+ }
+ void Store(MachineType rep, Node* base, Node* value) {
+ Store(rep, base, Int32Constant(0), value);
+ }
+ void Store(MachineType rep, Node* base, Node* index, Node* value) {
+ NEW_NODE_3(MACHINE()->Store(rep, kNoWriteBarrier), base, index, value);
+ }
+ // Arithmetic Operations.
+ Node* WordAnd(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordAnd(), a, b);
+ }
+ Node* WordOr(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordOr(), a, b);
+ }
+ Node* WordXor(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordXor(), a, b);
+ }
+ Node* WordShl(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordShl(), a, b);
+ }
+ Node* WordShr(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordShr(), a, b);
+ }
+ Node* WordSar(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordSar(), a, b);
+ }
+ Node* WordEqual(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->WordEqual(), a, b);
+ }
+ Node* WordNotEqual(Node* a, Node* b) {
+ return WordBinaryNot(WordEqual(a, b));
+ }
+ Node* WordNot(Node* a) {
+ if (MACHINE()->is32()) {
+ return Word32Not(a);
+ } else {
+ return Word64Not(a);
+ }
+ }
+ Node* WordBinaryNot(Node* a) {
+ if (MACHINE()->is32()) {
+ return Word32BinaryNot(a);
+ } else {
+ return Word64BinaryNot(a);
+ }
+ }
+
+ Node* Word32And(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32And(), a, b);
+ }
+ Node* Word32Or(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Or(), a, b);
+ }
+ Node* Word32Xor(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Xor(), a, b);
+ }
+ Node* Word32Shl(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Shl(), a, b);
+ }
+ Node* Word32Shr(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Shr(), a, b);
+ }
+ Node* Word32Sar(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Sar(), a, b);
+ }
+ Node* Word32Equal(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word32Equal(), a, b);
+ }
+ Node* Word32NotEqual(Node* a, Node* b) {
+ return Word32BinaryNot(Word32Equal(a, b));
+ }
+ Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+ Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
+
+ Node* Word64And(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64And(), a, b);
+ }
+ Node* Word64Or(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Or(), a, b);
+ }
+ Node* Word64Xor(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Xor(), a, b);
+ }
+ Node* Word64Shl(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Shl(), a, b);
+ }
+ Node* Word64Shr(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Shr(), a, b);
+ }
+ Node* Word64Sar(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Sar(), a, b);
+ }
+ Node* Word64Equal(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Word64Equal(), a, b);
+ }
+ Node* Word64NotEqual(Node* a, Node* b) {
+ return Word64BinaryNot(Word64Equal(a, b));
+ }
+ Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
+ Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
+
+ Node* Int32Add(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
+ }
+ Node* Int32AddWithOverflow(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
+ }
+ Node* Int32Sub(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
+ }
+ Node* Int32SubWithOverflow(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
+ }
+ Node* Int32Mul(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
+ }
+ Node* Int32Div(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32Div(), a, b);
+ }
+ Node* Int32UDiv(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32UDiv(), a, b);
+ }
+ Node* Int32Mod(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32Mod(), a, b);
+ }
+ Node* Int32UMod(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32UMod(), a, b);
+ }
+ Node* Int32LessThan(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32LessThan(), a, b);
+ }
+ Node* Int32LessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32LessThanOrEqual(), a, b);
+ }
+ Node* Uint32LessThan(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Uint32LessThan(), a, b);
+ }
+ Node* Uint32LessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Uint32LessThanOrEqual(), a, b);
+ }
+ Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
+ Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
+ return Int32LessThanOrEqual(b, a);
+ }
+ Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
+
+ Node* Int64Add(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64Add(), a, b);
+ }
+ Node* Int64Sub(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64Sub(), a, b);
+ }
+ Node* Int64Mul(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64Mul(), a, b);
+ }
+ Node* Int64Div(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64Div(), a, b);
+ }
+ Node* Int64UDiv(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64UDiv(), a, b);
+ }
+ Node* Int64Mod(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64Mod(), a, b);
+ }
+ Node* Int64UMod(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64UMod(), a, b);
+ }
+ Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
+ Node* Int64LessThan(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64LessThan(), a, b);
+ }
+ Node* Int64LessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int64LessThanOrEqual(), a, b);
+ }
+ Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
+ Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
+ return Int64LessThanOrEqual(b, a);
+ }
+
+ Node* ConvertIntPtrToInt32(Node* a) {
+ return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a)
+ : a;
+ }
+ Node* ConvertInt32ToIntPtr(Node* a) {
+ return kPointerSize == 8 ? NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a)
+ : a;
+ }
+
+#define INTPTR_BINOP(prefix, name) \
+ Node* IntPtr##name(Node* a, Node* b) { \
+ return kPointerSize == 8 ? prefix##64##name(a, b) \
+ : prefix##32##name(a, b); \
+ }
+
+ INTPTR_BINOP(Int, Add);
+ INTPTR_BINOP(Int, Sub);
+ INTPTR_BINOP(Int, LessThan);
+ INTPTR_BINOP(Int, LessThanOrEqual);
+ INTPTR_BINOP(Word, Equal);
+ INTPTR_BINOP(Word, NotEqual);
+ INTPTR_BINOP(Int, GreaterThanOrEqual);
+ INTPTR_BINOP(Int, GreaterThan);
+
+#undef INTPTR_BINOP
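+  // For example, on a 64-bit target IntPtrAdd(a, b) expands to Int64Add(a, b),
+  // while on a 32-bit target it expands to Int32Add(a, b).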
+
+ Node* Float64Add(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Add(), a, b);
+ }
+ Node* Float64Sub(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Sub(), a, b);
+ }
+ Node* Float64Mul(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Mul(), a, b);
+ }
+ Node* Float64Div(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Div(), a, b);
+ }
+ Node* Float64Mod(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Mod(), a, b);
+ }
+ Node* Float64Equal(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64Equal(), a, b);
+ }
+ Node* Float64NotEqual(Node* a, Node* b) {
+ return WordBinaryNot(Float64Equal(a, b));
+ }
+ Node* Float64LessThan(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64LessThan(), a, b);
+ }
+ Node* Float64LessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Float64LessThanOrEqual(), a, b);
+ }
+ Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
+ Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
+ return Float64LessThanOrEqual(b, a);
+ }
+
+ // Conversions.
+ Node* ConvertInt32ToInt64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ConvertInt32ToInt64(), a);
+ }
+ Node* ConvertInt64ToInt32(Node* a) {
+ return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
+ }
+ Node* ChangeInt32ToFloat64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeInt32ToFloat64(), a);
+ }
+ Node* ChangeUint32ToFloat64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeUint32ToFloat64(), a);
+ }
+ Node* ChangeFloat64ToInt32(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeFloat64ToInt32(), a);
+ }
+ Node* ChangeFloat64ToUint32(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeFloat64ToUint32(), a);
+ }
+
+#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
+ // Call to C.
+ Node* CallC(Node* function_address, MachineType return_type,
+ MachineType* arg_types, Node** args, int n_args) {
+ CallDescriptor* descriptor = Linkage::GetSimplifiedCDescriptor(
+ ZONE(), n_args, return_type, arg_types);
+ Node** passed_args =
+ static_cast<Node**>(alloca((n_args + 1) * sizeof(args[0])));
+ passed_args[0] = function_address;
+ for (int i = 0; i < n_args; ++i) {
+ passed_args[i + 1] = args[i];
+ }
+ return NEW_NODE_2(COMMON()->Call(descriptor), n_args + 1, passed_args);
+ }
+#endif
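+
+  // Example use of CallC (illustrative sketch; assumes an existing C function
+  // "int32_t AddTwoInts(int32_t, int32_t)" and int32 value nodes a and b):
+  //
+  //   MachineType arg_types[] = {kMachineWord32, kMachineWord32};
+  //   Node* args[] = {a, b};
+  //   Node* sum = CallC(PointerConstant(reinterpret_cast<void*>(&AddTwoInts)),
+  //                     kMachineWord32, arg_types, args, 2);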
+};
+
+#undef NEW_NODE_0
+#undef NEW_NODE_1
+#undef NEW_NODE_2
+#undef NEW_NODE_3
+#undef MACHINE
+#undef COMMON
+#undef ZONE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MACHINE_NODE_FACTORY_H_
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
new file mode 100644
index 000000000..4a4057646
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -0,0 +1,343 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator-reducer.h"
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineOperatorReducer::MachineOperatorReducer(Graph* graph)
+ : graph_(graph),
+ cache_(new (graph->zone()) CommonNodeCache(graph->zone())),
+ common_(graph->zone()),
+ machine_(graph->zone()) {}
+
+
+MachineOperatorReducer::MachineOperatorReducer(Graph* graph,
+ CommonNodeCache* cache)
+ : graph_(graph),
+ cache_(cache),
+ common_(graph->zone()),
+ machine_(graph->zone()) {}
+
+
+Node* MachineOperatorReducer::Int32Constant(int32_t value) {
+ Node** loc = cache_->FindInt32Constant(value);
+ if (*loc == NULL) {
+ *loc = graph_->NewNode(common_.Int32Constant(value));
+ }
+ return *loc;
+}
+
+
+Node* MachineOperatorReducer::Float64Constant(volatile double value) {
+ Node** loc = cache_->FindFloat64Constant(value);
+ if (*loc == NULL) {
+ *loc = graph_->NewNode(common_.Float64Constant(value));
+ }
+ return *loc;
+}
+
+
+// Perform constant folding and strength reduction on machine operators.
+Reduction MachineOperatorReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
+ if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
+ if (m.IsFoldable()) { // K & K => K
+ return ReplaceInt32(m.left().Value() & m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
+ break;
+ }
+ case IrOpcode::kWord32Or: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
+ if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
+ if (m.IsFoldable()) { // K | K => K
+ return ReplaceInt32(m.left().Value() | m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
+ break;
+ }
+ case IrOpcode::kWord32Xor: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
+ if (m.IsFoldable()) { // K ^ K => K
+ return ReplaceInt32(m.left().Value() ^ m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
+ break;
+ }
+ case IrOpcode::kWord32Shl: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
+ if (m.IsFoldable()) { // K << K => K
+ return ReplaceInt32(m.left().Value() << m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kWord32Shr: {
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
+ if (m.IsFoldable()) { // K >>> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kWord32Sar: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
+ if (m.IsFoldable()) { // K >> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kWord32Equal: {
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) { // K == K => K
+ return ReplaceBool(m.left().Value() == m.right().Value());
+ }
+ if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
+ Int32BinopMatcher msub(m.left().node());
+ node->ReplaceInput(0, msub.left().node());
+ node->ReplaceInput(1, msub.right().node());
+ return Changed(node);
+ }
+ // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+ if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
+ break;
+ }
+ case IrOpcode::kInt32Add: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
+ if (m.IsFoldable()) { // K + K => K
+ return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
+ static_cast<uint32_t>(m.right().Value()));
+ }
+ break;
+ }
+ case IrOpcode::kInt32Sub: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
+ if (m.IsFoldable()) { // K - K => K
+ return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+ static_cast<uint32_t>(m.right().Value()));
+ }
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
+ break;
+ }
+ case IrOpcode::kInt32Mul: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
+ if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
+ if (m.IsFoldable()) { // K * K => K
+ return ReplaceInt32(m.left().Value() * m.right().Value());
+ }
+ if (m.right().Is(-1)) { // x * -1 => 0 - x
+ graph_->ChangeOperator(node, machine_.Int32Sub());
+ node->ReplaceInput(0, Int32Constant(0));
+ node->ReplaceInput(1, m.left().node());
+ return Changed(node);
+ }
+ if (m.right().IsPowerOf2()) { // x * 2^n => x << n
+ graph_->ChangeOperator(node, machine_.Word32Shl());
+ node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kInt32Div: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
+ // TODO(turbofan): if (m.left().Is(0))
+ // TODO(turbofan): if (m.right().IsPowerOf2())
+ // TODO(turbofan): if (m.right().Is(0))
+ // TODO(turbofan): if (m.LeftEqualsRight())
+ if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K
+ if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
+ return ReplaceInt32(m.left().Value() / m.right().Value());
+ }
+ if (m.right().Is(-1)) { // x / -1 => 0 - x
+ graph_->ChangeOperator(node, machine_.Int32Sub());
+ node->ReplaceInput(0, Int32Constant(0));
+ node->ReplaceInput(1, m.left().node());
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kInt32UDiv: {
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
+ // TODO(turbofan): if (m.left().Is(0))
+ // TODO(turbofan): if (m.right().Is(0))
+ // TODO(turbofan): if (m.LeftEqualsRight())
+ if (m.IsFoldable() && !m.right().Is(0)) { // K / K => K
+ return ReplaceInt32(m.left().Value() / m.right().Value());
+ }
+ if (m.right().IsPowerOf2()) { // x / 2^n => x >> n
+ graph_->ChangeOperator(node, machine_.Word32Shr());
+ node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kInt32Mod: {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
+ if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
+ // TODO(turbofan): if (m.left().Is(0))
+ // TODO(turbofan): if (m.right().IsPowerOf2())
+ // TODO(turbofan): if (m.right().Is(0))
+ // TODO(turbofan): if (m.LeftEqualsRight())
+ if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
+ return ReplaceInt32(m.left().Value() % m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kInt32UMod: {
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
+ // TODO(turbofan): if (m.left().Is(0))
+ // TODO(turbofan): if (m.right().Is(0))
+ // TODO(turbofan): if (m.LeftEqualsRight())
+ if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
+ return ReplaceInt32(m.left().Value() % m.right().Value());
+ }
+ if (m.right().IsPowerOf2()) { // x % 2^n => x & 2^n-1
+ graph_->ChangeOperator(node, machine_.Word32And());
+ node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kInt32LessThan: {
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) { // K < K => K
+ return ReplaceBool(m.left().Value() < m.right().Value());
+ }
+ if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y < 0 => x < y
+ Int32BinopMatcher msub(m.left().node());
+ node->ReplaceInput(0, msub.left().node());
+ node->ReplaceInput(1, msub.right().node());
+ return Changed(node);
+ }
+ if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 < x - y => y < x
+ Int32BinopMatcher msub(m.right().node());
+ node->ReplaceInput(0, msub.right().node());
+ node->ReplaceInput(1, msub.left().node());
+ return Changed(node);
+ }
+ if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
+ break;
+ }
+ case IrOpcode::kInt32LessThanOrEqual: {
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) { // K <= K => K
+ return ReplaceBool(m.left().Value() <= m.right().Value());
+ }
+ if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y <= 0 => x <= y
+ Int32BinopMatcher msub(m.left().node());
+ node->ReplaceInput(0, msub.left().node());
+ node->ReplaceInput(1, msub.right().node());
+ return Changed(node);
+ }
+ if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 <= x - y => y <= x
+ Int32BinopMatcher msub(m.right().node());
+ node->ReplaceInput(0, msub.right().node());
+ node->ReplaceInput(1, msub.left().node());
+ return Changed(node);
+ }
+ if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
+ break;
+ }
+ case IrOpcode::kUint32LessThan: {
+ Uint32BinopMatcher m(node);
+ if (m.left().Is(kMaxUInt32)) return ReplaceBool(false); // M < x => false
+ if (m.right().Is(0)) return ReplaceBool(false); // x < 0 => false
+ if (m.IsFoldable()) { // K < K => K
+ return ReplaceBool(m.left().Value() < m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
+ break;
+ }
+ case IrOpcode::kUint32LessThanOrEqual: {
+ Uint32BinopMatcher m(node);
+ if (m.left().Is(0)) return ReplaceBool(true); // 0 <= x => true
+ if (m.right().Is(kMaxUInt32)) return ReplaceBool(true); // x <= M => true
+ if (m.IsFoldable()) { // K <= K => K
+ return ReplaceBool(m.left().Value() <= m.right().Value());
+ }
+ if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
+ break;
+ }
+ case IrOpcode::kFloat64Add: {
+ Float64BinopMatcher m(node);
+ if (m.IsFoldable()) { // K + K => K
+ return ReplaceFloat64(m.left().Value() + m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Sub: {
+ Float64BinopMatcher m(node);
+ if (m.IsFoldable()) { // K - K => K
+ return ReplaceFloat64(m.left().Value() - m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Mul: {
+ Float64BinopMatcher m(node);
+ if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x
+ if (m.right().IsNaN()) { // x * NaN => NaN
+ return Replace(m.right().node());
+ }
+ if (m.IsFoldable()) { // K * K => K
+ return ReplaceFloat64(m.left().Value() * m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Div: {
+ Float64BinopMatcher m(node);
+ if (m.right().Is(1)) return Replace(m.left().node()); // x / 1.0 => x
+ if (m.right().IsNaN()) { // x / NaN => NaN
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) { // NaN / x => NaN
+ return Replace(m.left().node());
+ }
+ if (m.IsFoldable()) { // K / K => K
+ return ReplaceFloat64(m.left().Value() / m.right().Value());
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Mod: {
+ Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) { // x % NaN => NaN
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) { // NaN % x => NaN
+ return Replace(m.left().node());
+ }
+ if (m.IsFoldable()) { // K % K => K
+ return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
+ }
+ break;
+ }
+ // TODO(turbofan): strength-reduce and fold floating point operations.
+ default:
+ break;
+ }
+ return NoChange();
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
new file mode 100644
index 000000000..46d2931e9
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -0,0 +1,52 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonNodeCache;
+
+// Performs constant folding and strength reduction on nodes that have
+// machine operators.
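+//
+// Examples of reductions performed (see machine-operator-reducer.cc):
+//
+//   Int32Add(Int32Constant(2), Int32Constant(3))  =>  Int32Constant(5)
+//   Int32Mul(x, Int32Constant(8))                 =>  Word32Shl(x, Int32Constant(3))
+//   Word32And(x, Int32Constant(0))                =>  Int32Constant(0)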
+class MachineOperatorReducer : public Reducer {
+ public:
+ explicit MachineOperatorReducer(Graph* graph);
+
+ MachineOperatorReducer(Graph* graph, CommonNodeCache* cache);
+
+ virtual Reduction Reduce(Node* node);
+
+ private:
+ Graph* graph_;
+ CommonNodeCache* cache_;
+ CommonOperatorBuilder common_;
+ MachineOperatorBuilder machine_;
+
+ Node* Int32Constant(int32_t value);
+ Node* Float64Constant(volatile double value);
+
+ Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+
+ Reduction ReplaceInt32(int32_t value) {
+ return Replace(Int32Constant(value));
+ }
+
+ Reduction ReplaceFloat64(volatile double value) {
+ return Replace(Float64Constant(value));
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
new file mode 100644
index 000000000..93ccedc2c
--- /dev/null
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -0,0 +1,168 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
+#define V8_COMPILER_MACHINE_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(turbofan): other write barriers are possible based on type
+enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+
+
+// A Store needs a MachineType and a WriteBarrierKind
+// in order to emit the correct write barrier.
+struct StoreRepresentation {
+ MachineType rep;
+ WriteBarrierKind write_barrier_kind;
+};
+
+
+// Interface for building machine-level operators. These operators are
+// machine-level but machine-independent and thus define a language suitable
+// for generating code to run on architectures such as ia32, x64, arm, etc.
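+//
+// Example (illustrative sketch; assumes a Zone* zone, a Graph* graph and value
+// nodes left and right):
+//
+//   MachineOperatorBuilder machine(zone);  // word size defaults to pointer_rep()
+//   Node* sum = graph->NewNode(machine.Int32Add(), left, right);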
+class MachineOperatorBuilder {
+ public:
+ explicit MachineOperatorBuilder(Zone* zone, MachineType word = pointer_rep())
+ : zone_(zone), word_(word) {
+ CHECK(word == kMachineWord32 || word == kMachineWord64);
+ }
+
+#define SIMPLE(name, properties, inputs, outputs) \
+ return new (zone_) \
+ SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define OP1(name, ptype, pname, properties, inputs, outputs) \
+ return new (zone_) \
+ Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
+ inputs, outputs, #name, pname)
+
+#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+#define BINOP_O(name) SIMPLE(name, Operator::kPure, 2, 2)
+#define BINOP_C(name) \
+ SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1)
+#define BINOP_AC(name) \
+ SIMPLE(name, \
+ Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
+ 1)
+#define BINOP_ACO(name) \
+ SIMPLE(name, \
+ Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
+ 2)
+#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
+
+#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
+
+ Operator* Load(MachineType rep) { // load [base + index]
+ OP1(Load, MachineType, rep, Operator::kNoWrite, 2, 1);
+ }
+ // store [base + index], value
+ Operator* Store(MachineType rep, WriteBarrierKind kind) {
+ StoreRepresentation store_rep = {rep, kind};
+ OP1(Store, StoreRepresentation, store_rep, Operator::kNoRead, 3, 0);
+ }
+
+ Operator* WordAnd() { WORD_SIZE(And); }
+ Operator* WordOr() { WORD_SIZE(Or); }
+ Operator* WordXor() { WORD_SIZE(Xor); }
+ Operator* WordShl() { WORD_SIZE(Shl); }
+ Operator* WordShr() { WORD_SIZE(Shr); }
+ Operator* WordSar() { WORD_SIZE(Sar); }
+ Operator* WordEqual() { WORD_SIZE(Equal); }
+
+ Operator* Word32And() { BINOP_AC(Word32And); }
+ Operator* Word32Or() { BINOP_AC(Word32Or); }
+ Operator* Word32Xor() { BINOP_AC(Word32Xor); }
+ Operator* Word32Shl() { BINOP(Word32Shl); }
+ Operator* Word32Shr() { BINOP(Word32Shr); }
+ Operator* Word32Sar() { BINOP(Word32Sar); }
+ Operator* Word32Equal() { BINOP_C(Word32Equal); }
+
+ Operator* Word64And() { BINOP_AC(Word64And); }
+ Operator* Word64Or() { BINOP_AC(Word64Or); }
+ Operator* Word64Xor() { BINOP_AC(Word64Xor); }
+ Operator* Word64Shl() { BINOP(Word64Shl); }
+ Operator* Word64Shr() { BINOP(Word64Shr); }
+ Operator* Word64Sar() { BINOP(Word64Sar); }
+ Operator* Word64Equal() { BINOP_C(Word64Equal); }
+
+ Operator* Int32Add() { BINOP_AC(Int32Add); }
+ Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); }
+ Operator* Int32Sub() { BINOP(Int32Sub); }
+ Operator* Int32SubWithOverflow() { BINOP_O(Int32SubWithOverflow); }
+ Operator* Int32Mul() { BINOP_AC(Int32Mul); }
+ Operator* Int32Div() { BINOP(Int32Div); }
+ Operator* Int32UDiv() { BINOP(Int32UDiv); }
+ Operator* Int32Mod() { BINOP(Int32Mod); }
+ Operator* Int32UMod() { BINOP(Int32UMod); }
+ Operator* Int32LessThan() { BINOP(Int32LessThan); }
+ Operator* Int32LessThanOrEqual() { BINOP(Int32LessThanOrEqual); }
+ Operator* Uint32LessThan() { BINOP(Uint32LessThan); }
+ Operator* Uint32LessThanOrEqual() { BINOP(Uint32LessThanOrEqual); }
+
+ Operator* Int64Add() { BINOP_AC(Int64Add); }
+ Operator* Int64Sub() { BINOP(Int64Sub); }
+ Operator* Int64Mul() { BINOP_AC(Int64Mul); }
+ Operator* Int64Div() { BINOP(Int64Div); }
+ Operator* Int64UDiv() { BINOP(Int64UDiv); }
+ Operator* Int64Mod() { BINOP(Int64Mod); }
+ Operator* Int64UMod() { BINOP(Int64UMod); }
+ Operator* Int64LessThan() { BINOP(Int64LessThan); }
+ Operator* Int64LessThanOrEqual() { BINOP(Int64LessThanOrEqual); }
+
+ Operator* ConvertInt32ToInt64() { UNOP(ConvertInt32ToInt64); }
+ Operator* ConvertInt64ToInt32() { UNOP(ConvertInt64ToInt32); }
+
+ // Convert representation of integers between float64 and int32/uint32.
+ // The precise rounding mode and handling of out of range inputs are *not*
+ // defined for these operators, since they are intended only for use with
+ // integers.
+ // TODO(titzer): rename ConvertXXX to ChangeXXX in machine operators.
+ Operator* ChangeInt32ToFloat64() { UNOP(ChangeInt32ToFloat64); }
+ Operator* ChangeUint32ToFloat64() { UNOP(ChangeUint32ToFloat64); }
+ Operator* ChangeFloat64ToInt32() { UNOP(ChangeFloat64ToInt32); }
+ Operator* ChangeFloat64ToUint32() { UNOP(ChangeFloat64ToUint32); }
+
+ // Floating point operators always operate with IEEE 754 round-to-nearest.
+ Operator* Float64Add() { BINOP_C(Float64Add); }
+ Operator* Float64Sub() { BINOP(Float64Sub); }
+ Operator* Float64Mul() { BINOP_C(Float64Mul); }
+ Operator* Float64Div() { BINOP(Float64Div); }
+ Operator* Float64Mod() { BINOP(Float64Mod); }
+
+ // Floating point comparisons complying to IEEE 754.
+ Operator* Float64Equal() { BINOP_C(Float64Equal); }
+ Operator* Float64LessThan() { BINOP(Float64LessThan); }
+ Operator* Float64LessThanOrEqual() { BINOP(Float64LessThanOrEqual); }
+
+ inline bool is32() const { return word_ == kMachineWord32; }
+ inline bool is64() const { return word_ == kMachineWord64; }
+ inline MachineType word() const { return word_; }
+
+ static inline MachineType pointer_rep() {
+ return kPointerSize == 8 ? kMachineWord64 : kMachineWord32;
+ }
+
+#undef WORD_SIZE
+#undef UNOP
+#undef BINOP_ACO
+#undef BINOP_AC
+#undef BINOP_C
+#undef BINOP_O
+#undef BINOP
+#undef OP1
+#undef SIMPLE
+
+ private:
+ Zone* zone_;
+ MachineType word_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_MACHINE_OPERATOR_H_
diff --git a/deps/v8/src/compiler/machine-type.h b/deps/v8/src/compiler/machine-type.h
new file mode 100644
index 000000000..716ca2236
--- /dev/null
+++ b/deps/v8/src/compiler/machine-type.h
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_TYPE_H_
+#define V8_COMPILER_MACHINE_TYPE_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An enumeration of the storage representations at the machine level.
+// - Words are uninterpreted bits of a given fixed size that can be used
+// to store integers and pointers. They are normally allocated to general
+// purpose registers by the backend and are not tracked for GC.
+// - Floats are bits of a given fixed size that are used to store floating
+// point numbers. They are normally allocated to the floating point
+// registers of the machine and are not tracked for the GC.
+// - Tagged values are the size of a reference into the heap and can store
+// small words or references into the heap using a language and potentially
+// machine-dependent tagging scheme. These values are tracked by the code
+// generator for precise GC.
+enum MachineType {
+ kMachineWord8,
+ kMachineWord16,
+ kMachineWord32,
+ kMachineWord64,
+ kMachineFloat64,
+ kMachineTagged,
+ kMachineLast
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/deps/v8/src/compiler/node-aux-data-inl.h b/deps/v8/src/compiler/node-aux-data-inl.h
new file mode 100644
index 000000000..679320ab6
--- /dev/null
+++ b/deps/v8/src/compiler/node-aux-data-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
+#define V8_COMPILER_NODE_AUX_DATA_INL_H_
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class T>
+NodeAuxData<T>::NodeAuxData(Zone* zone)
+ : aux_data_(ZoneAllocator(zone)) {}
+
+
+template <class T>
+void NodeAuxData<T>::Set(Node* node, const T& data) {
+ int id = node->id();
+ if (id >= static_cast<int>(aux_data_.size())) {
+ aux_data_.resize(id + 1);
+ }
+ aux_data_[id] = data;
+}
+
+
+template <class T>
+T NodeAuxData<T>::Get(Node* node) {
+ int id = node->id();
+ if (id >= static_cast<int>(aux_data_.size())) {
+ return T();
+ }
+ return aux_data_[id];
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_INL_H_
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
new file mode 100644
index 000000000..1e836338a
--- /dev/null
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_H_
+#define V8_COMPILER_NODE_AUX_DATA_H_
+
+#include <vector>
+
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+template <class T>
+class NodeAuxData {
+ public:
+ inline explicit NodeAuxData(Zone* zone);
+
+ inline void Set(Node* node, const T& data);
+ inline T Get(Node* node);
+
+ private:
+ typedef zone_allocator<T> ZoneAllocator;
+ typedef std::vector<T, ZoneAllocator> TZoneVector;
+
+ TZoneVector aux_data_;
+};
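+
+// Example (illustrative sketch; assumes a Zone* zone and a Node* node):
+//
+//   NodeAuxData<int> visit_count(zone);
+//   visit_count.Set(node, visit_count.Get(node) + 1);  // Get() returns T() when unset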
+}
+}
+} // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
new file mode 100644
index 000000000..c3ee58c5a
--- /dev/null
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define INITIAL_SIZE 16
+#define LINEAR_PROBE 5
+
+template <typename Key>
+int32_t NodeCacheHash(Key key) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+template <>
+inline int32_t NodeCacheHash(int32_t key) {
+ return ComputeIntegerHash(key, 0);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(int64_t key) {
+ return ComputeLongHash(key);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(double key) {
+ return ComputeLongHash(BitCast<int64_t>(key));
+}
+
+
+template <>
+inline int32_t NodeCacheHash(void* key) {
+ return ComputePointerHash(key);
+}
+
+
+template <typename Key>
+bool NodeCache<Key>::Resize(Zone* zone) {
+ if (size_ >= max_) return false; // Don't grow past the maximum size.
+
+ // Allocate a new block of entries 4x the size.
+ Entry* old_entries = entries_;
+ int old_size = size_ + LINEAR_PROBE;
+ size_ = size_ * 4;
+ int num_entries = size_ + LINEAR_PROBE;
+ entries_ = zone->NewArray<Entry>(num_entries);
+ memset(entries_, 0, sizeof(Entry) * num_entries);
+
+ // Insert the old entries into the new block.
+ for (int i = 0; i < old_size; i++) {
+ Entry* old = &old_entries[i];
+ if (old->value_ != NULL) {
+ int hash = NodeCacheHash(old->key_);
+ int start = hash & (size_ - 1);
+ int end = start + LINEAR_PROBE;
+ for (int j = start; j < end; j++) {
+ Entry* entry = &entries_[j];
+ if (entry->value_ == NULL) {
+ entry->key_ = old->key_;
+ entry->value_ = old->value_;
+ break;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+
+template <typename Key>
+Node** NodeCache<Key>::Find(Zone* zone, Key key) {
+ int32_t hash = NodeCacheHash(key);
+ if (entries_ == NULL) {
+ // Allocate the initial entries and insert the first entry.
+ int num_entries = INITIAL_SIZE + LINEAR_PROBE;
+ entries_ = zone->NewArray<Entry>(num_entries);
+ size_ = INITIAL_SIZE;
+ memset(entries_, 0, sizeof(Entry) * num_entries);
+ Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
+ entry->key_ = key;
+ return &entry->value_;
+ }
+
+ while (true) {
+ // Search up to N entries after (linear probing).
+ int start = hash & (size_ - 1);
+ int end = start + LINEAR_PROBE;
+ for (int i = start; i < end; i++) {
+ Entry* entry = &entries_[i];
+ if (entry->key_ == key) return &entry->value_;
+ if (entry->value_ == NULL) {
+ entry->key_ = key;
+ return &entry->value_;
+ }
+ }
+
+ if (!Resize(zone)) break; // Don't grow past the maximum size.
+ }
+
+ // If resized to maximum and still didn't find space, overwrite an entry.
+ Entry* entry = &entries_[hash & (size_ - 1)];
+ entry->key_ = key;
+ entry->value_ = NULL;
+ return &entry->value_;
+}
+
+
+template class NodeCache<int64_t>;
+template class NodeCache<int32_t>;
+template class NodeCache<void*>;
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
new file mode 100644
index 000000000..35352ea1e
--- /dev/null
+++ b/deps/v8/src/compiler/node-cache.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_CACHE_H_
+#define V8_COMPILER_NODE_CACHE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A cache for nodes based on a key. Useful for implementing canonicalization of
+// nodes such as constants, parameters, etc.
+template <typename Key>
+class NodeCache {
+ public:
+ explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}
+
+ // Search for node associated with {key} and return a pointer to a memory
+ // location in this cache that stores an entry for the key. If the location
+ // returned by this method contains a non-NULL node, the caller can use that
+ // node. Otherwise it is the responsibility of the caller to fill the entry
+ // with a new node.
+ // Note that a previous cache entry may be overwritten if the cache becomes
+ // too full or encounters too many hash collisions.
+ Node** Find(Zone* zone, Key key);
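+  //
+  // Example (illustrative sketch; assumes a Zone* zone, a Graph* graph and a
+  // CommonOperatorBuilder common):
+  //
+  //   Int32NodeCache cache;
+  //   Node** loc = cache.Find(zone, 42);
+  //   if (*loc == NULL) *loc = graph->NewNode(common.Int32Constant(42));
+  //   Node* forty_two = *loc;  // the canonicalized constant node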
+
+ private:
+ struct Entry {
+ Key key_;
+ Node* value_;
+ };
+
+ Entry* entries_; // lazily-allocated hash entries.
+ int32_t size_;
+ int32_t max_;
+
+ bool Resize(Zone* zone);
+};
+
+// Various default cache types.
+typedef NodeCache<int64_t> Int64NodeCache;
+typedef NodeCache<int32_t> Int32NodeCache;
+typedef NodeCache<void*> PtrNodeCache;
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_NODE_CACHE_H_
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
new file mode 100644
index 000000000..3b34d07c0
--- /dev/null
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MATCHERS_H_
+#define V8_COMPILER_NODE_MATCHERS_H_
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A pattern matcher for nodes.
+struct NodeMatcher {
+ explicit NodeMatcher(Node* node) : node_(node) {}
+
+ Node* node() const { return node_; }
+ Operator* op() const { return node()->op(); }
+ IrOpcode::Value opcode() const { return node()->opcode(); }
+
+ bool HasProperty(Operator::Property property) const {
+ return op()->HasProperty(property);
+ }
+ Node* InputAt(int index) const { return node()->InputAt(index); }
+
+#define DEFINE_IS_OPCODE(Opcode) \
+ bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
+ ALL_OP_LIST(DEFINE_IS_OPCODE)
+#undef DEFINE_IS_OPCODE
+
+ private:
+ Node* node_;
+};
+
+
+// A pattern matcher for arbitrary value constants.
+template <typename T>
+struct ValueMatcher : public NodeMatcher {
+ explicit ValueMatcher(Node* node)
+ : NodeMatcher(node),
+ value_(),
+ has_value_(CommonOperatorTraits<T>::HasValue(node->op())) {
+ if (has_value_) value_ = CommonOperatorTraits<T>::ValueOf(node->op());
+ }
+
+ bool HasValue() const { return has_value_; }
+ T Value() const {
+ DCHECK(HasValue());
+ return value_;
+ }
+
+ bool Is(T value) const {
+ return HasValue() && CommonOperatorTraits<T>::Equals(Value(), value);
+ }
+
+ bool IsInRange(T low, T high) const {
+ return HasValue() && low <= value_ && value_ <= high;
+ }
+
+ private:
+ T value_;
+ bool has_value_;
+};
+
+
+// A pattern matcher for integer constants.
+template <typename T>
+struct IntMatcher V8_FINAL : public ValueMatcher<T> {
+ explicit IntMatcher(Node* node) : ValueMatcher<T>(node) {}
+
+ bool IsPowerOf2() const {
+ return this->HasValue() && this->Value() > 0 &&
+ (this->Value() & (this->Value() - 1)) == 0;
+ }
+};
+
+typedef IntMatcher<int32_t> Int32Matcher;
+typedef IntMatcher<uint32_t> Uint32Matcher;
+typedef IntMatcher<int64_t> Int64Matcher;
+typedef IntMatcher<uint64_t> Uint64Matcher;
+
+
+// A pattern matcher for floating point constants.
+template <typename T>
+struct FloatMatcher V8_FINAL : public ValueMatcher<T> {
+ explicit FloatMatcher(Node* node) : ValueMatcher<T>(node) {}
+
+ bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+};
+
+typedef FloatMatcher<double> Float64Matcher;
+
+
+// For shorter pattern matching code, this struct matches both the left and
+// right hand sides of a binary operation and can put constants on the right
+// if they appear on the left hand side of a commutative operation.
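+//
+// Example (illustrative sketch, mirroring its use in MachineOperatorReducer):
+//
+//   Int32BinopMatcher m(node);  // node is e.g. an Int32Add
+//   if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+//   if (m.IsFoldable()) {       // both inputs are constants
+//     return ReplaceInt32(m.left().Value() + m.right().Value());
+//   }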
+template <typename Left, typename Right>
+struct BinopMatcher V8_FINAL : public NodeMatcher {
+ explicit BinopMatcher(Node* node)
+ : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+ if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
+ }
+
+ const Left& left() const { return left_; }
+ const Right& right() const { return right_; }
+
+ bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+ bool LeftEqualsRight() const { return left().node() == right().node(); }
+
+ private:
+ void PutConstantOnRight() {
+ if (left().HasValue() && !right().HasValue()) {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left().node());
+ node()->ReplaceInput(1, right().node());
+ }
+ }
+
+ Left left_;
+ Right right_;
+};
+
+typedef BinopMatcher<Int32Matcher, Int32Matcher> Int32BinopMatcher;
+typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
+typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
+typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
+typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_NODE_MATCHERS_H_
diff --git a/deps/v8/src/compiler/node-properties-inl.h b/deps/v8/src/compiler/node-properties-inl.h
new file mode 100644
index 000000000..2d63b0cc1
--- /dev/null
+++ b/deps/v8/src/compiler/node-properties-inl.h
@@ -0,0 +1,165 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
+#define V8_COMPILER_NODE_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Input layout.
+// Inputs are always arranged in order as follows:
+// 0 [ values, context, effects, control ] node->InputCount()
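+//
+// For example, a node with 2 value inputs, 1 context input, 1 effect input and
+// 1 control input uses input indices 0 and 1 for the values, 2 for the context,
+// 3 for the effect and 4 for the control input (illustrative; the actual counts
+// come from the node's operator).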
+
+inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
+
+inline int NodeProperties::FirstContextIndex(Node* node) {
+ return PastValueIndex(node);
+}
+
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+ return PastContextIndex(node);
+}
+
+inline int NodeProperties::FirstControlIndex(Node* node) {
+ return PastEffectIndex(node);
+}
+
+
+inline int NodeProperties::PastValueIndex(Node* node) {
+ return FirstValueIndex(node) +
+ OperatorProperties::GetValueInputCount(node->op());
+}
+
+inline int NodeProperties::PastContextIndex(Node* node) {
+ return FirstContextIndex(node) +
+ OperatorProperties::GetContextInputCount(node->op());
+}
+
+inline int NodeProperties::PastEffectIndex(Node* node) {
+ return FirstEffectIndex(node) +
+ OperatorProperties::GetEffectInputCount(node->op());
+}
+
+inline int NodeProperties::PastControlIndex(Node* node) {
+ return FirstControlIndex(node) +
+ OperatorProperties::GetControlInputCount(node->op());
+}
+
+
+// -----------------------------------------------------------------------------
+// Input accessors.
+
+inline Node* NodeProperties::GetValueInput(Node* node, int index) {
+ DCHECK(0 <= index &&
+ index < OperatorProperties::GetValueInputCount(node->op()));
+ return node->InputAt(FirstValueIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetContextInput(Node* node) {
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ return node->InputAt(FirstContextIndex(node));
+}
+
+inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
+ DCHECK(0 <= index &&
+ index < OperatorProperties::GetEffectInputCount(node->op()));
+ return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetControlInput(Node* node, int index) {
+ DCHECK(0 <= index &&
+ index < OperatorProperties::GetControlInputCount(node->op()));
+ return node->InputAt(FirstControlIndex(node) + index);
+}
+
+
+// -----------------------------------------------------------------------------
+// Edge kinds.
+
+inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+ // TODO(titzer): edge.index() is linear time;
+  // edges may need to be marked as value/effect/control.
+ if (num == 0) return false;
+ int index = edge.index();
+ return first <= index && index < first + num;
+}
+
+inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+ Node* node = edge.from();
+ return IsInputRange(edge, FirstValueIndex(node),
+ OperatorProperties::GetValueInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+ Node* node = edge.from();
+ return IsInputRange(edge, FirstContextIndex(node),
+ OperatorProperties::GetContextInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+ Node* node = edge.from();
+ return IsInputRange(edge, FirstEffectIndex(node),
+ OperatorProperties::GetEffectInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+ Node* node = edge.from();
+ return IsInputRange(edge, FirstControlIndex(node),
+ OperatorProperties::GetControlInputCount(node->op()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous predicates.
+
+inline bool NodeProperties::IsControl(Node* node) {
+ return IrOpcode::IsControlOpcode(node->opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous mutators.
+
+inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
+ node->ReplaceInput(FirstControlIndex(node), control);
+}
+
+inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
+ int index) {
+ DCHECK(index < OperatorProperties::GetEffectInputCount(node->op()));
+ return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
+}
+
+inline void NodeProperties::RemoveNonValueInputs(Node* node) {
+ node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Type Bounds.
+
+inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); }
+
+inline void NodeProperties::SetBounds(Node* node, Bounds b) {
+ node->set_bounds(b);
+}
+
+
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
new file mode 100644
index 000000000..6088a0a3a
--- /dev/null
+++ b/deps/v8/src/compiler/node-properties.h
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_H_
+#define V8_COMPILER_NODE_PROPERTIES_H_
+
+#include "src/compiler/node.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+// A facade that simplifies access to the different kinds of inputs to a node.
+class NodeProperties {
+ public:
+ static inline Node* GetValueInput(Node* node, int index);
+ static inline Node* GetContextInput(Node* node);
+ static inline Node* GetEffectInput(Node* node, int index = 0);
+ static inline Node* GetControlInput(Node* node, int index = 0);
+
+ static inline bool IsValueEdge(Node::Edge edge);
+ static inline bool IsContextEdge(Node::Edge edge);
+ static inline bool IsEffectEdge(Node::Edge edge);
+ static inline bool IsControlEdge(Node::Edge edge);
+
+ static inline bool IsControl(Node* node);
+
+ static inline void ReplaceControlInput(Node* node, Node* control);
+ static inline void ReplaceEffectInput(Node* node, Node* effect,
+ int index = 0);
+ static inline void RemoveNonValueInputs(Node* node);
+
+ static inline Bounds GetBounds(Node* node);
+ static inline void SetBounds(Node* node, Bounds bounds);
+
+ private:
+ static inline int FirstValueIndex(Node* node);
+ static inline int FirstContextIndex(Node* node);
+ static inline int FirstEffectIndex(Node* node);
+ static inline int FirstControlIndex(Node* node);
+ static inline int PastValueIndex(Node* node);
+ static inline int PastContextIndex(Node* node);
+ static inline int PastEffectIndex(Node* node);
+ static inline int PastControlIndex(Node* node);
+
+ static inline bool IsInputRange(Node::Edge edge, int first, int count);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_PROPERTIES_H_
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
new file mode 100644
index 000000000..4cb5748b4
--- /dev/null
+++ b/deps/v8/src/compiler/node.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void Node::CollectProjections(int projection_count, Node** projections) {
+ for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
+ for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+ if ((*i)->opcode() != IrOpcode::kProjection) continue;
+ int32_t index = OpParameter<int32_t>(*i);
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, projection_count);
+ DCHECK_EQ(NULL, projections[index]);
+ projections[index] = *i;
+ }
+}
+
+
+Node* Node::FindProjection(int32_t projection_index) {
+ for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+ if ((*i)->opcode() == IrOpcode::kProjection &&
+ OpParameter<int32_t>(*i) == projection_index) {
+ return *i;
+ }
+ }
+ return NULL;
+}
+
+
+OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
+
+
+OStream& operator<<(OStream& os, const Node& n) {
+ os << n.id() << ": " << *n.op();
+ if (n.op()->InputCount() != 0) {
+ os << "(";
+ for (int i = 0; i < n.op()->InputCount(); ++i) {
+ if (i != 0) os << ", ";
+ os << n.InputAt(i)->id();
+ }
+ os << ")";
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
new file mode 100644
index 000000000..ddca510a0
--- /dev/null
+++ b/deps/v8/src/compiler/node.h
@@ -0,0 +1,95 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_H_
+#define V8_COMPILER_NODE_H_
+
+#include <deque>
+#include <set>
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeData {
+ public:
+ Operator* op() const { return op_; }
+ void set_op(Operator* op) { op_ = op; }
+
+ IrOpcode::Value opcode() const {
+ DCHECK(op_->opcode() <= IrOpcode::kLast);
+ return static_cast<IrOpcode::Value>(op_->opcode());
+ }
+
+ Bounds bounds() { return bounds_; }
+
+ protected:
+ Operator* op_;
+ Bounds bounds_;
+ explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
+
+ friend class NodeProperties;
+ void set_bounds(Bounds b) { bounds_ = b; }
+};
+
+// A Node is the basic primitive of an IR graph. In addition to the members
+// inherited from GenericNode, Nodes only contain a mutable Operator that may change
+// during compilation, e.g. during lowering passes. Other information that
+// needs to be associated with Nodes during compilation must be stored
+// out-of-line indexed by the Node's id.
+class Node : public GenericNode<NodeData, Node> {
+ public:
+ Node(GenericGraphBase* graph, int input_count)
+ : GenericNode<NodeData, Node>(graph, input_count) {}
+
+ void Initialize(Operator* op) { set_op(op); }
+
+ void CollectProjections(int projection_count, Node** projections);
+ Node* FindProjection(int32_t projection_index);
+};
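+
+// Example (illustrative sketch): for a node whose operator produces more than
+// one output (e.g. one built with Int32AddWithOverflow), the individual
+// outputs are consumed through Projection nodes; FindProjection(i) returns the
+// use that projects output {i}, or NULL if there is no such use, and
+// CollectProjections(n, projections) fills an array with all of them.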
+
+OStream& operator<<(OStream& os, const Node& n);
+
+typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+
+typedef zone_allocator<Node*> NodePtrZoneAllocator;
+
+typedef std::set<Node*, std::less<Node*>, NodePtrZoneAllocator> NodeSet;
+typedef NodeSet::iterator NodeSetIter;
+typedef NodeSet::reverse_iterator NodeSetRIter;
+
+typedef std::deque<Node*, NodePtrZoneAllocator> NodeDeque;
+typedef NodeDeque::iterator NodeDequeIter;
+
+typedef std::vector<Node*, NodePtrZoneAllocator> NodeVector;
+typedef NodeVector::iterator NodeVectorIter;
+typedef NodeVector::reverse_iterator NodeVectorRIter;
+
+typedef zone_allocator<NodeVector> ZoneNodeVectorAllocator;
+typedef std::vector<NodeVector, ZoneNodeVectorAllocator> NodeVectorVector;
+typedef NodeVectorVector::iterator NodeVectorVectorIter;
+typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
+
+typedef Node::Uses::iterator UseIter;
+typedef Node::Inputs::iterator InputIter;
+
+// Helper to extract parameters from Operator1<*> nodes.
+template <typename T>
+static inline T OpParameter(Node* node) {
+ return reinterpret_cast<Operator1<T>*>(node->op())->parameter();
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_NODE_H_
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
new file mode 100644
index 000000000..1371bfd16
--- /dev/null
+++ b/deps/v8/src/compiler/opcodes.h
@@ -0,0 +1,297 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPCODES_H_
+#define V8_COMPILER_OPCODES_H_
+
+// Opcodes for control operators.
+#define CONTROL_OP_LIST(V) \
+ V(Start) \
+ V(Dead) \
+ V(Loop) \
+ V(End) \
+ V(Branch) \
+ V(IfTrue) \
+ V(IfFalse) \
+ V(Merge) \
+ V(Return) \
+ V(Throw) \
+ V(Continuation) \
+ V(LazyDeoptimization) \
+ V(Deoptimize)
+
+// Opcodes for common operators.
+#define LEAF_OP_LIST(V) \
+ V(Int32Constant) \
+ V(Int64Constant) \
+ V(Float64Constant) \
+ V(ExternalConstant) \
+ V(NumberConstant) \
+ V(HeapConstant)
+
+#define INNER_OP_LIST(V) \
+ V(Phi) \
+ V(EffectPhi) \
+ V(FrameState) \
+ V(StateValues) \
+ V(Call) \
+ V(Parameter) \
+ V(Projection)
+
+#define COMMON_OP_LIST(V) \
+ LEAF_OP_LIST(V) \
+ INNER_OP_LIST(V)
+
+// Opcodes for JavaScript operators.
+#define JS_COMPARE_BINOP_LIST(V) \
+ V(JSEqual) \
+ V(JSNotEqual) \
+ V(JSStrictEqual) \
+ V(JSStrictNotEqual) \
+ V(JSLessThan) \
+ V(JSGreaterThan) \
+ V(JSLessThanOrEqual) \
+ V(JSGreaterThanOrEqual)
+
+#define JS_BITWISE_BINOP_LIST(V) \
+ V(JSBitwiseOr) \
+ V(JSBitwiseXor) \
+ V(JSBitwiseAnd) \
+ V(JSShiftLeft) \
+ V(JSShiftRight) \
+ V(JSShiftRightLogical)
+
+#define JS_ARITH_BINOP_LIST(V) \
+ V(JSAdd) \
+ V(JSSubtract) \
+ V(JSMultiply) \
+ V(JSDivide) \
+ V(JSModulus)
+
+#define JS_SIMPLE_BINOP_LIST(V) \
+ JS_COMPARE_BINOP_LIST(V) \
+ JS_BITWISE_BINOP_LIST(V) \
+ JS_ARITH_BINOP_LIST(V)
+
+#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
+
+#define JS_CONVERSION_UNOP_LIST(V) \
+ V(JSToBoolean) \
+ V(JSToNumber) \
+ V(JSToString) \
+ V(JSToName) \
+ V(JSToObject)
+
+#define JS_OTHER_UNOP_LIST(V) V(JSTypeOf)
+
+#define JS_SIMPLE_UNOP_LIST(V) \
+ JS_LOGIC_UNOP_LIST(V) \
+ JS_CONVERSION_UNOP_LIST(V) \
+ JS_OTHER_UNOP_LIST(V)
+
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
+ V(JSInstanceOf)
+
+#define JS_CONTEXT_OP_LIST(V) \
+ V(JSLoadContext) \
+ V(JSStoreContext) \
+ V(JSCreateFunctionContext) \
+ V(JSCreateCatchContext) \
+ V(JSCreateWithContext) \
+ V(JSCreateBlockContext) \
+ V(JSCreateModuleContext) \
+ V(JSCreateGlobalContext)
+
+#define JS_OTHER_OP_LIST(V) \
+ V(JSCallConstruct) \
+ V(JSCallFunction) \
+ V(JSCallRuntime) \
+ V(JSYield) \
+ V(JSDebugger)
+
+#define JS_OP_LIST(V) \
+ JS_SIMPLE_BINOP_LIST(V) \
+ JS_SIMPLE_UNOP_LIST(V) \
+ JS_OBJECT_OP_LIST(V) \
+ JS_CONTEXT_OP_LIST(V) \
+ JS_OTHER_OP_LIST(V)
+
+// Opcodes for VirtualMachine-level operators.
+#define SIMPLIFIED_OP_LIST(V) \
+ V(BooleanNot) \
+ V(NumberEqual) \
+ V(NumberLessThan) \
+ V(NumberLessThanOrEqual) \
+ V(NumberAdd) \
+ V(NumberSubtract) \
+ V(NumberMultiply) \
+ V(NumberDivide) \
+ V(NumberModulus) \
+ V(NumberToInt32) \
+ V(NumberToUint32) \
+ V(ReferenceEqual) \
+ V(StringEqual) \
+ V(StringLessThan) \
+ V(StringLessThanOrEqual) \
+ V(StringAdd) \
+ V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToUint32) \
+ V(ChangeTaggedToFloat64) \
+ V(ChangeInt32ToTagged) \
+ V(ChangeUint32ToTagged) \
+ V(ChangeFloat64ToTagged) \
+ V(ChangeBoolToBit) \
+ V(ChangeBitToBool) \
+ V(LoadField) \
+ V(LoadElement) \
+ V(StoreField) \
+ V(StoreElement)
+
+// Opcodes for Machine-level operators.
+#define MACHINE_OP_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Equal) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Equal) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32Div) \
+ V(Int32UDiv) \
+ V(Int32Mod) \
+ V(Int32UMod) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Int64Add) \
+ V(Int64Sub) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64UDiv) \
+ V(Int64Mod) \
+ V(Int64UMod) \
+ V(Int64LessThan) \
+ V(Int64LessThanOrEqual) \
+ V(ConvertInt64ToInt32) \
+ V(ConvertInt32ToInt64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual)
+
+#define VALUE_OP_LIST(V) \
+ COMMON_OP_LIST(V) \
+ SIMPLIFIED_OP_LIST(V) \
+ MACHINE_OP_LIST(V) \
+ JS_OP_LIST(V)
+
+// The combination of all operators at all levels and the common operators.
+#define ALL_OP_LIST(V) \
+ CONTROL_OP_LIST(V) \
+ VALUE_OP_LIST(V)
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Declare an enumeration with all the opcodes at all levels so that they
+// can be globally, uniquely numbered.
+class IrOpcode {
+ public:
+ enum Value {
+#define DECLARE_OPCODE(x) k##x,
+ ALL_OP_LIST(DECLARE_OPCODE)
+#undef DECLARE_OPCODE
+ kLast = -1
+#define COUNT_OPCODE(x) +1
+ ALL_OP_LIST(COUNT_OPCODE)
+#undef COUNT_OPCODE
+ };
+
+ // Returns the mnemonic name of an opcode.
+ static const char* Mnemonic(Value val) {
+ switch (val) {
+#define RETURN_NAME(x) \
+ case k##x: \
+ return #x;
+ ALL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+ default:
+ return "UnknownOpcode";
+ }
+ }
+
+ static bool IsJsOpcode(Value val) {
+ switch (val) {
+#define RETURN_NAME(x) \
+ case k##x: \
+ return true;
+ JS_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+ default:
+ return false;
+ }
+ }
+
+ static bool IsControlOpcode(Value val) {
+ switch (val) {
+#define RETURN_NAME(x) \
+ case k##x: \
+ return true;
+ CONTROL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+ default:
+ return false;
+ }
+ }
+
+ static bool IsCommonOpcode(Value val) {
+ switch (val) {
+#define RETURN_NAME(x) \
+ case k##x: \
+ return true;
+ CONTROL_OP_LIST(RETURN_NAME)
+ COMMON_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+ default:
+ return false;
+ }
+ }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_OPCODES_H_
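
The enum above relies on two X-macro tricks: each *_OP_LIST macro applies a caller-supplied macro V to every opcode name, and the kLast = -1 enumerator followed by one +1 per opcode evaluates to the index of the last opcode. A self-contained miniature of the same pattern (the opcode names here are placeholders, not real IrOpcodes):

  #define DEMO_OP_LIST(V) V(Foo) V(Bar) V(Baz)

  enum DemoOpcode {
  #define DECLARE(x) kDemo##x,
    DEMO_OP_LIST(DECLARE)
  #undef DECLARE
    kDemoLast = -1
  #define COUNT(x) +1
    DEMO_OP_LIST(COUNT)
  #undef COUNT
  };
  // kDemoFoo == 0, kDemoBar == 1, kDemoBaz == 2, kDemoLast == 2 (= -1 + 3).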
diff --git a/deps/v8/src/compiler/operator-properties-inl.h b/deps/v8/src/compiler/operator-properties-inl.h
new file mode 100644
index 000000000..42833fdeb
--- /dev/null
+++ b/deps/v8/src/compiler/operator-properties-inl.h
@@ -0,0 +1,191 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/js-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+inline bool OperatorProperties::HasValueInput(Operator* op) {
+ return OperatorProperties::GetValueInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasContextInput(Operator* op) {
+ IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+ return IrOpcode::IsJsOpcode(opcode);
+}
+
+inline bool OperatorProperties::HasEffectInput(Operator* op) {
+ return OperatorProperties::GetEffectInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasControlInput(Operator* op) {
+ return OperatorProperties::GetControlInputCount(op) > 0;
+}
+
+
+inline int OperatorProperties::GetValueInputCount(Operator* op) {
+ return op->InputCount();
+}
+
+inline int OperatorProperties::GetContextInputCount(Operator* op) {
+ return OperatorProperties::HasContextInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetEffectInputCount(Operator* op) {
+ if (op->opcode() == IrOpcode::kEffectPhi) {
+ return static_cast<Operator1<int>*>(op)->parameter();
+ }
+ if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
+ return 0; // no effects.
+ return 1;
+}
+
+inline int OperatorProperties::GetControlInputCount(Operator* op) {
+ switch (op->opcode()) {
+ case IrOpcode::kPhi:
+ case IrOpcode::kEffectPhi:
+ return 1;
+#define OPCODE_CASE(x) case IrOpcode::k##x:
+ CONTROL_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+ return static_cast<ControlOperator*>(op)->ControlInputCount();
+ default:
+ // If a node can lazily deoptimize, it needs a control dependency.
+ if (CanLazilyDeoptimize(op)) {
+ return 1;
+ }
+ // Operators that have write effects must have a control dependency.
+ // Effect dependencies only ensure the correct ordering of write/read
+ // operations, without consideration of control flow. Without an explicit
+ // control dependency, writes could float too early in the schedule, onto a
+ // path that should not produce the side effect.
+ return op->HasProperty(Operator::kNoWrite) ? 0 : 1;
+ }
+ return 0;
+}
+
+inline int OperatorProperties::GetTotalInputCount(Operator* op) {
+ return GetValueInputCount(op) + GetContextInputCount(op) +
+ GetEffectInputCount(op) + GetControlInputCount(op);
+}
+
+// -----------------------------------------------------------------------------
+// Output properties.
+
+inline bool OperatorProperties::HasValueOutput(Operator* op) {
+ return GetValueOutputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasEffectOutput(Operator* op) {
+ return op->opcode() == IrOpcode::kStart || GetEffectInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasControlOutput(Operator* op) {
+ IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+ return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode)) ||
+ CanLazilyDeoptimize(op);
+}
+
+
+inline int OperatorProperties::GetValueOutputCount(Operator* op) {
+ return op->OutputCount();
+}
+
+inline int OperatorProperties::GetEffectOutputCount(Operator* op) {
+ return HasEffectOutput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetControlOutputCount(Operator* node) {
+ return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
+ : 0;
+}
+
+
+inline bool OperatorProperties::IsBasicBlockBegin(Operator* op) {
+ uint8_t opcode = op->opcode();
+ return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
+ opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
+ opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
+ opcode == IrOpcode::kIfFalse;
+}
+
+inline bool OperatorProperties::CanBeScheduled(Operator* op) { return true; }
+
+inline bool OperatorProperties::HasFixedSchedulePosition(Operator* op) {
+ IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+ return (IrOpcode::IsControlOpcode(opcode)) ||
+ opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi ||
+ opcode == IrOpcode::kPhi;
+}
+
+inline bool OperatorProperties::IsScheduleRoot(Operator* op) {
+ uint8_t opcode = op->opcode();
+ return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi ||
+ opcode == IrOpcode::kPhi;
+}
+
+inline bool OperatorProperties::CanLazilyDeoptimize(Operator* op) {
+ // TODO(jarin) This function allows turning on lazy deoptimization
+ // incrementally. It will change as we turn on lazy deopt for
+ // more nodes.
+
+ if (!FLAG_turbo_deoptimization) {
+ return false;
+ }
+
+ switch (op->opcode()) {
+ case IrOpcode::kCall: {
+ CallOperator* call_op = reinterpret_cast<CallOperator*>(op);
+ CallDescriptor* descriptor = call_op->parameter();
+ return descriptor->CanLazilyDeoptimize();
+ }
+ case IrOpcode::kJSCallRuntime: {
+ Runtime::FunctionId function =
+ reinterpret_cast<Operator1<Runtime::FunctionId>*>(op)->parameter();
+ // TODO(jarin) At the moment, we only support lazy deoptimization for
+ // the %DeoptimizeFunction runtime function.
+ return function == Runtime::kDeoptimizeFunction;
+ }
+
+ // JS function calls
+ case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSCallConstruct:
+
+ // Binary operations
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical:
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed:
+ return true;
+
+ default:
+ return false;
+ }
+ return false;
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
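
The input-count helpers above partition a node's inputs into value, context, effect and control inputs. A sketch of how the counts add up for a JS operator; the javascript->Add() builder call is an assumption, and the effect/control counts shown assume an operator with reads/writes and lazy deoptimization enabled:

  Operator* op = javascript->Add();  // assumed JSOperatorBuilder call
  int value_inputs   = OperatorProperties::GetValueInputCount(op);    // 2
  int context_inputs = OperatorProperties::GetContextInputCount(op);  // 1 (JS op)
  int effect_inputs  = OperatorProperties::GetEffectInputCount(op);   // 1
  int control_inputs = OperatorProperties::GetControlInputCount(op);  // 1
  CHECK_EQ(value_inputs + context_inputs + effect_inputs + control_inputs,
           OperatorProperties::GetTotalInputCount(op));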
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
new file mode 100644
index 000000000..cbc8ed9af
--- /dev/null
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+class OperatorProperties {
+ public:
+ static inline bool HasValueInput(Operator* node);
+ static inline bool HasContextInput(Operator* node);
+ static inline bool HasEffectInput(Operator* node);
+ static inline bool HasControlInput(Operator* node);
+
+ static inline int GetValueInputCount(Operator* op);
+ static inline int GetContextInputCount(Operator* op);
+ static inline int GetEffectInputCount(Operator* op);
+ static inline int GetControlInputCount(Operator* op);
+ static inline int GetTotalInputCount(Operator* op);
+
+ static inline bool HasValueOutput(Operator* op);
+ static inline bool HasEffectOutput(Operator* op);
+ static inline bool HasControlOutput(Operator* op);
+
+ static inline int GetValueOutputCount(Operator* op);
+ static inline int GetEffectOutputCount(Operator* op);
+ static inline int GetControlOutputCount(Operator* op);
+
+ static inline bool IsBasicBlockBegin(Operator* op);
+
+ static inline bool CanBeScheduled(Operator* op);
+ static inline bool HasFixedSchedulePosition(Operator* op);
+ static inline bool IsScheduleRoot(Operator* op);
+
+ static inline bool CanLazilyDeoptimize(Operator* op);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_OPERATOR_PROPERTIES_H_
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
new file mode 100644
index 000000000..4294d344f
--- /dev/null
+++ b/deps/v8/src/compiler/operator.h
@@ -0,0 +1,280 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_H_
+#define V8_COMPILER_OPERATOR_H_
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/ostreams.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An operator represents a description of the "computation" of a node in the
+// compiler IR. A computation takes values (i.e. data) as input and produces
+// zero or more values as output. The side-effects of a computation must be
+// captured by additional control and data dependencies which are part of the
+// IR graph.
+// Operators are immutable and describe the statically-known parts of a
+// computation. Thus they can be safely shared by many different nodes in the
+// IR graph, or even globally between graphs. Operators can have "static
+// parameters" which are compile-time constant parameters to the operator, such
+// as the name for a named field access, the ID of a runtime function, etc.
+// Static parameters are private to the operator and only semantically
+// meaningful to the operator itself.
+class Operator : public ZoneObject {
+ public:
+ Operator(uint8_t opcode, uint16_t properties)
+ : opcode_(opcode), properties_(properties) {}
+ virtual ~Operator() {}
+
+ // Properties inform the operator-independent optimizer about legal
+ // transformations for nodes that have this operator.
+ enum Property {
+ kNoProperties = 0,
+ kReducible = 1 << 0, // Participates in strength reduction.
+ kCommutative = 1 << 1, // OP(a, b) == OP(b, a) for all inputs.
+ kAssociative = 1 << 2, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+ kIdempotent = 1 << 3, // OP(a); OP(a) == OP(a).
+ kNoRead = 1 << 4, // Has no scheduling dependency on Effects
+ kNoWrite = 1 << 5, // Does not modify any Effects and thereby
+ // create new scheduling dependencies.
+ kNoThrow = 1 << 6, // Can never generate an exception.
+ kFoldable = kNoRead | kNoWrite,
+ kEliminatable = kNoWrite | kNoThrow,
+ kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+ };
+
+ // A small integer unique to all instances of a particular kind of operator,
+ // useful for quickly matching specific kinds of operators. For fast access,
+ // the opcode is stored directly in the operator object.
+ inline uint8_t opcode() const { return opcode_; }
+
+ // Returns a constant string representing the mnemonic of the operator,
+ // without the static parameters. Useful for debugging.
+ virtual const char* mnemonic() = 0;
+
+ // Check if this operator equals another operator. Equivalent operators can
+ // be merged, and nodes with equivalent operators and equivalent inputs
+ // can be merged.
+ virtual bool Equals(Operator* other) = 0;
+
+ // Compute a hashcode to speed up equivalence-set checking.
+ // Equal operators should always have equal hashcodes, and unequal operators
+ // should have unequal hashcodes with high probability.
+ virtual int HashCode() = 0;
+
+ // Check whether this operator has the given property.
+ inline bool HasProperty(Property property) const {
+ return (properties_ & static_cast<int>(property)) == property;
+ }
+
+ // Number of data inputs to the operator, for verifying graph structure.
+ virtual int InputCount() = 0;
+
+ // Number of data outputs from the operator, for verifying graph structure.
+ virtual int OutputCount() = 0;
+
+ inline Property properties() { return static_cast<Property>(properties_); }
+
+ // TODO(titzer): API for input and output types, for typechecking graph.
+ private:
+ // Print the full operator into the given stream, including any
+ // static parameters. Useful for debugging and visualizing the IR.
+ virtual OStream& PrintTo(OStream& os) const = 0; // NOLINT
+ friend OStream& operator<<(OStream& os, const Operator& op);
+
+ uint8_t opcode_;
+ uint16_t properties_;
+};
+
+OStream& operator<<(OStream& os, const Operator& op);
+
+// An implementation of Operator that has no static parameters. Such operators
+// have just a name, an opcode, and a fixed number of inputs and outputs.
+// They can be represented by singletons and shared globally.
+class SimpleOperator : public Operator {
+ public:
+ SimpleOperator(uint8_t opcode, uint16_t properties, int input_count,
+ int output_count, const char* mnemonic)
+ : Operator(opcode, properties),
+ input_count_(input_count),
+ output_count_(output_count),
+ mnemonic_(mnemonic) {}
+
+ virtual const char* mnemonic() { return mnemonic_; }
+ virtual bool Equals(Operator* that) { return opcode() == that->opcode(); }
+ virtual int HashCode() { return opcode(); }
+ virtual int InputCount() { return input_count_; }
+ virtual int OutputCount() { return output_count_; }
+
+ private:
+ virtual OStream& PrintTo(OStream& os) const { // NOLINT
+ return os << mnemonic_;
+ }
+
+ int input_count_;
+ int output_count_;
+ const char* mnemonic_;
+};
+
+// Template specializations of this struct implement a kind of type class for
+// dealing with the static parameters of Operator1 automatically.
+template <typename T>
+struct StaticParameterTraits {
+ static OStream& PrintTo(OStream& os, T val) { // NOLINT
+ return os << "??";
+ }
+ static int HashCode(T a) { return 0; }
+ static bool Equals(T a, T b) {
+ return false; // Not every T has a ==. By default, be conservative.
+ }
+};
+
+template <>
+struct StaticParameterTraits<ExternalReference> {
+ static OStream& PrintTo(OStream& os, ExternalReference val) { // NOLINT
+ os << val.address();
+ const Runtime::Function* function =
+ Runtime::FunctionForEntry(val.address());
+ if (function != NULL) {
+ os << " <" << function->name << ".entry>";
+ }
+ return os;
+ }
+ static int HashCode(ExternalReference a) {
+ return reinterpret_cast<intptr_t>(a.address()) & 0xFFFFFFFF;
+ }
+ static bool Equals(ExternalReference a, ExternalReference b) {
+ return a == b;
+ }
+};
+
+// Specialization for static parameters of type {int}.
+template <>
+struct StaticParameterTraits<int> {
+ static OStream& PrintTo(OStream& os, int val) { // NOLINT
+ return os << val;
+ }
+ static int HashCode(int a) { return a; }
+ static bool Equals(int a, int b) { return a == b; }
+};
+
+// Specialization for static parameters of type {double}.
+template <>
+struct StaticParameterTraits<double> {
+ static OStream& PrintTo(OStream& os, double val) { // NOLINT
+ return os << val;
+ }
+ static int HashCode(double a) {
+ return static_cast<int>(BitCast<int64_t>(a));
+ }
+ static bool Equals(double a, double b) {
+ return BitCast<int64_t>(a) == BitCast<int64_t>(b);
+ }
+};
+
+// Specialization for static parameters of type {PrintableUnique<Object>}.
+template <>
+struct StaticParameterTraits<PrintableUnique<Object> > {
+ static OStream& PrintTo(OStream& os, PrintableUnique<Object> val) { // NOLINT
+ return os << val.string();
+ }
+ static int HashCode(PrintableUnique<Object> a) {
+ return static_cast<int>(a.Hashcode());
+ }
+ static bool Equals(PrintableUnique<Object> a, PrintableUnique<Object> b) {
+ return a == b;
+ }
+};
+
+// Specialization for static parameters of type {PrintableUnique<Name>}.
+template <>
+struct StaticParameterTraits<PrintableUnique<Name> > {
+ static OStream& PrintTo(OStream& os, PrintableUnique<Name> val) { // NOLINT
+ return os << val.string();
+ }
+ static int HashCode(PrintableUnique<Name> a) {
+ return static_cast<int>(a.Hashcode());
+ }
+ static bool Equals(PrintableUnique<Name> a, PrintableUnique<Name> b) {
+ return a == b;
+ }
+};
+
+#if DEBUG
+// Specialization for static parameters of type {Handle<Object>} to prevent any
+// direct usage of Handles in constants.
+template <>
+struct StaticParameterTraits<Handle<Object> > {
+ static OStream& PrintTo(OStream& os, Handle<Object> val) { // NOLINT
+ UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ return os;
+ }
+ static int HashCode(Handle<Object> a) {
+ UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ return 0;
+ }
+ static bool Equals(Handle<Object> a, Handle<Object> b) {
+ UNREACHABLE(); // Should use PrintableUnique<Object> instead
+ return false;
+ }
+};
+#endif
+
+// A templatized implementation of Operator that has one static parameter of
+// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then
+// operators of this kind can automatically be hashed, compared, and printed.
+template <typename T>
+class Operator1 : public Operator {
+ public:
+ Operator1(uint8_t opcode, uint16_t properties, int input_count,
+ int output_count, const char* mnemonic, T parameter)
+ : Operator(opcode, properties),
+ input_count_(input_count),
+ output_count_(output_count),
+ mnemonic_(mnemonic),
+ parameter_(parameter) {}
+
+ const T& parameter() const { return parameter_; }
+
+ virtual const char* mnemonic() { return mnemonic_; }
+ virtual bool Equals(Operator* other) {
+ if (opcode() != other->opcode()) return false;
+ Operator1<T>* that = static_cast<Operator1<T>*>(other);
+ T temp1 = this->parameter_;
+ T temp2 = that->parameter_;
+ return StaticParameterTraits<T>::Equals(temp1, temp2);
+ }
+ virtual int HashCode() {
+ return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
+ }
+ virtual int InputCount() { return input_count_; }
+ virtual int OutputCount() { return output_count_; }
+ virtual OStream& PrintParameter(OStream& os) const { // NOLINT
+ return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
+ }
+
+ private:
+ virtual OStream& PrintTo(OStream& os) const { // NOLINT
+ return PrintParameter(os << mnemonic_);
+ }
+
+ int input_count_;
+ int output_count_;
+ const char* mnemonic_;
+ T parameter_;
+};
+
+// Type definitions for operators with specific types of parameters.
+typedef Operator1<PrintableUnique<Name> > NameOperator;
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_OPERATOR_H_
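
Because Operator1<int> picks up the StaticParameterTraits<int> specialization above, two instances with the same opcode and parameter compare equal and hash alike, which is what allows nodes using them to be merged. A small sketch (the opcode value and mnemonic are placeholders, not real IrOpcodes):

  Operator1<int> a(73, Operator::kPure, 0, 1, "DemoParam", 42);
  Operator1<int> b(73, Operator::kPure, 0, 1, "DemoParam", 42);
  Operator1<int> c(73, Operator::kPure, 0, 1, "DemoParam", 7);
  CHECK(a.Equals(&b));                   // same opcode, same parameter
  CHECK_EQ(a.HashCode(), b.HashCode());
  CHECK(!a.Equals(&c));                  // same opcode, different parameter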
diff --git a/deps/v8/src/compiler/phi-reducer.h b/deps/v8/src/compiler/phi-reducer.h
new file mode 100644
index 000000000..a9b145043
--- /dev/null
+++ b/deps/v8/src/compiler/phi-reducer.h
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PHI_REDUCER_H_
+#define V8_COMPILER_PHI_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Replaces redundant phis, i.e. phis whose inputs are all the same value or
+// the phi itself.
+class PhiReducer V8_FINAL : public Reducer {
+ public:
+ virtual Reduction Reduce(Node* node) V8_OVERRIDE {
+ if (node->opcode() != IrOpcode::kPhi &&
+ node->opcode() != IrOpcode::kEffectPhi)
+ return NoChange();
+
+ int n = node->op()->InputCount();
+ if (n == 1) return Replace(node->InputAt(0));
+
+ Node* replacement = NULL;
+ Node::Inputs inputs = node->inputs();
+ for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
+ Node* input = *it;
+ if (input != node && input != replacement) {
+ if (replacement != NULL) return NoChange();
+ replacement = input;
+ }
+ }
+ DCHECK_NE(node, replacement);
+ return Replace(replacement);
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_PHI_REDUCER_H_
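
In graph terms, the reductions above amount to the following; the GraphReducer wiring mirrors what pipeline.cc does further down in this patch and assumes a Graph named graph:

  //   v = Phi(x, x, x)  =>  replace v with x
  //   v = Phi(x, v)     =>  replace v with x   (self-inputs are ignored)
  //   v = Phi(x, y)     =>  no change          (two distinct non-self inputs)
  PhiReducer phi_reducer;
  GraphReducer graph_reducer(&graph);
  graph_reducer.AddReducer(&phi_reducer);
  graph_reducer.ReduceGraph();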
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
new file mode 100644
index 000000000..b0b3eb76e
--- /dev/null
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -0,0 +1,341 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/code-generator.h"
+#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/phi-reducer.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/hydrogen.h"
+#include "src/ostreams.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class PhaseStats {
+ public:
+ enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN };
+
+ PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name)
+ : info_(info),
+ kind_(kind),
+ name_(name),
+ size_(info->zone()->allocation_size()) {
+ if (FLAG_turbo_stats) {
+ timer_.Start();
+ }
+ }
+
+ ~PhaseStats() {
+ if (FLAG_turbo_stats) {
+ base::TimeDelta delta = timer_.Elapsed();
+ size_t bytes = info_->zone()->allocation_size() - size_;
+ HStatistics* stats = info_->isolate()->GetTStatistics();
+ stats->SaveTiming(name_, delta, static_cast<int>(bytes));
+
+ switch (kind_) {
+ case CREATE_GRAPH:
+ stats->IncrementCreateGraph(delta);
+ break;
+ case OPTIMIZATION:
+ stats->IncrementOptimizeGraph(delta);
+ break;
+ case CODEGEN:
+ stats->IncrementGenerateCode(delta);
+ break;
+ }
+ }
+ }
+
+ private:
+ CompilationInfo* info_;
+ PhaseKind kind_;
+ const char* name_;
+ size_t size_;
+ base::ElapsedTimer timer_;
+};
+
+
+void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
+ if (FLAG_trace_turbo) {
+ char buffer[256];
+ Vector<char> filename(buffer, sizeof(buffer));
+ SmartArrayPointer<char> functionname =
+ info_->shared_info()->DebugName()->ToCString();
+ if (strlen(functionname.get()) > 0) {
+ SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase);
+ } else {
+ SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase);
+ }
+ std::replace(filename.start(), filename.start() + filename.length(), ' ',
+ '_');
+ FILE* file = base::OS::FOpen(filename.start(), "w+");
+ OFStream of(file);
+ of << AsDOT(*graph);
+ fclose(file);
+
+ OFStream os(stdout);
+ os << "-- " << phase << " graph printed to file " << filename.start()
+ << "\n";
+ }
+ if (VerifyGraphs()) Verifier::Run(graph);
+}
+
+
+class AstGraphBuilderWithPositions : public AstGraphBuilder {
+ public:
+ explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions)
+ : AstGraphBuilder(info, jsgraph), source_positions_(source_positions) {}
+
+ bool CreateGraph() {
+ SourcePositionTable::Scope pos(source_positions_,
+ SourcePosition::Unknown());
+ return AstGraphBuilder::CreateGraph();
+ }
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ SourcePositionTable::Scope pos(source_positions_, \
+ SourcePosition(node->position())); \
+ AstGraphBuilder::Visit##type(node); \
+ }
+ AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+ SourcePositionTable* source_positions_;
+};
+
+
+static void TraceSchedule(Schedule* schedule) {
+ if (!FLAG_trace_turbo) return;
+ OFStream os(stdout);
+ os << "-- Schedule --------------------------------------\n" << *schedule;
+}
+
+
+Handle<Code> Pipeline::GenerateCode() {
+ if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
+
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method "
+ << info()->function()->debug_name()->ToCString().get()
+ << " using Turbofan" << endl;
+ }
+
+ // Build the graph.
+ Graph graph(zone());
+ SourcePositionTable source_positions(&graph);
+ source_positions.AddDecorator();
+ // TODO(turbofan): there is no need to type anything during initial graph
+ // construction. This is currently only needed for the node cache, which the
+ // typer could sweep over later.
+ Typer typer(zone());
+ CommonOperatorBuilder common(zone());
+ JSGraph jsgraph(&graph, &common, &typer);
+ Node* context_node;
+ {
+ PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
+ "graph builder");
+ AstGraphBuilderWithPositions graph_builder(info(), &jsgraph,
+ &source_positions);
+ graph_builder.CreateGraph();
+ context_node = graph_builder.GetFunctionContext();
+ }
+ {
+ PhaseStats phi_reducer_stats(info(), PhaseStats::CREATE_GRAPH,
+ "phi reduction");
+ PhiReducer phi_reducer;
+ GraphReducer graph_reducer(&graph);
+ graph_reducer.AddReducer(&phi_reducer);
+ graph_reducer.ReduceGraph();
+ // TODO(mstarzinger): Running reducer once ought to be enough for everyone.
+ graph_reducer.ReduceGraph();
+ graph_reducer.ReduceGraph();
+ }
+
+ VerifyAndPrintGraph(&graph, "Initial untyped");
+
+ if (FLAG_context_specialization) {
+ SourcePositionTable::Scope pos_(&source_positions,
+ SourcePosition::Unknown());
+ // Specialize the code to the context as aggressively as possible.
+ JSContextSpecializer spec(info(), &jsgraph, context_node);
+ spec.SpecializeToContext();
+ VerifyAndPrintGraph(&graph, "Context specialized");
+ }
+
+ // Print a replay of the initial graph.
+ if (FLAG_print_turbo_replay) {
+ GraphReplayPrinter::PrintReplay(&graph);
+ }
+
+ if (FLAG_turbo_types) {
+ {
+ // Type the graph.
+ PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
+ typer.Run(&graph, info()->context());
+ }
+ // All new nodes must be typed.
+ typer.DecorateGraph(&graph);
+ {
+ // Lower JSOperators where we can determine types.
+ PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+ "typed lowering");
+ JSTypedLowering lowering(&jsgraph, &source_positions);
+ lowering.LowerAllNodes();
+
+ VerifyAndPrintGraph(&graph, "Lowered typed");
+ }
+ }
+
+ Handle<Code> code = Handle<Code>::null();
+ if (SupportedTarget()) {
+ {
+ // Lower any remaining generic JSOperators.
+ PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+ "generic lowering");
+ MachineOperatorBuilder machine(zone());
+ JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions);
+ lowering.LowerAllNodes();
+
+ VerifyAndPrintGraph(&graph, "Lowered generic");
+ }
+
+ // Compute a schedule.
+ Schedule* schedule = ComputeSchedule(&graph);
+ TraceSchedule(schedule);
+
+ {
+ // Generate optimized code.
+ PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+ Linkage linkage(info());
+ code = GenerateCode(&linkage, &graph, schedule, &source_positions);
+ info()->SetCode(code);
+ }
+
+ // Print optimized code.
+ v8::internal::CodeGenerator::PrintCode(code, info());
+ }
+
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << "--------------------------------------------------\n"
+ << "Finished compiling method "
+ << info()->function()->debug_name()->ToCString().get()
+ << " using Turbofan" << endl;
+ }
+
+ return code;
+}
+
+
+Schedule* Pipeline::ComputeSchedule(Graph* graph) {
+ PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
+ return Scheduler::ComputeSchedule(graph);
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
+ Graph* graph,
+ Schedule* schedule) {
+ CHECK(SupportedBackend());
+ if (schedule == NULL) {
+ VerifyAndPrintGraph(graph, "Machine");
+ schedule = ComputeSchedule(graph);
+ }
+ TraceSchedule(schedule);
+
+ SourcePositionTable source_positions(graph);
+ Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions);
+#if ENABLE_DISASSEMBLER
+ if (!code.is_null() && FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble("test code", os);
+ }
+#endif
+ return code;
+}
+
+
+Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
+ Schedule* schedule,
+ SourcePositionTable* source_positions) {
+ DCHECK_NOT_NULL(graph);
+ DCHECK_NOT_NULL(linkage);
+ DCHECK_NOT_NULL(schedule);
+ CHECK(SupportedBackend());
+
+ InstructionSequence sequence(linkage, graph, schedule);
+
+ // Select and schedule instructions covering the scheduled graph.
+ {
+ InstructionSelector selector(&sequence, source_positions);
+ selector.SelectInstructions();
+ }
+
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << "----- Instruction sequence before register allocation -----\n"
+ << sequence;
+ }
+
+ // Allocate registers.
+ {
+ int node_count = graph->NodeCount();
+ if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+ linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
+ return Handle<Code>::null();
+ }
+ RegisterAllocator allocator(&sequence);
+ if (!allocator.Allocate()) {
+ linkage->info()->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
+ return Handle<Code>::null();
+ }
+ }
+
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ os << "----- Instruction sequence after register allocation -----\n"
+ << sequence;
+ }
+
+ // Generate native sequence.
+ CodeGenerator generator(&sequence);
+ return generator.GenerateCode();
+}
+
+
+void Pipeline::SetUp() {
+ InstructionOperand::SetUpCaches();
+}
+
+
+void Pipeline::TearDown() {
+ InstructionOperand::TearDownCaches();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
new file mode 100644
index 000000000..4c1c0bcea
--- /dev/null
+++ b/deps/v8/src/compiler/pipeline.h
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PIPELINE_H_
+#define V8_COMPILER_PIPELINE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+
+// Note: TODO(turbofan) implies a performance improvement opportunity,
+// and TODO(name) implies an incomplete implementation.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+class CallDescriptor;
+class Graph;
+class Schedule;
+class SourcePositionTable;
+class Linkage;
+
+class Pipeline {
+ public:
+ explicit Pipeline(CompilationInfo* info) : info_(info) {}
+
+ // Run the entire pipeline and generate a handle to a code object.
+ Handle<Code> GenerateCode();
+
+ // Run the pipeline on a machine graph and generate code. If {schedule}
+ // is {NULL}, then compute a new schedule for code generation.
+ Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
+ Schedule* schedule = NULL);
+
+ CompilationInfo* info() const { return info_; }
+ Zone* zone() { return info_->zone(); }
+ Isolate* isolate() { return info_->isolate(); }
+
+ static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
+ static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
+
+ static inline bool VerifyGraphs() {
+#ifdef DEBUG
+ return true;
+#else
+ return FLAG_turbo_verify;
+#endif
+ }
+
+ static void SetUp();
+ static void TearDown();
+
+ private:
+ CompilationInfo* info_;
+
+ Schedule* ComputeSchedule(Graph* graph);
+ void VerifyAndPrintGraph(Graph* graph, const char* phase);
+ Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
+ SourcePositionTable* source_positions);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_PIPELINE_H_
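
A sketch of how a caller is expected to drive this interface, using only the methods declared above; the CompilationInfo instance is assumed to come from the existing compiler plumbing:

  Pipeline::SetUp();                       // one-time operand cache setup
  Pipeline pipeline(&compilation_info);    // compilation_info: assumed to exist
  Handle<Code> code = pipeline.GenerateCode();
  if (!code.is_null()) {
    // The caller installs the generated code on the function.
  }
  Pipeline::TearDown();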
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
new file mode 100644
index 000000000..afbd268dc
--- /dev/null
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -0,0 +1,158 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RawMachineAssembler::RawMachineAssembler(
+ Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+ MachineType word)
+ : GraphBuilder(graph),
+ schedule_(new (zone()) Schedule(zone())),
+ machine_(zone(), word),
+ common_(zone()),
+ call_descriptor_builder_(call_descriptor_builder),
+ parameters_(NULL),
+ exit_label_(schedule()->exit()),
+ current_block_(schedule()->entry()) {
+ Node* s = graph->NewNode(common_.Start(parameter_count()));
+ graph->SetStart(s);
+ if (parameter_count() == 0) return;
+ parameters_ = zone()->NewArray<Node*>(parameter_count());
+ for (int i = 0; i < parameter_count(); ++i) {
+ parameters_[i] = NewNode(common()->Parameter(i), graph->start());
+ }
+}
+
+
+Schedule* RawMachineAssembler::Export() {
+ // Compute the correct codegen order.
+ DCHECK(schedule_->rpo_order()->empty());
+ Scheduler::ComputeSpecialRPO(schedule_);
+ // Invalidate MachineAssembler.
+ Schedule* schedule = schedule_;
+ schedule_ = NULL;
+ return schedule;
+}
+
+
+Node* RawMachineAssembler::Parameter(int index) {
+ DCHECK(0 <= index && index < parameter_count());
+ return parameters_[index];
+}
+
+
+RawMachineAssembler::Label* RawMachineAssembler::Exit() {
+ exit_label_.used_ = true;
+ return &exit_label_;
+}
+
+
+void RawMachineAssembler::Goto(Label* label) {
+ DCHECK(current_block_ != schedule()->exit());
+ schedule()->AddGoto(CurrentBlock(), Use(label));
+ current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Branch(Node* condition, Label* true_val,
+ Label* false_val) {
+ DCHECK(current_block_ != schedule()->exit());
+ Node* branch = NewNode(common()->Branch(), condition);
+ schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+ current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Return(Node* value) {
+ schedule()->AddReturn(CurrentBlock(), value);
+ current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Deoptimize(Node* state) {
+ Node* deopt = graph()->NewNode(common()->Deoptimize(), state);
+ schedule()->AddDeoptimize(CurrentBlock(), deopt);
+ current_block_ = NULL;
+}
+
+
+Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
+ Label* continuation, Label* deoptimization) {
+ CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver);
+ schedule()->AddCall(CurrentBlock(), call, Use(continuation),
+ Use(deoptimization));
+ current_block_ = NULL;
+ return call;
+}
+
+
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+ Node* arg0, Label* continuation,
+ Label* deoptimization) {
+ CallDescriptor* descriptor =
+ Linkage::GetRuntimeCallDescriptor(function, 1, Operator::kNoProperties,
+ CallDescriptor::kCanDeoptimize, zone());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* ref = NewNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(1);
+ Node* context = Parameter(1);
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
+ arity, context);
+ schedule()->AddCall(CurrentBlock(), call, Use(continuation),
+ Use(deoptimization));
+ current_block_ = NULL;
+ return call;
+}
+
+
+void RawMachineAssembler::Bind(Label* label) {
+ DCHECK(current_block_ == NULL);
+ DCHECK(!label->bound_);
+ label->bound_ = true;
+ current_block_ = EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::Use(Label* label) {
+ label->used_ = true;
+ return EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+ if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+ return label->block_;
+}
+
+
+BasicBlock* RawMachineAssembler::CurrentBlock() {
+ DCHECK(current_block_);
+ return current_block_;
+}
+
+
+Node* RawMachineAssembler::MakeNode(Operator* op, int input_count,
+ Node** inputs) {
+ DCHECK(ScheduleValid());
+ DCHECK(current_block_ != NULL);
+ Node* node = graph()->NewNode(op, input_count, inputs);
+ BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
+ : CurrentBlock();
+ schedule()->AddNode(block, node);
+ return node;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
new file mode 100644
index 000000000..6839ade4f
--- /dev/null
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -0,0 +1,129 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+
+
+class RawMachineAssembler : public GraphBuilder,
+ public MachineNodeFactory<RawMachineAssembler> {
+ public:
+ class Label {
+ public:
+ Label() : block_(NULL), used_(false), bound_(false) {}
+ ~Label() { DCHECK(bound_ || !used_); }
+
+ BasicBlock* block() { return block_; }
+
+ private:
+ // Private constructor for exit label.
+ explicit Label(BasicBlock* block)
+ : block_(block), used_(false), bound_(false) {}
+
+ BasicBlock* block_;
+ bool used_;
+ bool bound_;
+ friend class RawMachineAssembler;
+ DISALLOW_COPY_AND_ASSIGN(Label);
+ };
+
+ RawMachineAssembler(Graph* graph,
+ MachineCallDescriptorBuilder* call_descriptor_builder,
+ MachineType word = MachineOperatorBuilder::pointer_rep());
+ virtual ~RawMachineAssembler() {}
+
+ Isolate* isolate() const { return zone()->isolate(); }
+ Zone* zone() const { return graph()->zone(); }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ CallDescriptor* call_descriptor() const {
+ return call_descriptor_builder_->BuildCallDescriptor(zone());
+ }
+ int parameter_count() const {
+ return call_descriptor_builder_->parameter_count();
+ }
+ const MachineType* parameter_types() const {
+ return call_descriptor_builder_->parameter_types();
+ }
+
+ // Parameters.
+ Node* Parameter(int index);
+
+ // Control flow.
+ Label* Exit();
+ void Goto(Label* label);
+ void Branch(Node* condition, Label* true_val, Label* false_val);
+ // Call to a JS function with zero parameters.
+ Node* CallJS0(Node* function, Node* receiver, Label* continuation,
+ Label* deoptimization);
+ // Call to a runtime function with one argument.
+ Node* CallRuntime1(Runtime::FunctionId function, Node* arg0,
+ Label* continuation, Label* deoptimization);
+ void Return(Node* value);
+ void Bind(Label* label);
+ void Deoptimize(Node* state);
+
+ // Variables.
+ Node* Phi(Node* n1, Node* n2) { return NewNode(common()->Phi(2), n1, n2); }
+ Node* Phi(Node* n1, Node* n2, Node* n3) {
+ return NewNode(common()->Phi(3), n1, n2, n3);
+ }
+ Node* Phi(Node* n1, Node* n2, Node* n3, Node* n4) {
+ return NewNode(common()->Phi(4), n1, n2, n3, n4);
+ }
+
+ // MachineAssembler is invalid after export.
+ Schedule* Export();
+
+ protected:
+ virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
+
+ Schedule* schedule() {
+ DCHECK(ScheduleValid());
+ return schedule_;
+ }
+
+ private:
+ bool ScheduleValid() { return schedule_ != NULL; }
+
+ BasicBlock* Use(Label* label);
+ BasicBlock* EnsureBlock(Label* label);
+ BasicBlock* CurrentBlock();
+
+ typedef std::vector<MachineType, zone_allocator<MachineType> >
+ RepresentationVector;
+
+ Schedule* schedule_;
+ MachineOperatorBuilder machine_;
+ CommonOperatorBuilder common_;
+ MachineCallDescriptorBuilder* call_descriptor_builder_;
+ Node** parameters_;
+ Label exit_label_;
+ BasicBlock* current_block_;
+
+ DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
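
A sketch of building a small diamond of control flow with the assembler above and exporting its schedule. The Parameter/Int32LessThan helpers are assumed to be provided by MachineNodeFactory, and the graph and descriptor_builder arguments are assumed to be set up elsewhere; this is an illustration of the Label/Branch/Bind/Export protocol, not a definitive usage:

  RawMachineAssembler m(graph, descriptor_builder);  // both assumed
  RawMachineAssembler::Label if_true, if_false, merge;
  Node* a = m.Parameter(0);
  Node* b = m.Parameter(1);
  m.Branch(m.Int32LessThan(a, b), &if_true, &if_false);
  m.Bind(&if_true);
  m.Goto(&merge);
  m.Bind(&if_false);
  m.Goto(&merge);
  m.Bind(&merge);
  m.Return(m.Phi(a, b));            // roughly min(a, b): a on the true path
  Schedule* schedule = m.Export();  // the assembler is invalid afterwards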
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
new file mode 100644
index 000000000..972a90450
--- /dev/null
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -0,0 +1,2232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-allocator.h"
+
+#include "src/compiler/linkage.h"
+#include "src/hydrogen.h"
+#include "src/string-stream.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+ return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+ return a.Value() > b.Value() ? a : b;
+}
+
+
+UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
+ InstructionOperand* hint)
+ : operand_(operand),
+ hint_(hint),
+ pos_(pos),
+ next_(NULL),
+ requires_reg_(false),
+ register_beneficial_(true) {
+ if (operand_ != NULL && operand_->IsUnallocated()) {
+ const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
+ requires_reg_ = unalloc->HasRegisterPolicy();
+ register_beneficial_ = !unalloc->HasAnyPolicy();
+ }
+ DCHECK(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+ return hint_ != NULL && !hint_->IsUnallocated();
+}
+
+
+bool UsePosition::RequiresRegister() const { return requires_reg_; }
+
+
+bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
+
+
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+ DCHECK(Contains(pos) && pos.Value() != start().Value());
+ UseInterval* after = new (zone) UseInterval(pos, end_);
+ after->next_ = next_;
+ next_ = after;
+ end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+ UsePosition* cur = first_pos_;
+ while (cur != NULL) {
+ DCHECK(Start().Value() <= cur->pos().Value() &&
+ cur->pos().Value() <= End().Value());
+ cur = cur->next();
+ }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+ UseInterval* current_interval = first_interval_;
+ while (current_interval != NULL) {
+ // Intervals overlap if the start of one is contained in the other.
+ if (current_interval->Contains(target->start()) ||
+ target->Contains(current_interval->start())) {
+ return true;
+ }
+ current_interval = current_interval->next();
+ }
+ return false;
+}
+
+
+#endif
+
+
+LiveRange::LiveRange(int id, Zone* zone)
+ : id_(id),
+ spilled_(false),
+ is_phi_(false),
+ is_non_loop_phi_(false),
+ kind_(UNALLOCATED_REGISTERS),
+ assigned_register_(kInvalidAssignment),
+ last_interval_(NULL),
+ first_interval_(NULL),
+ first_pos_(NULL),
+ parent_(NULL),
+ next_(NULL),
+ current_interval_(NULL),
+ last_processed_use_(NULL),
+ current_hint_operand_(NULL),
+ spill_operand_(new (zone) InstructionOperand()),
+ spill_start_index_(kMaxInt) {}
+
+
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
+ DCHECK(!HasRegisterAssigned() && !IsSpilled());
+ assigned_register_ = reg;
+ ConvertOperands(zone);
+}
+
+
+void LiveRange::MakeSpilled(Zone* zone) {
+ DCHECK(!IsSpilled());
+ DCHECK(TopLevel()->HasAllocatedSpillOperand());
+ spilled_ = true;
+ assigned_register_ = kInvalidAssignment;
+ ConvertOperands(zone);
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+ DCHECK(spill_operand_ != NULL);
+ return !spill_operand_->IsIgnored();
+}
+
+
+void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+ DCHECK(!operand->IsUnallocated());
+ DCHECK(spill_operand_ != NULL);
+ DCHECK(spill_operand_->IsIgnored());
+ spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+ UsePosition* use_pos = last_processed_use_;
+ if (use_pos == NULL) use_pos = first_pos();
+ while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+ use_pos = use_pos->next();
+ }
+ last_processed_use_ = use_pos;
+ return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+ LifetimePosition start) {
+ UsePosition* pos = NextUsePosition(start);
+ while (pos != NULL && !pos->RegisterIsBeneficial()) {
+ pos = pos->next();
+ }
+ return pos;
+}
+
+
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+ LifetimePosition start) {
+ UsePosition* pos = first_pos();
+ UsePosition* prev = NULL;
+ while (pos != NULL && pos->pos().Value() < start.Value()) {
+ if (pos->RegisterIsBeneficial()) prev = pos;
+ pos = pos->next();
+ }
+ return prev;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+ UsePosition* pos = NextUsePosition(start);
+ while (pos != NULL && !pos->RequiresRegister()) {
+ pos = pos->next();
+ }
+ return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+ // We cannot spill a live range that has a use requiring a register
+ // at the current or the immediate next position.
+ UsePosition* use_pos = NextRegisterPosition(pos);
+ if (use_pos == NULL) return true;
+ return use_pos->pos().Value() >
+ pos.NextInstruction().InstructionEnd().Value();
+}
+
+
+InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+ InstructionOperand* op = NULL;
+ if (HasRegisterAssigned()) {
+ DCHECK(!IsSpilled());
+ switch (Kind()) {
+ case GENERAL_REGISTERS:
+ op = RegisterOperand::Create(assigned_register(), zone);
+ break;
+ case DOUBLE_REGISTERS:
+ op = DoubleRegisterOperand::Create(assigned_register(), zone);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (IsSpilled()) {
+ DCHECK(!HasRegisterAssigned());
+ op = TopLevel()->GetSpillOperand();
+ DCHECK(!op->IsUnallocated());
+ } else {
+ UnallocatedOperand* unalloc =
+ new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+ unalloc->set_virtual_register(id_);
+ op = unalloc;
+ }
+ return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+ LifetimePosition position) const {
+ if (current_interval_ == NULL) return first_interval_;
+ if (current_interval_->start().Value() > position.Value()) {
+ current_interval_ = NULL;
+ return first_interval_;
+ }
+ return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+ UseInterval* to_start_of, LifetimePosition but_not_past) const {
+ if (to_start_of == NULL) return;
+ if (to_start_of->start().Value() > but_not_past.Value()) return;
+ LifetimePosition start = current_interval_ == NULL
+ ? LifetimePosition::Invalid()
+ : current_interval_->start();
+ if (to_start_of->start().Value() > start.Value()) {
+ current_interval_ = to_start_of;
+ }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
+ Zone* zone) {
+ DCHECK(Start().Value() < position.Value());
+ DCHECK(result->IsEmpty());
+ // Find the last interval that ends before the position. If the
+ // position is contained in one of the intervals in the chain, we
+ // split that interval and use the first part.
+ UseInterval* current = FirstSearchIntervalForPosition(position);
+
+ // If the split position coincides with the beginning of a use interval
+ // we need to split use positions in a special way.
+ bool split_at_start = false;
+
+ if (current->start().Value() == position.Value()) {
+ // When splitting at start we need to locate the previous use interval.
+ current = first_interval_;
+ }
+
+ while (current != NULL) {
+ if (current->Contains(position)) {
+ current->SplitAt(position, zone);
+ break;
+ }
+ UseInterval* next = current->next();
+ if (next->start().Value() >= position.Value()) {
+ split_at_start = (next->start().Value() == position.Value());
+ break;
+ }
+ current = next;
+ }
+
+ // Partition original use intervals to the two live ranges.
+ UseInterval* before = current;
+ UseInterval* after = before->next();
+ result->last_interval_ =
+ (last_interval_ == before)
+ ? after // Only interval in the range after split.
+ : last_interval_; // Last interval of the original range.
+ result->first_interval_ = after;
+ last_interval_ = before;
+
+ // Find the last use position before the split and the first use
+ // position after it.
+ UsePosition* use_after = first_pos_;
+ UsePosition* use_before = NULL;
+ if (split_at_start) {
+ // The split position coincides with the beginning of a use interval (the
+ // end of a lifetime hole). The use at this position should be attributed
+ // to the split child, because the split child owns the use interval
+ // covering it.
+ while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+ use_before = use_after;
+ use_after = use_after->next();
+ }
+ } else {
+ while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+ use_before = use_after;
+ use_after = use_after->next();
+ }
+ }
+
+ // Partition original use positions to the two live ranges.
+ if (use_before != NULL) {
+ use_before->next_ = NULL;
+ } else {
+ first_pos_ = NULL;
+ }
+ result->first_pos_ = use_after;
+
+ // Discard cached iteration state. It might be pointing
+ // to a use that no longer belongs to this live range.
+ last_processed_use_ = NULL;
+ current_interval_ = NULL;
+
+ // Link the new live range in the chain before any of the other
+ // ranges linked from the range before the split.
+ result->parent_ = (parent_ == NULL) ? this : parent_;
+ result->kind_ = result->parent_->kind_;
+ result->next_ = next_;
+ next_ = result;
+
+#ifdef DEBUG
+ Verify();
+ result->Verify();
+#endif
+}
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions. This is needed for the correctness of the register
+// allocation algorithm. If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used. This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+ LifetimePosition start = Start();
+ LifetimePosition other_start = other->Start();
+ if (start.Value() == other_start.Value()) {
+ UsePosition* pos = first_pos();
+ if (pos == NULL) return false;
+ UsePosition* other_pos = other->first_pos();
+ if (other_pos == NULL) return true;
+ return pos->pos().Value() < other_pos->pos().Value();
+ }
+ return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+ RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
+ start.Value());
+ DCHECK(first_interval_ != NULL);
+ DCHECK(first_interval_->start().Value() <= start.Value());
+ DCHECK(start.Value() < first_interval_->end().Value());
+ first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
+ Zone* zone) {
+ RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+ id_, start.Value(), end.Value());
+ LifetimePosition new_end = end;
+ while (first_interval_ != NULL &&
+ first_interval_->start().Value() <= end.Value()) {
+ if (first_interval_->end().Value() > end.Value()) {
+ new_end = first_interval_->end();
+ }
+ first_interval_ = first_interval_->next();
+ }
+
+ UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+ new_interval->next_ = first_interval_;
+ first_interval_ = new_interval;
+ if (new_interval->next() == NULL) {
+ last_interval_ = new_interval;
+ }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
+ Zone* zone) {
+ RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
+ start.Value(), end.Value());
+ if (first_interval_ == NULL) {
+ UseInterval* interval = new (zone) UseInterval(start, end);
+ first_interval_ = interval;
+ last_interval_ = interval;
+ } else {
+ if (end.Value() == first_interval_->start().Value()) {
+ first_interval_->set_start(start);
+ } else if (end.Value() < first_interval_->start().Value()) {
+ UseInterval* interval = new (zone) UseInterval(start, end);
+ interval->set_next(first_interval_);
+ first_interval_ = interval;
+ } else {
+ // The order in which instructions are processed (see ProcessInstructions)
+ // guarantees that each new use interval either precedes or intersects with
+ // the last added interval.
+ DCHECK(start.Value() < first_interval_->end().Value());
+ first_interval_->start_ = Min(start, first_interval_->start_);
+ first_interval_->end_ = Max(end, first_interval_->end_);
+ }
+ }
+}
+
+
+void LiveRange::AddUsePosition(LifetimePosition pos,
+ InstructionOperand* operand,
+ InstructionOperand* hint, Zone* zone) {
+ RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
+ pos.Value());
+ UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
+ UsePosition* prev_hint = NULL;
+ UsePosition* prev = NULL;
+ UsePosition* current = first_pos_;
+ while (current != NULL && current->pos().Value() < pos.Value()) {
+ prev_hint = current->HasHint() ? current : prev_hint;
+ prev = current;
+ current = current->next();
+ }
+
+ if (prev == NULL) {
+ use_pos->set_next(first_pos_);
+ first_pos_ = use_pos;
+ } else {
+ use_pos->next_ = prev->next_;
+ prev->next_ = use_pos;
+ }
+
+ if (prev_hint == NULL && use_pos->HasHint()) {
+ current_hint_operand_ = hint;
+ }
+}
+
+
+void LiveRange::ConvertOperands(Zone* zone) {
+ InstructionOperand* op = CreateAssignedOperand(zone);
+ UsePosition* use_pos = first_pos();
+ while (use_pos != NULL) {
+ DCHECK(Start().Value() <= use_pos->pos().Value() &&
+ use_pos->pos().Value() <= End().Value());
+
+ if (use_pos->HasOperand()) {
+ DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
+ !use_pos->RequiresRegister());
+ use_pos->operand()->ConvertTo(op->kind(), op->index());
+ }
+ use_pos = use_pos->next();
+ }
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+ if (IsEmpty()) return false;
+ return Start().Value() <= position.Value() &&
+ position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+ if (!CanCover(position)) return false;
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ for (UseInterval* interval = start_search; interval != NULL;
+ interval = interval->next()) {
+ DCHECK(interval->next() == NULL ||
+ interval->next()->start().Value() >= interval->start().Value());
+ AdvanceLastProcessedMarker(interval, position);
+ if (interval->Contains(position)) return true;
+ if (interval->start().Value() > position.Value()) return false;
+ }
+ return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+ UseInterval* b = other->first_interval();
+ if (b == NULL) return LifetimePosition::Invalid();
+ LifetimePosition advance_last_processed_up_to = b->start();
+ UseInterval* a = FirstSearchIntervalForPosition(b->start());
+ while (a != NULL && b != NULL) {
+ if (a->start().Value() > other->End().Value()) break;
+ if (b->start().Value() > End().Value()) break;
+ LifetimePosition cur_intersection = a->Intersect(b);
+ if (cur_intersection.IsValid()) {
+ return cur_intersection;
+ }
+ if (a->start().Value() < b->start().Value()) {
+ a = a->next();
+ if (a == NULL || a->start().Value() > other->End().Value()) break;
+ AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+ } else {
+ b = b->next();
+ }
+ }
+ return LifetimePosition::Invalid();
+}
+
+
+RegisterAllocator::RegisterAllocator(InstructionSequence* code)
+ : zone_(code->isolate()),
+ code_(code),
+ live_in_sets_(code->BasicBlockCount(), zone()),
+ live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+ fixed_live_ranges_(NULL),
+ fixed_double_live_ranges_(NULL),
+ unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+ active_live_ranges_(8, zone()),
+ inactive_live_ranges_(8, zone()),
+ reusable_slots_(8, zone()),
+ mode_(UNALLOCATED_REGISTERS),
+ num_registers_(-1),
+ allocation_ok_(true) {}
+
+
+void RegisterAllocator::InitializeLivenessAnalysis() {
+ // Initialize the live_in sets for each block to NULL.
+ int block_count = code()->BasicBlockCount();
+ live_in_sets_.Initialize(block_count, zone());
+ live_in_sets_.AddBlock(NULL, block_count, zone());
+}
+
+
+BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+ // Compute the live-out set for the given block, excluding backward
+ // successor edges.
+ BitVector* live_out =
+ new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
+
+ // Process all successor blocks.
+ BasicBlock::Successors successors = block->successors();
+ for (BasicBlock::Successors::iterator i = successors.begin();
+ i != successors.end(); ++i) {
+ // Add values live on entry to the successor. Note the successor's
+ // live_in will not be computed yet for backwards edges.
+ BasicBlock* successor = *i;
+ BitVector* live_in = live_in_sets_[successor->rpo_number_];
+ if (live_in != NULL) live_out->Union(*live_in);
+
+ // All phi input operands corresponding to this successor edge are live
+ // out from this block.
+ int index = successor->PredecessorIndexOf(block);
+ DCHECK(index >= 0);
+ DCHECK(index < static_cast<int>(successor->PredecessorCount()));
+ for (BasicBlock::const_iterator j = successor->begin();
+ j != successor->end(); ++j) {
+ Node* phi = *j;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+ Node* input = phi->InputAt(index);
+ live_out->Add(input->id());
+ }
+ }
+
+ return live_out;
+}
+
+
+void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+ BitVector* live_out) {
+ // Add an interval that includes the entire block to the live range for
+ // each live_out value.
+ LifetimePosition start =
+ LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+ LifetimePosition end = LifetimePosition::FromInstructionIndex(
+ block->last_instruction_index()).NextInstruction();
+ BitVector::Iterator iterator(live_out);
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ LiveRange* range = LiveRangeFor(operand_index);
+ range->AddUseInterval(start, end, zone());
+ iterator.Advance();
+ }
+}
+
+
+int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
+ return -index - 1 - Register::kMaxNumAllocatableRegisters;
+}
+
+
+InstructionOperand* RegisterAllocator::AllocateFixed(
+ UnallocatedOperand* operand, int pos, bool is_tagged) {
+ TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+ DCHECK(operand->HasFixedPolicy());
+ if (operand->HasFixedSlotPolicy()) {
+ operand->ConvertTo(InstructionOperand::STACK_SLOT,
+ operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
+ operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
+ operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
+ } else {
+ UNREACHABLE();
+ }
+ if (is_tagged) {
+ TraceAlloc("Fixed reg is tagged at %d\n", pos);
+ Instruction* instr = InstructionAt(pos);
+ if (instr->HasPointerMap()) {
+ instr->pointer_map()->RecordPointer(operand, code_zone());
+ }
+ }
+ return operand;
+}
+
+
+LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
+ DCHECK(index < Register::kMaxNumAllocatableRegisters);
+ LiveRange* result = fixed_live_ranges_[index];
+ if (result == NULL) {
+ // TODO(titzer): add a utility method to allocate a new LiveRange:
+ // the LiveRange object itself can go in this zone, but the
+ // InstructionOperand needs to go in the code zone, since it may
+ // survive register allocation.
+ result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+ DCHECK(result->IsFixed());
+ result->kind_ = GENERAL_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
+ fixed_live_ranges_[index] = result;
+ }
+ return result;
+}
+
+
+LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
+ DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+ LiveRange* result = fixed_double_live_ranges_[index];
+ if (result == NULL) {
+ result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+ DCHECK(result->IsFixed());
+ result->kind_ = DOUBLE_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
+ fixed_double_live_ranges_[index] = result;
+ }
+ return result;
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(int index) {
+ if (index >= live_ranges_.length()) {
+ live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+ }
+ LiveRange* result = live_ranges_[index];
+ if (result == NULL) {
+ result = new (zone()) LiveRange(index, code_zone());
+ live_ranges_[index] = result;
+ }
+ return result;
+}
+
+
+GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
+ int last_instruction = block->last_instruction_index();
+ return code()->GapAt(last_instruction - 1);
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
+ if (operand->IsUnallocated()) {
+ return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+ } else if (operand->IsRegister()) {
+ return FixedLiveRangeFor(operand->index());
+ } else if (operand->IsDoubleRegister()) {
+ return FixedDoubleLiveRangeFor(operand->index());
+ } else {
+ return NULL;
+ }
+}
+
+
+void RegisterAllocator::Define(LifetimePosition position,
+ InstructionOperand* operand,
+ InstructionOperand* hint) {
+ LiveRange* range = LiveRangeFor(operand);
+ if (range == NULL) return;
+
+ if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+ // Can happen if there is a definition without a use.
+ range->AddUseInterval(position, position.NextInstruction(), zone());
+ range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+ } else {
+ range->ShortenTo(position);
+ }
+
+ if (operand->IsUnallocated()) {
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
+ }
+}
+
+
+void RegisterAllocator::Use(LifetimePosition block_start,
+ LifetimePosition position,
+ InstructionOperand* operand,
+ InstructionOperand* hint) {
+ LiveRange* range = LiveRangeFor(operand);
+ if (range == NULL) return;
+ if (operand->IsUnallocated()) {
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ range->AddUsePosition(position, unalloc_operand, hint, zone());
+ }
+ range->AddUseInterval(block_start, position, zone());
+}
+
+
+void RegisterAllocator::AddConstraintsGapMove(int index,
+ InstructionOperand* from,
+ InstructionOperand* to) {
+ GapInstruction* gap = code()->GapAt(index);
+ ParallelMove* move =
+ gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+ if (from->IsUnallocated()) {
+ const ZoneList<MoveOperands>* move_operands = move->move_operands();
+ for (int i = 0; i < move_operands->length(); ++i) {
+ MoveOperands cur = move_operands->at(i);
+ InstructionOperand* cur_to = cur.destination();
+ if (cur_to->IsUnallocated()) {
+ if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
+ UnallocatedOperand::cast(from)->virtual_register()) {
+ move->AddMove(cur.source(), to, code_zone());
+ return;
+ }
+ }
+ }
+ }
+ move->AddMove(from, to, code_zone());
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
+ int start = block->first_instruction_index();
+ int end = block->last_instruction_index();
+ DCHECK_NE(-1, start);
+ for (int i = start; i <= end; ++i) {
+ if (code()->IsGapAt(i)) {
+ Instruction* instr = NULL;
+ Instruction* prev_instr = NULL;
+ if (i < end) instr = InstructionAt(i + 1);
+ if (i > start) prev_instr = InstructionAt(i - 1);
+ MeetConstraintsBetween(prev_instr, instr, i);
+ if (!AllocationOk()) return;
+ }
+ }
+
+ // Meet register constraints for the instruction at the end of the block.
+ if (!code()->IsGapAt(end)) {
+ MeetRegisterConstraintsForLastInstructionInBlock(block);
+ }
+}
+
+
+void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
+ BasicBlock* block) {
+ int end = block->last_instruction_index();
+ Instruction* last_instruction = InstructionAt(end);
+ for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
+ InstructionOperand* output_operand = last_instruction->OutputAt(i);
+ DCHECK(!output_operand->IsConstant());
+ UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+ int output_vreg = output->virtual_register();
+ LiveRange* range = LiveRangeFor(output_vreg);
+ bool assigned = false;
+ if (output->HasFixedPolicy()) {
+ AllocateFixed(output, -1, false);
+ // This value is produced on the stack; we never need to spill it.
+ if (output->IsStackSlot()) {
+ range->SetSpillOperand(output);
+ range->SetSpillStartIndex(end);
+ assigned = true;
+ }
+
+ BasicBlock::Successors successors = block->successors();
+ for (BasicBlock::Successors::iterator succ = successors.begin();
+ succ != successors.end(); ++succ) {
+ DCHECK((*succ)->PredecessorCount() == 1);
+ int gap_index = (*succ)->first_instruction_index() + 1;
+ DCHECK(code()->IsGapAt(gap_index));
+
+ // Create an unconstrained operand for the same virtual register
+ // and insert a gap move from the fixed output to the operand.
+ UnallocatedOperand* output_copy =
+ new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+ output_copy->set_virtual_register(output_vreg);
+
+ code()->AddGapMove(gap_index, output, output_copy);
+ }
+ }
+
+ if (!assigned) {
+ BasicBlock::Successors successors = block->successors();
+ for (BasicBlock::Successors::iterator succ = successors.begin();
+ succ != successors.end(); ++succ) {
+ DCHECK((*succ)->PredecessorCount() == 1);
+ int gap_index = (*succ)->first_instruction_index() + 1;
+ range->SetSpillStartIndex(gap_index);
+
+ // This move to the spill operand is not a real use. Liveness analysis
+ // and splitting of live ranges do not account for it.
+ // Thus it should be inserted at a lifetime position corresponding to
+ // the instruction end.
+ GapInstruction* gap = code()->GapAt(gap_index);
+ ParallelMove* move =
+ gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+ move->AddMove(output, range->GetSpillOperand(), code_zone());
+ }
+ }
+ }
+}
+
+
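+ // Meets the fixed-register constraints of `first` (outputs and temps) and
+ // `second` (inputs and same-as-input outputs) by rewriting the constrained
+ // operands in place and inserting parallel moves into the gap between the
+ // two instructions.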
+void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
+ Instruction* second,
+ int gap_index) {
+ if (first != NULL) {
+ // Handle fixed temporaries.
+ for (size_t i = 0; i < first->TempCount(); i++) {
+ UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+ if (temp->HasFixedPolicy()) {
+ AllocateFixed(temp, gap_index - 1, false);
+ }
+ }
+
+ // Handle constant/fixed output operands.
+ for (size_t i = 0; i < first->OutputCount(); i++) {
+ InstructionOperand* output = first->OutputAt(i);
+ if (output->IsConstant()) {
+ int output_vreg = output->index();
+ LiveRange* range = LiveRangeFor(output_vreg);
+ range->SetSpillStartIndex(gap_index - 1);
+ range->SetSpillOperand(output);
+ } else {
+ UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+ LiveRange* range = LiveRangeFor(first_output->virtual_register());
+ bool assigned = false;
+ if (first_output->HasFixedPolicy()) {
+ UnallocatedOperand* output_copy =
+ first_output->CopyUnconstrained(code_zone());
+ bool is_tagged = HasTaggedValue(first_output->virtual_register());
+ AllocateFixed(first_output, gap_index, is_tagged);
+
+ // This value is produced on the stack; we never need to spill it.
+ if (first_output->IsStackSlot()) {
+ range->SetSpillOperand(first_output);
+ range->SetSpillStartIndex(gap_index - 1);
+ assigned = true;
+ }
+ code()->AddGapMove(gap_index, first_output, output_copy);
+ }
+
+ // Make sure we add a gap move for spilling (if we have not done
+ // so already).
+ if (!assigned) {
+ range->SetSpillStartIndex(gap_index);
+
+ // This move to the spill operand is not a real use. Liveness analysis
+ // and splitting of live ranges do not account for it.
+ // Thus it should be inserted at a lifetime position corresponding to
+ // the instruction end.
+ GapInstruction* gap = code()->GapAt(gap_index);
+ ParallelMove* move =
+ gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+ move->AddMove(first_output, range->GetSpillOperand(), code_zone());
+ }
+ }
+ }
+ }
+
+ if (second != NULL) {
+ // Handle fixed input operands of second instruction.
+ for (size_t i = 0; i < second->InputCount(); i++) {
+ InstructionOperand* input = second->InputAt(i);
+ if (input->IsImmediate()) continue; // Ignore immediates.
+ UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+ if (cur_input->HasFixedPolicy()) {
+ UnallocatedOperand* input_copy =
+ cur_input->CopyUnconstrained(code_zone());
+ bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+ AllocateFixed(cur_input, gap_index + 1, is_tagged);
+ AddConstraintsGapMove(gap_index, input_copy, cur_input);
+ }
+ }
+
+ // Handle "output same as input" for second instruction.
+ for (size_t i = 0; i < second->OutputCount(); i++) {
+ InstructionOperand* output = second->OutputAt(i);
+ if (!output->IsUnallocated()) continue;
+ UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+ if (second_output->HasSameAsInputPolicy()) {
+ DCHECK(i == 0); // Only valid for first output.
+ UnallocatedOperand* cur_input =
+ UnallocatedOperand::cast(second->InputAt(0));
+ int output_vreg = second_output->virtual_register();
+ int input_vreg = cur_input->virtual_register();
+
+ UnallocatedOperand* input_copy =
+ cur_input->CopyUnconstrained(code_zone());
+ cur_input->set_virtual_register(second_output->virtual_register());
+ AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+ if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+ int index = gap_index + 1;
+ Instruction* instr = InstructionAt(index);
+ if (instr->HasPointerMap()) {
+ instr->pointer_map()->RecordPointer(input_copy, code_zone());
+ }
+ } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+ // The input is assumed to immediately have a tagged representation,
+ // before the pointer map can be used. That is, the pointer map at the
+ // instruction will include the output operand (whose value at the
+ // beginning of the instruction is equal to the input operand). If
+ // this is not desired, the pointer map at this instruction needs
+ // to be adjusted manually.
+ }
+ }
+ }
+ }
+}
+
+
+bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsRegister() && output->index() == index) return true;
+ }
+ return false;
+}
+
+
+bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
+ int index) {
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsDoubleRegister() && output->index() == index) return true;
+ }
+ return false;
+}
+
+
+void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+ BitVector* live) {
+ int block_start = block->first_instruction_index();
+
+ LifetimePosition block_start_position =
+ LifetimePosition::FromInstructionIndex(block_start);
+
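+ // Walk the instructions backwards: a Use() first extends a value's live
+ // range from the block start up to the use, and the later Define() at the
+ // value's definition shortens the range to start at that definition.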
+ for (int index = block->last_instruction_index(); index >= block_start;
+ index--) {
+ LifetimePosition curr_position =
+ LifetimePosition::FromInstructionIndex(index);
+
+ Instruction* instr = InstructionAt(index);
+ DCHECK(instr != NULL);
+ if (instr->IsGapMoves()) {
+ // Process the moves of the gap instruction, making their sources live.
+ GapInstruction* gap = code()->GapAt(index);
+
+ // TODO(titzer): no need to create the parallel move if it doesn't exist.
+ ParallelMove* move =
+ gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+ const ZoneList<MoveOperands>* move_operands = move->move_operands();
+ for (int i = 0; i < move_operands->length(); ++i) {
+ MoveOperands* cur = &move_operands->at(i);
+ if (cur->IsIgnored()) continue;
+ InstructionOperand* from = cur->source();
+ InstructionOperand* to = cur->destination();
+ InstructionOperand* hint = to;
+ if (to->IsUnallocated()) {
+ int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+ LiveRange* to_range = LiveRangeFor(to_vreg);
+ if (to_range->is_phi()) {
+ if (to_range->is_non_loop_phi()) {
+ hint = to_range->current_hint_operand();
+ }
+ } else {
+ if (live->Contains(to_vreg)) {
+ Define(curr_position, to, from);
+ live->Remove(to_vreg);
+ } else {
+ cur->Eliminate();
+ continue;
+ }
+ }
+ } else {
+ Define(curr_position, to, from);
+ }
+ Use(block_start_position, curr_position, from, hint);
+ if (from->IsUnallocated()) {
+ live->Add(UnallocatedOperand::cast(from)->virtual_register());
+ }
+ }
+ } else {
+ // Process output, inputs, and temps of this non-gap instruction.
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsUnallocated()) {
+ int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
+ live->Remove(out_vreg);
+ } else if (output->IsConstant()) {
+ int out_vreg = output->index();
+ live->Remove(out_vreg);
+ }
+ Define(curr_position, output, NULL);
+ }
+
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+ if (!IsOutputRegisterOf(instr, i)) {
+ LiveRange* range = FixedLiveRangeFor(i);
+ range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+ zone());
+ }
+ }
+ }
+
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ if (!IsOutputDoubleRegisterOf(instr, i)) {
+ LiveRange* range = FixedDoubleLiveRangeFor(i);
+ range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+ zone());
+ }
+ }
+ }
+
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ InstructionOperand* input = instr->InputAt(i);
+ if (input->IsImmediate()) continue; // Ignore immediates.
+ LifetimePosition use_pos;
+ if (input->IsUnallocated() &&
+ UnallocatedOperand::cast(input)->IsUsedAtStart()) {
+ use_pos = curr_position;
+ } else {
+ use_pos = curr_position.InstructionEnd();
+ }
+
+ Use(block_start_position, use_pos, input, NULL);
+ if (input->IsUnallocated()) {
+ live->Add(UnallocatedOperand::cast(input)->virtual_register());
+ }
+ }
+
+ for (size_t i = 0; i < instr->TempCount(); i++) {
+ InstructionOperand* temp = instr->TempAt(i);
+ if (instr->ClobbersTemps()) {
+ if (temp->IsRegister()) continue;
+ if (temp->IsUnallocated()) {
+ UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
+ if (temp_unalloc->HasFixedPolicy()) {
+ continue;
+ }
+ }
+ }
+ Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+ Define(curr_position, temp, NULL);
+ }
+ }
+ }
+}
+
+
+void RegisterAllocator::ResolvePhis(BasicBlock* block) {
+ for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+ Node* phi = *i;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+
+ UnallocatedOperand* phi_operand =
+ new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+ phi_operand->set_virtual_register(phi->id());
+
+ int j = 0;
+ Node::Inputs inputs = phi->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter, ++j) {
+ Node* op = *iter;
+ // TODO(mstarzinger): Use a ValueInputIterator instead.
+ if (j >= block->PredecessorCount()) continue;
+ UnallocatedOperand* operand =
+ new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+ operand->set_virtual_register(op->id());
+ BasicBlock* cur_block = block->PredecessorAt(j);
+ // The gap move must be added without the special processing done in
+ // AddConstraintsGapMove.
+ code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
+ phi_operand);
+
+ Instruction* branch = InstructionAt(cur_block->last_instruction_index());
+ DCHECK(!branch->HasPointerMap());
+ USE(branch);
+ }
+
+ LiveRange* live_range = LiveRangeFor(phi->id());
+ BlockStartInstruction* block_start = code()->GetBlockStart(block);
+ block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+ ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+ live_range->SetSpillStartIndex(block->first_instruction_index());
+
+ // We use the phi-ness of some nodes in some later heuristics.
+ live_range->set_is_phi(true);
+ if (!block->IsLoopHeader()) {
+ live_range->set_is_non_loop_phi(true);
+ }
+ }
+}
+
+
+bool RegisterAllocator::Allocate() {
+ assigned_registers_ = new (code_zone())
+ BitVector(Register::NumAllocatableRegisters(), code_zone());
+ assigned_double_registers_ = new (code_zone())
+ BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
+ MeetRegisterConstraints();
+ if (!AllocationOk()) return false;
+ ResolvePhis();
+ BuildLiveRanges();
+ AllocateGeneralRegisters();
+ if (!AllocationOk()) return false;
+ AllocateDoubleRegisters();
+ if (!AllocationOk()) return false;
+ PopulatePointerMaps();
+ ConnectRanges();
+ ResolveControlFlow();
+ code()->frame()->SetAllocatedRegisters(assigned_registers_);
+ code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+ return true;
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints() {
+ RegisterAllocatorPhase phase("L_Register constraints", this);
+ for (int i = 0; i < code()->BasicBlockCount(); ++i) {
+ MeetRegisterConstraints(code()->BlockAt(i));
+ if (!AllocationOk()) return;
+ }
+}
+
+
+void RegisterAllocator::ResolvePhis() {
+ RegisterAllocatorPhase phase("L_Resolve phis", this);
+
+ // Process the blocks in reverse order.
+ for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
+ ResolvePhis(code()->BlockAt(i));
+ }
+}
+
+
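+ // Finds the child ranges of `range` that cover the end of `pred` and the
+ // start of `block`; if they were assigned different locations, a gap move
+ // is inserted on the edge from `pred` to `block`.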
+void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
+ BasicBlock* pred) {
+ LifetimePosition pred_end =
+ LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+ LifetimePosition cur_start =
+ LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+ LiveRange* pred_cover = NULL;
+ LiveRange* cur_cover = NULL;
+ LiveRange* cur_range = range;
+ while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+ if (cur_range->CanCover(cur_start)) {
+ DCHECK(cur_cover == NULL);
+ cur_cover = cur_range;
+ }
+ if (cur_range->CanCover(pred_end)) {
+ DCHECK(pred_cover == NULL);
+ pred_cover = cur_range;
+ }
+ cur_range = cur_range->next();
+ }
+
+ if (cur_cover->IsSpilled()) return;
+ DCHECK(pred_cover != NULL && cur_cover != NULL);
+ if (pred_cover != cur_cover) {
+ InstructionOperand* pred_op =
+ pred_cover->CreateAssignedOperand(code_zone());
+ InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
+ if (!pred_op->Equals(cur_op)) {
+ GapInstruction* gap = NULL;
+ if (block->PredecessorCount() == 1) {
+ gap = code()->GapAt(block->first_instruction_index());
+ } else {
+ DCHECK(pred->SuccessorCount() == 1);
+ gap = GetLastGap(pred);
+
+ Instruction* branch = InstructionAt(pred->last_instruction_index());
+ DCHECK(!branch->HasPointerMap());
+ USE(branch);
+ }
+ gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+ ->AddMove(pred_op, cur_op, code_zone());
+ }
+ }
+}
+
+
+ParallelMove* RegisterAllocator::GetConnectingParallelMove(
+ LifetimePosition pos) {
+ int index = pos.InstructionIndex();
+ if (code()->IsGapAt(index)) {
+ GapInstruction* gap = code()->GapAt(index);
+ return gap->GetOrCreateParallelMove(
+ pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
+ code_zone());
+ }
+ int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+ return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
+ (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
+ code_zone());
+}
+
+
+BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
+ return code()->GetBasicBlock(pos.InstructionIndex());
+}
+
+
+void RegisterAllocator::ConnectRanges() {
+ RegisterAllocatorPhase phase("L_Connect ranges", this);
+ for (int i = 0; i < live_ranges()->length(); ++i) {
+ LiveRange* first_range = live_ranges()->at(i);
+ if (first_range == NULL || first_range->parent() != NULL) continue;
+
+ LiveRange* second_range = first_range->next();
+ while (second_range != NULL) {
+ LifetimePosition pos = second_range->Start();
+
+ if (!second_range->IsSpilled()) {
+ // Add a gap move if the two live ranges touch and there is no block
+ // boundary.
+ if (first_range->End().Value() == pos.Value()) {
+ bool should_insert = true;
+ if (IsBlockBoundary(pos)) {
+ should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+ }
+ if (should_insert) {
+ ParallelMove* move = GetConnectingParallelMove(pos);
+ InstructionOperand* prev_operand =
+ first_range->CreateAssignedOperand(code_zone());
+ InstructionOperand* cur_operand =
+ second_range->CreateAssignedOperand(code_zone());
+ move->AddMove(prev_operand, cur_operand, code_zone());
+ }
+ }
+ }
+
+ first_range = second_range;
+ second_range = second_range->next();
+ }
+ }
+}
+
+
+bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+ if (block->PredecessorCount() != 1) return false;
+ return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
+}
+
+
+void RegisterAllocator::ResolveControlFlow() {
+ RegisterAllocatorPhase phase("L_Resolve control flow", this);
+ for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
+ BasicBlock* block = code()->BlockAt(block_id);
+ if (CanEagerlyResolveControlFlow(block)) continue;
+ BitVector* live = live_in_sets_[block->rpo_number_];
+ BitVector::Iterator iterator(live);
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ BasicBlock::Predecessors predecessors = block->predecessors();
+ for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+ i != predecessors.end(); ++i) {
+ BasicBlock* cur = *i;
+ LiveRange* cur_range = LiveRangeFor(operand_index);
+ ResolveControlFlow(cur_range, block, cur);
+ }
+ iterator.Advance();
+ }
+ }
+}
+
+
+void RegisterAllocator::BuildLiveRanges() {
+ RegisterAllocatorPhase phase("L_Build live ranges", this);
+ InitializeLivenessAnalysis();
+ // Process the blocks in reverse order.
+ for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
+ --block_id) {
+ BasicBlock* block = code()->BlockAt(block_id);
+ BitVector* live = ComputeLiveOut(block);
+ // Initially consider all live_out values live for the entire block. We
+ // will shorten these intervals if necessary.
+ AddInitialIntervals(block, live);
+
+ // Process the instructions in reverse order, generating and killing
+ // live values.
+ ProcessInstructions(block, live);
+ // All phi output operands are killed by this block.
+ for (BasicBlock::const_iterator i = block->begin(); i != block->end();
+ ++i) {
+ Node* phi = *i;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+
+ // The live range interval already ends at the first instruction of the
+ // block.
+ live->Remove(phi->id());
+
+ InstructionOperand* hint = NULL;
+ InstructionOperand* phi_operand = NULL;
+ GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
+
+ // TODO(titzer): no need to create the parallel move if it doesn't exist.
+ ParallelMove* move =
+ gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+ for (int j = 0; j < move->move_operands()->length(); ++j) {
+ InstructionOperand* to = move->move_operands()->at(j).destination();
+ if (to->IsUnallocated() &&
+ UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
+ hint = move->move_operands()->at(j).source();
+ phi_operand = to;
+ break;
+ }
+ }
+ DCHECK(hint != NULL);
+
+ LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+ block->first_instruction_index());
+ Define(block_start, phi_operand, hint);
+ }
+
+ // Now live is the live_in set for this block, except that it does not
+ // include values live out on backward successor edges.
+ live_in_sets_[block_id] = live;
+
+ if (block->IsLoopHeader()) {
+ // Add a live range stretching from the first loop instruction to the last
+ // for each value live on entry to the header.
+ BitVector::Iterator iterator(live);
+ LifetimePosition start = LifetimePosition::FromInstructionIndex(
+ block->first_instruction_index());
+ int end_index =
+ code()->BlockAt(block->loop_end_)->last_instruction_index();
+ LifetimePosition end =
+ LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ LiveRange* range = LiveRangeFor(operand_index);
+ range->EnsureInterval(start, end, zone());
+ iterator.Advance();
+ }
+
+ // Insert all values into the live_in sets of all blocks in the loop.
+ for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
+ live_in_sets_[i]->Union(*live);
+ }
+ }
+
+#ifdef DEBUG
+ if (block_id == 0) {
+ BitVector::Iterator iterator(live);
+ bool found = false;
+ while (!iterator.Done()) {
+ found = true;
+ int operand_index = iterator.Current();
+ PrintF("Register allocator error: live v%d reached first block.\n",
+ operand_index);
+ LiveRange* range = LiveRangeFor(operand_index);
+ PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
+ CompilationInfo* info = code()->linkage()->info();
+ if (info->IsStub()) {
+ if (info->code_stub() == NULL) {
+ PrintF("\n");
+ } else {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ PrintF(" (function: %s)\n", CodeStub::MajorName(major_key, false));
+ }
+ } else {
+ DCHECK(info->IsOptimizing());
+ AllowHandleDereference allow_deref;
+ PrintF(" (function: %s)\n",
+ info->function()->debug_name()->ToCString().get());
+ }
+ iterator.Advance();
+ }
+ DCHECK(!found);
+ }
+#endif
+ }
+
+ for (int i = 0; i < live_ranges_.length(); ++i) {
+ if (live_ranges_[i] != NULL) {
+ live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+
+ // TODO(bmeurer): This is a horrible hack to make sure that for constant
+ // live ranges, every use requires the constant to be in a register.
+ // Without this hack, all uses with "any" policy would get the constant
+ // operand assigned.
+ LiveRange* range = live_ranges_[i];
+ if (range->HasAllocatedSpillOperand() &&
+ range->GetSpillOperand()->IsConstant()) {
+ for (UsePosition* pos = range->first_pos(); pos != NULL;
+ pos = pos->next_) {
+ pos->register_beneficial_ = true;
+ pos->requires_reg_ = true;
+ }
+ }
+ }
+ }
+}
+
+
+bool RegisterAllocator::SafePointsAreInOrder() const {
+ int safe_point = 0;
+ const PointerMapDeque* pointer_maps = code()->pointer_maps();
+ for (PointerMapDeque::const_iterator it = pointer_maps->begin();
+ it != pointer_maps->end(); ++it) {
+ PointerMap* map = *it;
+ if (safe_point > map->instruction_position()) return false;
+ safe_point = map->instruction_position();
+ }
+ return true;
+}
+
+
+void RegisterAllocator::PopulatePointerMaps() {
+ RegisterAllocatorPhase phase("L_Populate pointer maps", this);
+
+ DCHECK(SafePointsAreInOrder());
+
+ // Iterate over all safe point positions and record a pointer
+ // for all spilled live ranges at this point.
+ int last_range_start = 0;
+ const PointerMapDeque* pointer_maps = code()->pointer_maps();
+ PointerMapDeque::const_iterator first_it = pointer_maps->begin();
+ for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+ LiveRange* range = live_ranges()->at(range_idx);
+ if (range == NULL) continue;
+ // Iterate over the first parts of multi-part live ranges.
+ if (range->parent() != NULL) continue;
+ // Skip non-reference values.
+ if (!HasTaggedValue(range->id())) continue;
+ // Skip empty live ranges.
+ if (range->IsEmpty()) continue;
+
+ // Find the extent of the range and its children.
+ int start = range->Start().InstructionIndex();
+ int end = 0;
+ for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+ LifetimePosition this_end = cur->End();
+ if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+ DCHECK(cur->Start().InstructionIndex() >= start);
+ }
+
+ // Most of the ranges are in order, but not all. Keep an eye on when they
+ // step backwards and reset the first_it so we don't miss any safe points.
+ if (start < last_range_start) first_it = pointer_maps->begin();
+ last_range_start = start;
+
+ // Step across all the safe points that are before the start of this range,
+ // recording how far we step in order to save doing this for the next range.
+ for (; first_it != pointer_maps->end(); ++first_it) {
+ PointerMap* map = *first_it;
+ if (map->instruction_position() >= start) break;
+ }
+
+ // Step through the safe points to see whether they are in the range.
+ for (PointerMapDeque::const_iterator it = first_it;
+ it != pointer_maps->end(); ++it) {
+ PointerMap* map = *it;
+ int safe_point = map->instruction_position();
+
+ // The safe points are sorted so we can stop searching here.
+ if (safe_point - 1 > end) break;
+
+ // Advance to the next active range that covers the current
+ // safe point position.
+ LifetimePosition safe_point_pos =
+ LifetimePosition::FromInstructionIndex(safe_point);
+ LiveRange* cur = range;
+ while (cur != NULL && !cur->Covers(safe_point_pos)) {
+ cur = cur->next();
+ }
+ if (cur == NULL) continue;
+
+ // Check if the live range is spilled and the safe point is after
+ // the spill position.
+ if (range->HasAllocatedSpillOperand() &&
+ safe_point >= range->spill_start_index() &&
+ !range->GetSpillOperand()->IsConstant()) {
+ TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+ range->id(), range->spill_start_index(), safe_point);
+ map->RecordPointer(range->GetSpillOperand(), code_zone());
+ }
+
+ if (!cur->IsSpilled()) {
+ TraceAlloc(
+ "Pointer in register for range %d (start at %d) "
+ "at safe point %d\n",
+ cur->id(), cur->Start().Value(), safe_point);
+ InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
+ DCHECK(!operand->IsStackSlot());
+ map->RecordPointer(operand, code_zone());
+ }
+ }
+ }
+}
+
+
+void RegisterAllocator::AllocateGeneralRegisters() {
+ RegisterAllocatorPhase phase("L_Allocate general registers", this);
+ num_registers_ = Register::NumAllocatableRegisters();
+ mode_ = GENERAL_REGISTERS;
+ AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateDoubleRegisters() {
+ RegisterAllocatorPhase phase("L_Allocate double registers", this);
+ num_registers_ = DoubleRegister::NumAllocatableRegisters();
+ mode_ = DOUBLE_REGISTERS;
+ AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateRegisters() {
+ DCHECK(unhandled_live_ranges_.is_empty());
+
+ for (int i = 0; i < live_ranges_.length(); ++i) {
+ if (live_ranges_[i] != NULL) {
+ if (live_ranges_[i]->Kind() == mode_) {
+ AddToUnhandledUnsorted(live_ranges_[i]);
+ }
+ }
+ }
+ SortUnhandled();
+ DCHECK(UnhandledIsSorted());
+
+ DCHECK(reusable_slots_.is_empty());
+ DCHECK(active_live_ranges_.is_empty());
+ DCHECK(inactive_live_ranges_.is_empty());
+
+ if (mode_ == DOUBLE_REGISTERS) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ LiveRange* current = fixed_double_live_ranges_.at(i);
+ if (current != NULL) {
+ AddToInactive(current);
+ }
+ }
+ } else {
+ DCHECK(mode_ == GENERAL_REGISTERS);
+ for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+ LiveRange* current = fixed_live_ranges_.at(i);
+ if (current != NULL) {
+ AddToInactive(current);
+ }
+ }
+ }
+
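+ // Main linear-scan loop: repeatedly take the unhandled range with the
+ // smallest start position, retire or deactivate ranges that no longer
+ // cover that position, then try to give the range a free register and
+ // fall back to splitting/spilling if every register is blocked.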
+ while (!unhandled_live_ranges_.is_empty()) {
+ DCHECK(UnhandledIsSorted());
+ LiveRange* current = unhandled_live_ranges_.RemoveLast();
+ DCHECK(UnhandledIsSorted());
+ LifetimePosition position = current->Start();
+#ifdef DEBUG
+ allocation_finger_ = position;
+#endif
+ TraceAlloc("Processing interval %d start=%d\n", current->id(),
+ position.Value());
+
+ if (current->HasAllocatedSpillOperand()) {
+ TraceAlloc("Live range %d already has a spill operand\n", current->id());
+ LifetimePosition next_pos = position;
+ if (code()->IsGapAt(next_pos.InstructionIndex())) {
+ next_pos = next_pos.NextInstruction();
+ }
+ UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == NULL) {
+ Spill(current);
+ continue;
+ } else if (pos->pos().Value() >
+ current->Start().NextInstruction().Value()) {
+ // Do not spill the live range eagerly if the use position that can
+ // benefit from the register is too close to the start of the live range.
+ SpillBetween(current, current->Start(), pos->pos());
+ if (!AllocationOk()) return;
+ DCHECK(UnhandledIsSorted());
+ continue;
+ }
+ }
+
+ for (int i = 0; i < active_live_ranges_.length(); ++i) {
+ LiveRange* cur_active = active_live_ranges_.at(i);
+ if (cur_active->End().Value() <= position.Value()) {
+ ActiveToHandled(cur_active);
+ --i; // The live range was removed from the list of active live ranges.
+ } else if (!cur_active->Covers(position)) {
+ ActiveToInactive(cur_active);
+ --i; // The live range was removed from the list of active live ranges.
+ }
+ }
+
+ for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+ LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+ if (cur_inactive->End().Value() <= position.Value()) {
+ InactiveToHandled(cur_inactive);
+ --i; // Live range was removed from the list of inactive live ranges.
+ } else if (cur_inactive->Covers(position)) {
+ InactiveToActive(cur_inactive);
+ --i; // Live range was removed from the list of inactive live ranges.
+ }
+ }
+
+ DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+ bool result = TryAllocateFreeReg(current);
+ if (!AllocationOk()) return;
+
+ if (!result) AllocateBlockedReg(current);
+ if (!AllocationOk()) return;
+
+ if (current->HasRegisterAssigned()) {
+ AddToActive(current);
+ }
+ }
+
+ reusable_slots_.Rewind(0);
+ active_live_ranges_.Rewind(0);
+ inactive_live_ranges_.Rewind(0);
+}
+
+
+const char* RegisterAllocator::RegisterName(int allocation_index) {
+ if (mode_ == GENERAL_REGISTERS) {
+ return Register::AllocationIndexToString(allocation_index);
+ } else {
+ return DoubleRegister::AllocationIndexToString(allocation_index);
+ }
+}
+
+
+void RegisterAllocator::TraceAlloc(const char* msg, ...) {
+ if (FLAG_trace_alloc) {
+ va_list arguments;
+ va_start(arguments, msg);
+ base::OS::VPrint(msg, arguments);
+ va_end(arguments);
+ }
+}
+
+
+bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
+ return code()->IsReference(virtual_register);
+}
+
+
+RegisterKind RegisterAllocator::RequiredRegisterKind(
+ int virtual_register) const {
+ return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+ : GENERAL_REGISTERS;
+}
+
+
+void RegisterAllocator::AddToActive(LiveRange* range) {
+ TraceAlloc("Add live range %d to active\n", range->id());
+ active_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToInactive(LiveRange* range) {
+ TraceAlloc("Add live range %d to inactive\n", range->id());
+ inactive_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
+ if (range == NULL || range->IsEmpty()) return;
+ DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+ DCHECK(allocation_finger_.Value() <= range->Start().Value());
+ for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+ LiveRange* cur_range = unhandled_live_ranges_.at(i);
+ if (range->ShouldBeAllocatedBefore(cur_range)) {
+ TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+ unhandled_live_ranges_.InsertAt(i + 1, range, zone());
+ DCHECK(UnhandledIsSorted());
+ return;
+ }
+ }
+ TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+ unhandled_live_ranges_.InsertAt(0, range, zone());
+ DCHECK(UnhandledIsSorted());
+}
+
+
+void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+ if (range == NULL || range->IsEmpty()) return;
+ DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+ TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+ unhandled_live_ranges_.Add(range, zone());
+}
+
+
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+ DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
+ !(*b)->ShouldBeAllocatedBefore(*a));
+ if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+ if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+ return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list. This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void RegisterAllocator::SortUnhandled() {
+ TraceAlloc("Sort unhandled\n");
+ unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool RegisterAllocator::UnhandledIsSorted() {
+ int len = unhandled_live_ranges_.length();
+ for (int i = 1; i < len; i++) {
+ LiveRange* a = unhandled_live_ranges_.at(i - 1);
+ LiveRange* b = unhandled_live_ranges_.at(i);
+ if (a->Start().Value() < b->Start().Value()) return false;
+ }
+ return true;
+}
+
+
+void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+ // Check that we are the last range.
+ if (range->next() != NULL) return;
+
+ if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+ InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+ if (spill_operand->IsConstant()) return;
+ if (spill_operand->index() >= 0) {
+ reusable_slots_.Add(range, zone());
+ }
+}
+
+
+InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
+ if (reusable_slots_.is_empty()) return NULL;
+ if (reusable_slots_.first()->End().Value() >
+ range->TopLevel()->Start().Value()) {
+ return NULL;
+ }
+ InstructionOperand* result =
+ reusable_slots_.first()->TopLevel()->GetSpillOperand();
+ reusable_slots_.Remove(0);
+ return result;
+}
+
+
+void RegisterAllocator::ActiveToHandled(LiveRange* range) {
+ DCHECK(active_live_ranges_.Contains(range));
+ active_live_ranges_.RemoveElement(range);
+ TraceAlloc("Moving live range %d from active to handled\n", range->id());
+ FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::ActiveToInactive(LiveRange* range) {
+ DCHECK(active_live_ranges_.Contains(range));
+ active_live_ranges_.RemoveElement(range);
+ inactive_live_ranges_.Add(range, zone());
+ TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void RegisterAllocator::InactiveToHandled(LiveRange* range) {
+ DCHECK(inactive_live_ranges_.Contains(range));
+ inactive_live_ranges_.RemoveElement(range);
+ TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+ FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::InactiveToActive(LiveRange* range) {
+ DCHECK(inactive_live_ranges_.Contains(range));
+ inactive_live_ranges_.RemoveElement(range);
+ active_live_ranges_.Add(range, zone());
+ TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+ Register::kMaxNumAllocatableRegisters);
+
+
+bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
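+ // free_until_pos[r] is how long register r stays free for `current`:
+ // active ranges block their register immediately, while inactive ranges
+ // only block it from their first intersection with `current` onwards.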
+ for (int i = 0; i < num_registers_; i++) {
+ free_until_pos[i] = LifetimePosition::MaxPosition();
+ }
+
+ for (int i = 0; i < active_live_ranges_.length(); ++i) {
+ LiveRange* cur_active = active_live_ranges_.at(i);
+ free_until_pos[cur_active->assigned_register()] =
+ LifetimePosition::FromInstructionIndex(0);
+ }
+
+ for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+ LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+ DCHECK(cur_inactive->End().Value() > current->Start().Value());
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+ int cur_reg = cur_inactive->assigned_register();
+ free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+ }
+
+ InstructionOperand* hint = current->FirstHint();
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ int register_index = hint->index();
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index), free_until_pos[register_index].Value(),
+ current->id(), current->End().Value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index), current->id());
+ SetLiveRangeAssignedRegister(current, register_index);
+ return true;
+ }
+ }
+
+ // Find the register which stays free for the longest time.
+ int reg = 0;
+ for (int i = 1; i < RegisterCount(); ++i) {
+ if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+ reg = i;
+ }
+ }
+
+ LifetimePosition pos = free_until_pos[reg];
+
+ if (pos.Value() <= current->Start().Value()) {
+ // All registers are blocked.
+ return false;
+ }
+
+ if (pos.Value() < current->End().Value()) {
+ // Register reg is available at the range start but becomes blocked before
+ // the range end. Split current at the position where it becomes blocked.
+ LiveRange* tail = SplitRangeAt(current, pos);
+ if (!AllocationOk()) return false;
+ AddToUnhandledSorted(tail);
+ }
+
+
+ // Register reg is available at the range start and is free until
+ // the range end.
+ DCHECK(pos.Value() >= current->End().Value());
+ TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
+ current->id());
+ SetLiveRangeAssignedRegister(current, reg);
+
+ return true;
+}
+
+
+void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
+ if (register_use == NULL) {
+ // There is no use in the current live range that requires a register.
+ // We can just spill it.
+ Spill(current);
+ return;
+ }
+
+
+ LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
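+ // use_pos[r] is the first position where the current occupant of register
+ // r actually needs it again (so `current` could use r until then by
+ // spilling the occupant); block_pos[r] is where r is blocked by a fixed
+ // range and cannot be taken at all.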
+ for (int i = 0; i < num_registers_; i++) {
+ use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+ }
+
+ for (int i = 0; i < active_live_ranges_.length(); ++i) {
+ LiveRange* range = active_live_ranges_[i];
+ int cur_reg = range->assigned_register();
+ if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+ block_pos[cur_reg] = use_pos[cur_reg] =
+ LifetimePosition::FromInstructionIndex(0);
+ } else {
+ UsePosition* next_use =
+ range->NextUsePositionRegisterIsBeneficial(current->Start());
+ if (next_use == NULL) {
+ use_pos[cur_reg] = range->End();
+ } else {
+ use_pos[cur_reg] = next_use->pos();
+ }
+ }
+ }
+
+ for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+ LiveRange* range = inactive_live_ranges_.at(i);
+ DCHECK(range->End().Value() > current->Start().Value());
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+ int cur_reg = range->assigned_register();
+ if (range->IsFixed()) {
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ } else {
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
+ }
+
+ int reg = 0;
+ for (int i = 1; i < RegisterCount(); ++i) {
+ if (use_pos[i].Value() > use_pos[reg].Value()) {
+ reg = i;
+ }
+ }
+
+ LifetimePosition pos = use_pos[reg];
+
+ if (pos.Value() < register_use->pos().Value()) {
+ // All registers are blocked before the first use that requires a register.
+ // Spill the starting part of the live range up to that use.
+ SpillBetween(current, current->Start(), register_use->pos());
+ return;
+ }
+
+ if (block_pos[reg].Value() < current->End().Value()) {
+ // Register becomes blocked before the current range end. Split before that
+ // position.
+ LiveRange* tail = SplitBetween(current, current->Start(),
+ block_pos[reg].InstructionStart());
+ if (!AllocationOk()) return;
+ AddToUnhandledSorted(tail);
+ }
+
+ // Register reg is not blocked for the whole range.
+ DCHECK(block_pos[reg].Value() >= current->End().Value());
+ TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
+ current->id());
+ SetLiveRangeAssignedRegister(current, reg);
+
+ // This register was not free. Thus we need to find and spill
+ // parts of active and inactive live ranges that use the same register
+ // at the same lifetime positions as current.
+ SplitAndSpillIntersecting(current);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+ LiveRange* range, LifetimePosition pos) {
+ BasicBlock* block = GetBlock(pos.InstructionStart());
+ BasicBlock* loop_header =
+ block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
+
+ if (loop_header == NULL) return pos;
+
+ UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+ while (loop_header != NULL) {
+ // We are going to spill the live range inside the loop.
+ // If possible, try to move the spilling position backwards to the loop
+ // header. This will reduce the number of memory moves on the back edge.
+ LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+ loop_header->first_instruction_index());
+
+ if (range->Covers(loop_start)) {
+ if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+ // No register-beneficial use inside the loop before pos.
+ pos = loop_start;
+ }
+ }
+
+ // Try hoisting out to an outer loop.
+ loop_header = code()->GetContainingLoop(loop_header);
+ }
+
+ return pos;
+}
+
+
+void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+ DCHECK(current->HasRegisterAssigned());
+ int reg = current->assigned_register();
+ LifetimePosition split_pos = current->Start();
+ for (int i = 0; i < active_live_ranges_.length(); ++i) {
+ LiveRange* range = active_live_ranges_[i];
+ if (range->assigned_register() == reg) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+ if (next_pos == NULL) {
+ SpillAfter(range, spill_pos);
+ } else {
+ // When spilling between spill_pos and next_pos, ensure that the range
+ // remains spilled at least until the start of the current live range.
+ // This guarantees that we will not introduce new unhandled ranges that
+ // start before the current range, as that would violate the allocation
+ // invariant and lead to an inconsistent state of active and inactive
+ // live ranges: ranges are allocated in order of their start positions,
+ // and ranges are retired from active/inactive when the start of the
+ // current live range is larger than their end.
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ }
+ if (!AllocationOk()) return;
+ ActiveToHandled(range);
+ --i;
+ }
+ }
+
+ for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+ LiveRange* range = inactive_live_ranges_[i];
+ DCHECK(range->End().Value() > current->Start().Value());
+ if (range->assigned_register() == reg && !range->IsFixed()) {
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == NULL) {
+ SpillAfter(range, split_pos);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection);
+ }
+ if (!AllocationOk()) return;
+ InactiveToHandled(range);
+ --i;
+ }
+ }
+ }
+}
+
+
+bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
+ return pos.IsInstructionStart() &&
+ InstructionAt(pos.InstructionIndex())->IsBlockStart();
+}
+
+
+LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
+ LifetimePosition pos) {
+ DCHECK(!range->IsFixed());
+ TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+ if (pos.Value() <= range->Start().Value()) return range;
+
+ // We can't properly connect live ranges if the split occurred at the end
+ // of a control instruction.
+ DCHECK(pos.IsInstructionStart() ||
+ !InstructionAt(pos.InstructionIndex())->IsControl());
+
+ int vreg = GetVirtualRegister();
+ if (!AllocationOk()) return NULL;
+ LiveRange* result = LiveRangeFor(vreg);
+ range->SplitAt(pos, result, zone());
+ return result;
+}
+
+
+LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
+ DCHECK(!range->IsFixed());
+ TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+ range->id(), start.Value(), end.Value());
+
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+ DCHECK(split_pos.Value() >= start.Value());
+ return SplitRangeAt(range, split_pos);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
+ LifetimePosition end) {
+ int start_instr = start.InstructionIndex();
+ int end_instr = end.InstructionIndex();
+ DCHECK(start_instr <= end_instr);
+
+ // We have no choice
+ if (start_instr == end_instr) return end;
+
+ BasicBlock* start_block = GetBlock(start);
+ BasicBlock* end_block = GetBlock(end);
+
+ if (end_block == start_block) {
+ // The interval is split in the same basic block. Split at the latest
+ // possible position.
+ return end;
+ }
+
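+ // Otherwise prefer to split at the header of the outermost loop that
+ // contains end_block but not start_block, so the spill code stays outside
+ // the loop body.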
+ BasicBlock* block = end_block;
+ // Find header of outermost loop.
+ // TODO(titzer): fix redundancy below.
+ while (code()->GetContainingLoop(block) != NULL &&
+ code()->GetContainingLoop(block)->rpo_number_ >
+ start_block->rpo_number_) {
+ block = code()->GetContainingLoop(block);
+ }
+
+ // We did not find any suitable outer loop. Split at the latest possible
+ // position unless end_block is a loop header itself.
+ if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+ return LifetimePosition::FromInstructionIndex(
+ block->first_instruction_index());
+}
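
As an editorial aside (not part of the original patch), here is a minimal standalone sketch of the split-position choice above. The struct and the helper name OptimalSplitInstruction are hypothetical stand-ins for BasicBlock and the containing-loop lookup, and it returns an instruction index rather than a LifetimePosition.

// Illustrative sketch only; mirrors FindOptimalSplitPos for the case where
// start and end already lie in different blocks.
#include <cassert>
#include <cstddef>

struct Block {
  int rpo;                      // rpo_number_
  Block* containing_loop;       // innermost enclosing loop header, or NULL
  bool is_loop_header;
  int first_instruction_index;
};

int OptimalSplitInstruction(Block* start_block, Block* end_block,
                            int end_instr) {
  Block* block = end_block;
  // Find the header of the outermost loop that starts after start_block.
  while (block->containing_loop != NULL &&
         block->containing_loop->rpo > start_block->rpo) {
    block = block->containing_loop;
  }
  // No suitable outer loop: split as late as possible.
  if (block == end_block && !end_block->is_loop_header) return end_instr;
  return block->first_instruction_index;
}

int main() {
  Block before = {0, NULL, false, 0};    // block preceding the loop
  Block header = {1, NULL, true, 10};    // loop header
  Block body = {2, &header, false, 20};  // block inside the loop
  // A range starting before the loop and ending inside it is split at the
  // loop header's first instruction rather than deep inside the loop.
  assert(OptimalSplitInstruction(&before, &body, 25) == 10);
  // With no loop in between, the latest position wins.
  Block after = {3, NULL, false, 30};
  assert(OptimalSplitInstruction(&before, &after, 35) == 35);
  return 0;
}
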
+
+
+void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+ LiveRange* second_part = SplitRangeAt(range, pos);
+ if (!AllocationOk()) return;
+ Spill(second_part);
+}
+
+
+void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end) {
+ SpillBetweenUntil(range, start, start, end);
+}
+
+
+void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
+ CHECK(start.Value() < end.Value());
+ LiveRange* second_part = SplitRangeAt(range, start);
+ if (!AllocationOk()) return;
+
+ if (second_part->Start().Value() < end.Value()) {
+ // The split result intersects with [start, end[.
+ // Split it at position between ]start+1, end[, spill the middle part
+ // and put the rest to unhandled.
+ LiveRange* third_part = SplitBetween(
+ second_part, Max(second_part->Start().InstructionEnd(), until),
+ end.PrevInstruction().InstructionEnd());
+ if (!AllocationOk()) return;
+
+ DCHECK(third_part != second_part);
+
+ Spill(second_part);
+ AddToUnhandledSorted(third_part);
+ } else {
+ // The split result does not intersect with [start, end[.
+ // Nothing to spill. Just put it to unhandled as whole.
+ AddToUnhandledSorted(second_part);
+ }
+}
+
+
+void RegisterAllocator::Spill(LiveRange* range) {
+ DCHECK(!range->IsSpilled());
+ TraceAlloc("Spilling live range %d\n", range->id());
+ LiveRange* first = range->TopLevel();
+
+ if (!first->HasAllocatedSpillOperand()) {
+ InstructionOperand* op = TryReuseSpillSlot(range);
+ if (op == NULL) {
+ // Allocate a new operand referring to the spill slot.
+ RegisterKind kind = range->Kind();
+ int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+ if (kind == DOUBLE_REGISTERS) {
+ op = DoubleStackSlotOperand::Create(index, zone());
+ } else {
+ DCHECK(kind == GENERAL_REGISTERS);
+ op = StackSlotOperand::Create(index, zone());
+ }
+ }
+ first->SetSpillOperand(op);
+ }
+ range->MakeSpilled(code_zone());
+}
+
+
+int RegisterAllocator::RegisterCount() const { return num_registers_; }
+
+
+#ifdef DEBUG
+
+
+void RegisterAllocator::Verify() const {
+ for (int i = 0; i < live_ranges()->length(); ++i) {
+ LiveRange* current = live_ranges()->at(i);
+ if (current != NULL) current->Verify();
+ }
+}
+
+
+#endif
+
+
+void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
+ int reg) {
+ if (range->Kind() == DOUBLE_REGISTERS) {
+ assigned_double_registers_->Add(reg);
+ } else {
+ DCHECK(range->Kind() == GENERAL_REGISTERS);
+ assigned_registers_->Add(reg);
+ }
+ range->set_assigned_register(reg, code_zone());
+}
+
+
+RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
+ RegisterAllocator* allocator)
+ : CompilationPhase(name, allocator->code()->linkage()->info()),
+ allocator_(allocator) {
+ if (FLAG_turbo_stats) {
+ allocator_zone_start_allocation_size_ =
+ allocator->zone()->allocation_size();
+ }
+}
+
+
+RegisterAllocatorPhase::~RegisterAllocatorPhase() {
+ if (FLAG_turbo_stats) {
+ unsigned size = allocator_->zone()->allocation_size() -
+ allocator_zone_start_allocation_size_;
+ isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
+ }
+#ifdef DEBUG
+ if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
new file mode 100644
index 000000000..881ce37f7
--- /dev/null
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -0,0 +1,548 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class BitVector;
+class InstructionOperand;
+class UnallocatedOperand;
+class ParallelMove;
+class PointerMap;
+
+namespace compiler {
+
+enum RegisterKind {
+ UNALLOCATED_REGISTERS,
+ GENERAL_REGISTERS,
+ DOUBLE_REGISTERS
+};
+
+
+// This class represents a single point of an InstructionOperand's lifetime. For
+// each instruction there are exactly two lifetime positions: the beginning and
+// the end of the instruction. Lifetime positions for different instructions are
+// disjoint.
+class LifetimePosition {
+ public:
+ // Return the lifetime position that corresponds to the beginning of
+ // the instruction with the given index.
+ static LifetimePosition FromInstructionIndex(int index) {
+ return LifetimePosition(index * kStep);
+ }
+
+ // Returns a numeric representation of this lifetime position.
+ int Value() const { return value_; }
+
+ // Returns the index of the instruction to which this lifetime position
+ // corresponds.
+ int InstructionIndex() const {
+ DCHECK(IsValid());
+ return value_ / kStep;
+ }
+
+ // Returns true if this lifetime position corresponds to the instruction
+ // start.
+ bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
+
+ // Returns the lifetime position for the start of the instruction which
+ // corresponds to this lifetime position.
+ LifetimePosition InstructionStart() const {
+ DCHECK(IsValid());
+ return LifetimePosition(value_ & ~(kStep - 1));
+ }
+
+ // Returns the lifetime position for the end of the instruction which
+ // corresponds to this lifetime position.
+ LifetimePosition InstructionEnd() const {
+ DCHECK(IsValid());
+ return LifetimePosition(InstructionStart().Value() + kStep / 2);
+ }
+
+ // Returns the lifetime position for the beginning of the next instruction.
+ LifetimePosition NextInstruction() const {
+ DCHECK(IsValid());
+ return LifetimePosition(InstructionStart().Value() + kStep);
+ }
+
+ // Returns the lifetime position for the beginning of the previous
+ // instruction.
+ LifetimePosition PrevInstruction() const {
+ DCHECK(IsValid());
+ DCHECK(value_ > 1);
+ return LifetimePosition(InstructionStart().Value() - kStep);
+ }
+
+ // Constructs the lifetime position which does not correspond to any
+ // instruction.
+ LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+ bool IsValid() const { return value_ != -1; }
+
+ static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+ static inline LifetimePosition MaxPosition() {
+ // We have to use this kind of getter instead of static member due to
+ // crash bug in GDB.
+ return LifetimePosition(kMaxInt);
+ }
+
+ private:
+ static const int kStep = 2;
+
+ // Code relies on kStep being a power of two.
+ STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+ explicit LifetimePosition(int value) : value_(value) {}
+
+ int value_;
+};
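
A small editorial illustration (not part of the patch) of the encoding used by LifetimePosition above: with kStep == 2, the start of instruction i is the value 2 * i and its end is 2 * i + 1. The sketch below is standalone and only mirrors that arithmetic.

// Illustrative sketch only; mirrors the arithmetic of the class above.
#include <cassert>

int main() {
  const int kStep = 2;                  // LifetimePosition::kStep
  const int index = 3;
  int start = index * kStep;            // FromInstructionIndex(3).Value() == 6
  int end = start + kStep / 2;          // InstructionEnd().Value() == 7
  int next = start + kStep;             // NextInstruction().Value() == 8
  int prev = start - kStep;             // PrevInstruction().Value() == 4
  assert((start & (kStep - 1)) == 0);   // IsInstructionStart()
  assert(end == 7 && next == 8 && prev == 4);
  assert(end / kStep == index);         // InstructionIndex() recovers 3
  return 0;
}
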
+
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval : public ZoneObject {
+ public:
+ UseInterval(LifetimePosition start, LifetimePosition end)
+ : start_(start), end_(end), next_(NULL) {
+ DCHECK(start.Value() < end.Value());
+ }
+
+ LifetimePosition start() const { return start_; }
+ LifetimePosition end() const { return end_; }
+ UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+ void SplitAt(LifetimePosition pos, Zone* zone);
+
+  // If this interval intersects with {other}, returns the smallest position
+  // that belongs to both of them.
+ LifetimePosition Intersect(const UseInterval* other) const {
+ if (other->start().Value() < start_.Value()) return other->Intersect(this);
+ if (other->start().Value() < end_.Value()) return other->start();
+ return LifetimePosition::Invalid();
+ }
+
+ bool Contains(LifetimePosition point) const {
+ return start_.Value() <= point.Value() && point.Value() < end_.Value();
+ }
+
+ void set_start(LifetimePosition start) { start_ = start; }
+ void set_next(UseInterval* next) { next_ = next; }
+
+ LifetimePosition start_;
+ LifetimePosition end_;
+ UseInterval* next_;
+};
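
Another editorial aside: a standalone sketch of the half-open interval arithmetic behind UseInterval::Intersect above. The struct and function below are hypothetical mirrors (plain ints, with -1 in place of LifetimePosition::Invalid()); the first common position, when there is one, is always the later of the two starts.

// Illustrative sketch only; models [start, end[ with plain ints.
#include <cassert>

struct Interval { int start, end; };

int Intersect(const Interval& a, const Interval& b) {
  if (b.start < a.start) return Intersect(b, a);
  if (b.start < a.end) return b.start;
  return -1;
}

int main() {
  assert(Intersect({2, 8}, {6, 12}) == 6);   // overlap begins at 6
  assert(Intersect({6, 12}, {2, 8}) == 6);   // symmetric
  assert(Intersect({2, 6}, {6, 12}) == -1);  // touching intervals are disjoint
  return 0;
}
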
+
+// Representation of a use position.
+class UsePosition : public ZoneObject {
+ public:
+ UsePosition(LifetimePosition pos, InstructionOperand* operand,
+ InstructionOperand* hint);
+
+ InstructionOperand* operand() const { return operand_; }
+ bool HasOperand() const { return operand_ != NULL; }
+
+ InstructionOperand* hint() const { return hint_; }
+ bool HasHint() const;
+ bool RequiresRegister() const;
+ bool RegisterIsBeneficial() const;
+
+ LifetimePosition pos() const { return pos_; }
+ UsePosition* next() const { return next_; }
+
+ void set_next(UsePosition* next) { next_ = next; }
+
+ InstructionOperand* const operand_;
+ InstructionOperand* const hint_;
+ LifetimePosition const pos_;
+ UsePosition* next_;
+ bool requires_reg_;
+ bool register_beneficial_;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange : public ZoneObject {
+ public:
+ static const int kInvalidAssignment = 0x7fffffff;
+
+ LiveRange(int id, Zone* zone);
+
+ UseInterval* first_interval() const { return first_interval_; }
+ UsePosition* first_pos() const { return first_pos_; }
+ LiveRange* parent() const { return parent_; }
+ LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+ LiveRange* next() const { return next_; }
+ bool IsChild() const { return parent() != NULL; }
+ int id() const { return id_; }
+ bool IsFixed() const { return id_ < 0; }
+ bool IsEmpty() const { return first_interval() == NULL; }
+ InstructionOperand* CreateAssignedOperand(Zone* zone);
+ int assigned_register() const { return assigned_register_; }
+ int spill_start_index() const { return spill_start_index_; }
+ void set_assigned_register(int reg, Zone* zone);
+ void MakeSpilled(Zone* zone);
+ bool is_phi() const { return is_phi_; }
+ void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
+ bool is_non_loop_phi() const { return is_non_loop_phi_; }
+ void set_is_non_loop_phi(bool is_non_loop_phi) {
+ is_non_loop_phi_ = is_non_loop_phi;
+ }
+
+ // Returns use position in this live range that follows both start
+ // and last processed use position.
+ // Modifies internal state of live range!
+ UsePosition* NextUsePosition(LifetimePosition start);
+
+ // Returns use position for which register is required in this live
+ // range and which follows both start and last processed use position
+ // Modifies internal state of live range!
+ UsePosition* NextRegisterPosition(LifetimePosition start);
+
+ // Returns use position for which register is beneficial in this live
+ // range and which follows both start and last processed use position
+ // Modifies internal state of live range!
+ UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+ // Returns use position for which register is beneficial in this live
+ // range and which precedes start.
+ UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Can this live range be spilled at this position?
+ bool CanBeSpilled(LifetimePosition pos);
+
+ // Split this live range at the given position which must follow the start of
+ // the range.
+ // All uses following the given position will be moved from this
+ // live range to the result live range.
+ void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+ RegisterKind Kind() const { return kind_; }
+ bool HasRegisterAssigned() const {
+ return assigned_register_ != kInvalidAssignment;
+ }
+ bool IsSpilled() const { return spilled_; }
+
+ InstructionOperand* current_hint_operand() const {
+ DCHECK(current_hint_operand_ == FirstHint());
+ return current_hint_operand_;
+ }
+ InstructionOperand* FirstHint() const {
+ UsePosition* pos = first_pos_;
+ while (pos != NULL && !pos->HasHint()) pos = pos->next();
+ if (pos != NULL) return pos->hint();
+ return NULL;
+ }
+
+ LifetimePosition Start() const {
+ DCHECK(!IsEmpty());
+ return first_interval()->start();
+ }
+
+ LifetimePosition End() const {
+ DCHECK(!IsEmpty());
+ return last_interval_->end();
+ }
+
+ bool HasAllocatedSpillOperand() const;
+ InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+ void SetSpillOperand(InstructionOperand* operand);
+
+ void SetSpillStartIndex(int start) {
+ spill_start_index_ = Min(start, spill_start_index_);
+ }
+
+ bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+ bool CanCover(LifetimePosition position) const;
+ bool Covers(LifetimePosition position);
+ LifetimePosition FirstIntersection(LiveRange* other);
+
+ // Add a new interval or a new use position to this live range.
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
+ InstructionOperand* hint, Zone* zone);
+
+ // Shorten the most recently added interval by setting a new start.
+ void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+ // True if target overlaps an existing interval.
+ bool HasOverlap(UseInterval* target) const;
+ void Verify() const;
+#endif
+
+ private:
+ void ConvertOperands(Zone* zone);
+ UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+ void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+ LifetimePosition but_not_past) const;
+
+ int id_;
+ bool spilled_;
+ bool is_phi_;
+ bool is_non_loop_phi_;
+ RegisterKind kind_;
+ int assigned_register_;
+ UseInterval* last_interval_;
+ UseInterval* first_interval_;
+ UsePosition* first_pos_;
+ LiveRange* parent_;
+ LiveRange* next_;
+  // This is used as a cache; it doesn't affect correctness.
+ mutable UseInterval* current_interval_;
+ UsePosition* last_processed_use_;
+  // This is used as a cache; it's invalid outside of BuildLiveRanges.
+ InstructionOperand* current_hint_operand_;
+ InstructionOperand* spill_operand_;
+ int spill_start_index_;
+
+ friend class RegisterAllocator; // Assigns to kind_.
+};
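
An editorial note on the id scheme: LiveRange::IsFixed above simply tests for a negative id, and RegisterAllocator::FixedLiveRangeID further down produces those ids by mapping register index i to -i - 1. A tiny standalone sketch (the free functions are hypothetical mirrors of those members):

// Illustrative sketch only.
#include <cassert>

int FixedLiveRangeID(int index) { return -index - 1; }  // as declared below
bool IsFixed(int id) { return id < 0; }                 // as in LiveRange::IsFixed()

int main() {
  assert(FixedLiveRangeID(0) == -1 && IsFixed(FixedLiveRangeID(0)));
  assert(FixedLiveRangeID(5) == -6);
  assert(!IsFixed(7));  // ordinary virtual-register ranges keep ids >= 0
  return 0;
}
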
+
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+ explicit RegisterAllocator(InstructionSequence* code);
+
+ static void TraceAlloc(const char* msg, ...);
+
+ // Checks whether the value of a given virtual register is a reference.
+ // TODO(titzer): rename this to IsReference.
+ bool HasTaggedValue(int virtual_register) const;
+
+ // Returns the register kind required by the given virtual register.
+ RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+ bool Allocate();
+
+ const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+ const Vector<LiveRange*>* fixed_live_ranges() const {
+ return &fixed_live_ranges_;
+ }
+ const Vector<LiveRange*>* fixed_double_live_ranges() const {
+ return &fixed_double_live_ranges_;
+ }
+
+ inline InstructionSequence* code() const { return code_; }
+
+  // This zone is for data structures only needed during register allocation.
+ inline Zone* zone() { return &zone_; }
+
+ // This zone is for InstructionOperands and moves that live beyond register
+ // allocation.
+ inline Zone* code_zone() { return code()->zone(); }
+
+ int GetVirtualRegister() {
+ int vreg = code()->NextVirtualRegister();
+ if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
+ allocation_ok_ = false;
+ // Maintain the invariant that we return something below the maximum.
+ return 0;
+ }
+ return vreg;
+ }
+
+ bool AllocationOk() { return allocation_ok_; }
+
+#ifdef DEBUG
+ void Verify() const;
+#endif
+
+ BitVector* assigned_registers() { return assigned_registers_; }
+ BitVector* assigned_double_registers() { return assigned_double_registers_; }
+
+ private:
+ void MeetRegisterConstraints();
+ void ResolvePhis();
+ void BuildLiveRanges();
+ void AllocateGeneralRegisters();
+ void AllocateDoubleRegisters();
+ void ConnectRanges();
+ void ResolveControlFlow();
+ void PopulatePointerMaps(); // TODO(titzer): rename to PopulateReferenceMaps.
+ void AllocateRegisters();
+ bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
+ inline bool SafePointsAreInOrder() const;
+
+ // Liveness analysis support.
+ void InitializeLivenessAnalysis();
+ BitVector* ComputeLiveOut(BasicBlock* block);
+ void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+ bool IsOutputRegisterOf(Instruction* instr, int index);
+ bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
+ void ProcessInstructions(BasicBlock* block, BitVector* live);
+ void MeetRegisterConstraints(BasicBlock* block);
+ void MeetConstraintsBetween(Instruction* first, Instruction* second,
+ int gap_index);
+ void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
+ void ResolvePhis(BasicBlock* block);
+
+ // Helper methods for building intervals.
+ InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
+ bool is_tagged);
+ LiveRange* LiveRangeFor(InstructionOperand* operand);
+ void Define(LifetimePosition position, InstructionOperand* operand,
+ InstructionOperand* hint);
+ void Use(LifetimePosition block_start, LifetimePosition position,
+ InstructionOperand* operand, InstructionOperand* hint);
+ void AddConstraintsGapMove(int index, InstructionOperand* from,
+ InstructionOperand* to);
+
+  // Helper methods for updating the live range lists.
+ void AddToActive(LiveRange* range);
+ void AddToInactive(LiveRange* range);
+ void AddToUnhandledSorted(LiveRange* range);
+ void AddToUnhandledUnsorted(LiveRange* range);
+ void SortUnhandled();
+ bool UnhandledIsSorted();
+ void ActiveToHandled(LiveRange* range);
+ void ActiveToInactive(LiveRange* range);
+ void InactiveToHandled(LiveRange* range);
+ void InactiveToActive(LiveRange* range);
+ void FreeSpillSlot(LiveRange* range);
+ InstructionOperand* TryReuseSpillSlot(LiveRange* range);
+
+ // Helper methods for allocating registers.
+ bool TryAllocateFreeReg(LiveRange* range);
+ void AllocateBlockedReg(LiveRange* range);
+
+ // Live range splitting helpers.
+
+ // Split the given range at the given position.
+ // If range starts at or after the given position then the
+ // original range is returned.
+ // Otherwise returns the live range that starts at pos and contains
+ // all uses from the original range that follow pos. Uses at pos will
+ // still be owned by the original range after splitting.
+ LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+ // Split the given range in a position from the interval [start, end].
+ LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end);
+
+ // Find a lifetime position in the interval [start, end] which
+ // is optimal for splitting: it is either header of the outermost
+ // loop covered by this interval or the latest possible position.
+ LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+ LifetimePosition end);
+
+  // Spill the given live range after position [pos].
+ void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position [start] and up to position [end].
+ void SpillBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end);
+
+  // Spill the given live range after position [start] and up to position [end].
+  // The range is guaranteed to be spilled at least until position [until].
+ void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
+ LifetimePosition until, LifetimePosition end);
+
+ void SplitAndSpillIntersecting(LiveRange* range);
+
+  // If we are trying to spill a range inside a loop, try to hoist the spill
+  // position out to the point just before the loop.
+ LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+ LifetimePosition pos);
+
+ void Spill(LiveRange* range);
+ bool IsBlockBoundary(LifetimePosition pos);
+
+ // Helper methods for resolving control flow.
+ void ResolveControlFlow(LiveRange* range, BasicBlock* block,
+ BasicBlock* pred);
+
+ inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
+  // Return the parallel move that should be used to connect ranges split at
+  // the given position.
+ ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+ BasicBlock* GetBlock(LifetimePosition pos);
+
+ // Helper methods for the fixed registers.
+ int RegisterCount() const;
+ static int FixedLiveRangeID(int index) { return -index - 1; }
+ static int FixedDoubleLiveRangeID(int index);
+ LiveRange* FixedLiveRangeFor(int index);
+ LiveRange* FixedDoubleLiveRangeFor(int index);
+ LiveRange* LiveRangeFor(int index);
+ GapInstruction* GetLastGap(BasicBlock* block);
+
+ const char* RegisterName(int allocation_index);
+
+ inline Instruction* InstructionAt(int index) {
+ return code()->InstructionAt(index);
+ }
+
+ Zone zone_;
+ InstructionSequence* code_;
+
+ // During liveness analysis keep a mapping from block id to live_in sets
+ // for blocks already analyzed.
+ ZoneList<BitVector*> live_in_sets_;
+
+ // Liveness analysis results.
+ ZoneList<LiveRange*> live_ranges_;
+
+ // Lists of live ranges
+ EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
+ fixed_live_ranges_;
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+ fixed_double_live_ranges_;
+ ZoneList<LiveRange*> unhandled_live_ranges_;
+ ZoneList<LiveRange*> active_live_ranges_;
+ ZoneList<LiveRange*> inactive_live_ranges_;
+ ZoneList<LiveRange*> reusable_slots_;
+
+ RegisterKind mode_;
+ int num_registers_;
+
+ BitVector* assigned_registers_;
+ BitVector* assigned_double_registers_;
+
+ // Indicates success or failure during register allocation.
+ bool allocation_ok_;
+
+#ifdef DEBUG
+ LifetimePosition allocation_finger_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+
+class RegisterAllocatorPhase : public CompilationPhase {
+ public:
+ RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
+ ~RegisterAllocatorPhase();
+
+ private:
+ RegisterAllocator* allocator_;
+ unsigned allocator_zone_start_allocation_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
new file mode 100644
index 000000000..bd5fb5f79
--- /dev/null
+++ b/deps/v8/src/compiler/representation-change.h
@@ -0,0 +1,411 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
+#define V8_COMPILER_REPRESENTATION_CHANGE_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The types and representations tracked during representation inference
+// and change insertion.
+// TODO(titzer): First, merge MachineType and RepType.
+// TODO(titzer): Second, Use the real type system instead of RepType.
+enum RepType {
+ // Representations.
+ rBit = 1 << 0,
+ rWord32 = 1 << 1,
+ rWord64 = 1 << 2,
+ rFloat64 = 1 << 3,
+ rTagged = 1 << 4,
+
+ // Types.
+ tBool = 1 << 5,
+ tInt32 = 1 << 6,
+ tUint32 = 1 << 7,
+ tInt64 = 1 << 8,
+ tUint64 = 1 << 9,
+ tNumber = 1 << 10,
+ tAny = 1 << 11
+};
+
+#define REP_TYPE_STRLEN 24
+
+typedef uint16_t RepTypeUnion;
+
+
+inline void RenderRepTypeUnion(char* buf, RepTypeUnion info) {
+ base::OS::SNPrintF(buf, REP_TYPE_STRLEN, "{%s%s%s%s%s %s%s%s%s%s%s%s}",
+ (info & rBit) ? "k" : " ", (info & rWord32) ? "w" : " ",
+ (info & rWord64) ? "q" : " ",
+ (info & rFloat64) ? "f" : " ",
+ (info & rTagged) ? "t" : " ", (info & tBool) ? "Z" : " ",
+ (info & tInt32) ? "I" : " ", (info & tUint32) ? "U" : " ",
+ (info & tInt64) ? "L" : " ", (info & tUint64) ? "J" : " ",
+ (info & tNumber) ? "N" : " ", (info & tAny) ? "*" : " ");
+}
+
+
+const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged;
+const RepTypeUnion tMask =
+ tBool | tInt32 | tUint32 | tInt64 | tUint64 | tNumber | tAny;
+const RepType rPtr = kPointerSize == 4 ? rWord32 : rWord64;
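
An editorial sketch (not part of the patch) of how these bit unions combine: a value's static information is one representation bit OR-ed with any number of type bits, and the masks above let GetRepresentationFor below compare just the representation halves. The sketch redeclares the same bit values locally so it compiles on its own.

// Illustrative sketch only; reuses the bit layout declared above.
#include <cassert>
#include <cstdint>

enum { rBit = 1 << 0, rWord32 = 1 << 1, rWord64 = 1 << 2, rFloat64 = 1 << 3,
       rTagged = 1 << 4, tInt32 = 1 << 6, tUint32 = 1 << 7 };
typedef uint16_t RepTypeUnion;
const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged;

int main() {
  RepTypeUnion output = rTagged | tInt32;  // a tagged value known to be int32
  RepTypeUnion use = rWord32 | tInt32;     // a use wanting an untagged word32
  // Only the representation bits decide whether a change node is needed.
  assert((output & rMask) != (use & rMask));   // change required
  RepTypeUnion same = rTagged;                 // used as tagged again
  assert((output & rMask) == (same & rMask));  // no-op
  return 0;
}
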
+
+// Contains logic related to changing the representation of values for constants
+// and other nodes, as well as lowering Simplified->Machine operators.
+// Eagerly folds any representation changes for constants.
+class RepresentationChanger {
+ public:
+ RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
+ MachineOperatorBuilder* machine, Isolate* isolate)
+ : jsgraph_(jsgraph),
+ simplified_(simplified),
+ machine_(machine),
+ isolate_(isolate),
+ testing_type_errors_(false),
+ type_error_(false) {}
+
+
+ Node* GetRepresentationFor(Node* node, RepTypeUnion output_type,
+ RepTypeUnion use_type) {
+ if (!IsPowerOf2(output_type & rMask)) {
+ // There should be only one output representation.
+ return TypeError(node, output_type, use_type);
+ }
+ if ((use_type & rMask) == (output_type & rMask)) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (use_type & rTagged) {
+ return GetTaggedRepresentationFor(node, output_type);
+ } else if (use_type & rFloat64) {
+ return GetFloat64RepresentationFor(node, output_type);
+ } else if (use_type & rWord32) {
+ return GetWord32RepresentationFor(node, output_type, use_type & tUint32);
+ } else if (use_type & rBit) {
+ return GetBitRepresentationFor(node, output_type);
+ } else if (use_type & rWord64) {
+ return GetWord64RepresentationFor(node, output_type);
+ } else {
+ return node;
+ }
+ }
+
+ Node* GetTaggedRepresentationFor(Node* node, RepTypeUnion output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ return node; // No change necessary.
+ case IrOpcode::kInt32Constant:
+ if (output_type & tUint32) {
+ uint32_t value = ValueOf<uint32_t>(node->op());
+ return jsgraph()->Constant(static_cast<double>(value));
+ } else if (output_type & tInt32) {
+ int32_t value = ValueOf<int32_t>(node->op());
+ return jsgraph()->Constant(value);
+ } else if (output_type & rBit) {
+ return ValueOf<int32_t>(node->op()) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+ } else {
+ return TypeError(node, output_type, rTagged);
+ }
+ case IrOpcode::kFloat64Constant:
+ return jsgraph()->Constant(ValueOf<double>(node->op()));
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ Operator* op;
+ if (output_type & rBit) {
+ op = simplified()->ChangeBitToBool();
+ } else if (output_type & rWord32) {
+ if (output_type & tUint32) {
+ op = simplified()->ChangeUint32ToTagged();
+ } else if (output_type & tInt32) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ return TypeError(node, output_type, rTagged);
+ }
+ } else if (output_type & rFloat64) {
+ op = simplified()->ChangeFloat64ToTagged();
+ } else {
+ return TypeError(node, output_type, rTagged);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+ }
+
+ Node* GetFloat64RepresentationFor(Node* node, RepTypeUnion output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(ValueOf<double>(node->op()));
+ case IrOpcode::kInt32Constant:
+ if (output_type & tUint32) {
+ uint32_t value = ValueOf<uint32_t>(node->op());
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ } else {
+ int32_t value = ValueOf<int32_t>(node->op());
+ return jsgraph()->Float64Constant(value);
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ default:
+ break;
+ }
+ // Select the correct X -> Float64 operator.
+ Operator* op;
+ if (output_type & rWord32) {
+ if (output_type & tUint32) {
+ op = machine()->ChangeUint32ToFloat64();
+ } else {
+ op = machine()->ChangeInt32ToFloat64();
+ }
+ } else if (output_type & rTagged) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else {
+ return TypeError(node, output_type, rFloat64);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+ }
+
+ Node* GetWord32RepresentationFor(Node* node, RepTypeUnion output_type,
+ bool use_unsigned) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return node; // No change necessary.
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant: {
+ double value = ValueOf<double>(node->op());
+ if (value < 0) {
+ DCHECK(IsInt32Double(value));
+ int32_t iv = static_cast<int32_t>(value);
+ return jsgraph()->Int32Constant(iv);
+ } else {
+ DCHECK(IsUint32Double(value));
+ int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
+ return jsgraph()->Int32Constant(iv);
+ }
+ }
+ default:
+ break;
+ }
+ // Select the correct X -> Word32 operator.
+ Operator* op = NULL;
+ if (output_type & rFloat64) {
+ if (output_type & tUint32 || use_unsigned) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else {
+ op = machine()->ChangeFloat64ToInt32();
+ }
+ } else if (output_type & rTagged) {
+ if (output_type & tUint32 || use_unsigned) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else {
+ op = simplified()->ChangeTaggedToInt32();
+ }
+ } else if (output_type & rBit) {
+ return node; // Sloppy comparison -> word32.
+ } else {
+ return TypeError(node, output_type, rWord32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+ }
+
+ Node* GetBitRepresentationFor(Node* node, RepTypeUnion output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant: {
+ int32_t value = ValueOf<int32_t>(node->op());
+ if (value == 0 || value == 1) return node;
+ return jsgraph()->OneConstant(); // value != 0
+ }
+ case IrOpcode::kHeapConstant: {
+ Handle<Object> handle = ValueOf<Handle<Object> >(node->op());
+ DCHECK(*handle == isolate()->heap()->true_value() ||
+ *handle == isolate()->heap()->false_value());
+ return jsgraph()->Int32Constant(
+ *handle == isolate()->heap()->true_value() ? 1 : 0);
+ }
+ default:
+ break;
+ }
+ // Select the correct X -> Bit operator.
+ Operator* op;
+ if (output_type & rWord32) {
+ return node; // No change necessary.
+ } else if (output_type & rWord64) {
+ return node; // TODO(titzer): No change necessary, on 64-bit.
+ } else if (output_type & rTagged) {
+ op = simplified()->ChangeBoolToBit();
+ } else {
+ return TypeError(node, output_type, rBit);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+ }
+
+ Node* GetWord64RepresentationFor(Node* node, RepTypeUnion output_type) {
+ if (output_type & rBit) {
+ return node; // Sloppy comparison -> word64
+ }
+ // Can't really convert Word64 to anything else. Purported to be internal.
+ return TypeError(node, output_type, rWord64);
+ }
+
+ static RepType TypeForMachineType(MachineType rep) {
+ // TODO(titzer): merge MachineType and RepType.
+ switch (rep) {
+ case kMachineWord8:
+ return rWord32;
+ case kMachineWord16:
+ return rWord32;
+ case kMachineWord32:
+ return rWord32;
+ case kMachineWord64:
+ return rWord64;
+ case kMachineFloat64:
+ return rFloat64;
+ case kMachineTagged:
+ return rTagged;
+ default:
+ UNREACHABLE();
+ return static_cast<RepType>(0);
+ }
+ }
+
+ Operator* Int32OperatorFor(IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Int32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Int32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+
+ Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Uint32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Uint32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+
+ Operator* Float64OperatorFor(IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Float64Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Float64Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Float64Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Float64Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Float64Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Float64Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Float64LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Float64LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+
+ RepType TypeForField(const FieldAccess& access) {
+ RepType tElement = static_cast<RepType>(0); // TODO(titzer)
+ RepType rElement = TypeForMachineType(access.representation);
+ return static_cast<RepType>(tElement | rElement);
+ }
+
+ RepType TypeForElement(const ElementAccess& access) {
+ RepType tElement = static_cast<RepType>(0); // TODO(titzer)
+ RepType rElement = TypeForMachineType(access.representation);
+ return static_cast<RepType>(tElement | rElement);
+ }
+
+ RepType TypeForBasePointer(const FieldAccess& access) {
+ if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
+ return kPointerSize == 8 ? rWord64 : rWord32;
+ }
+
+ RepType TypeForBasePointer(const ElementAccess& access) {
+ if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
+ return kPointerSize == 8 ? rWord64 : rWord32;
+ }
+
+ RepType TypeFromUpperBound(Type* type) {
+ if (type->Is(Type::None()))
+ return tAny; // TODO(titzer): should be an error
+ if (type->Is(Type::Signed32())) return tInt32;
+ if (type->Is(Type::Unsigned32())) return tUint32;
+ if (type->Is(Type::Number())) return tNumber;
+ if (type->Is(Type::Boolean())) return tBool;
+ return tAny;
+ }
+
+ private:
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder* simplified_;
+ MachineOperatorBuilder* machine_;
+ Isolate* isolate_;
+
+ friend class RepresentationChangerTester; // accesses the below fields.
+
+ bool testing_type_errors_; // If {true}, don't abort on a type error.
+ bool type_error_; // Set when a type error is detected.
+
+ Node* TypeError(Node* node, RepTypeUnion output_type, RepTypeUnion use) {
+ type_error_ = true;
+ if (!testing_type_errors_) {
+ char buf1[REP_TYPE_STRLEN];
+ char buf2[REP_TYPE_STRLEN];
+ RenderRepTypeUnion(buf1, output_type);
+ RenderRepTypeUnion(buf2, use);
+ V8_Fatal(__FILE__, __LINE__,
+ "RepresentationChangerError: node #%d:%s of rep"
+ "%s cannot be changed to rep%s",
+ node->id(), node->op()->mnemonic(), buf1, buf2);
+ }
+ return node;
+ }
+
+ JSGraph* jsgraph() { return jsgraph_; }
+ Isolate* isolate() { return isolate_; }
+ SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ MachineOperatorBuilder* machine() { return machine_; }
+};
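
One more editorial sketch, this time of the constant folding in GetWord32RepresentationFor above: a Number or Float64 constant reaching a word32 use is converted through uint32 when it is non-negative, so values above INT32_MAX keep their 32-bit pattern instead of going through a failing int32 conversion. FoldNumberToWord32 is a hypothetical name for that one case.

// Illustrative sketch only; mirrors the kNumberConstant/kFloat64Constant case.
#include <cassert>
#include <cstdint>

int32_t FoldNumberToWord32(double value) {
  if (value < 0) return static_cast<int32_t>(value);
  return static_cast<int32_t>(static_cast<uint32_t>(value));
}

int main() {
  assert(FoldNumberToWord32(-5.0) == -5);
  // 3000000000 is not a valid int32, but its uint32 bit pattern survives.
  assert(static_cast<uint32_t>(FoldNumberToWord32(3000000000.0)) == 3000000000u);
  return 0;
}
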
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_REPRESENTATION_CHANGE_H_
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
new file mode 100644
index 000000000..64766765b
--- /dev/null
+++ b/deps/v8/src/compiler/schedule.cc
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/schedule.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
+ switch (c) {
+ case BasicBlockData::kNone:
+ return os << "none";
+ case BasicBlockData::kGoto:
+ return os << "goto";
+ case BasicBlockData::kBranch:
+ return os << "branch";
+ case BasicBlockData::kReturn:
+ return os << "return";
+ case BasicBlockData::kThrow:
+ return os << "throw";
+ case BasicBlockData::kCall:
+ return os << "call";
+ case BasicBlockData::kDeoptimize:
+ return os << "deoptimize";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const Schedule& s) {
+ // TODO(svenpanne) Const-correct the RPO stuff/iterators.
+ BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
+ for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
+ BasicBlock* block = *i;
+ os << "--- BLOCK B" << block->id();
+ if (block->PredecessorCount() != 0) os << " <- ";
+ BasicBlock::Predecessors predecessors = block->predecessors();
+ bool comma = false;
+ for (BasicBlock::Predecessors::iterator j = predecessors.begin();
+ j != predecessors.end(); ++j) {
+ if (comma) os << ", ";
+ comma = true;
+ os << "B" << (*j)->id();
+ }
+ os << " ---\n";
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ Node* node = *j;
+ os << " " << *node;
+ if (!NodeProperties::IsControl(node)) {
+ Bounds bounds = NodeProperties::GetBounds(node);
+ os << " : ";
+ bounds.lower->PrintTo(os);
+ if (!bounds.upper->Is(bounds.lower)) {
+ os << "..";
+ bounds.upper->PrintTo(os);
+ }
+ }
+ os << "\n";
+ }
+ BasicBlock::Control control = block->control_;
+ if (control != BasicBlock::kNone) {
+ os << " ";
+ if (block->control_input_ != NULL) {
+ os << *block->control_input_;
+ } else {
+ os << "Goto";
+ }
+ os << " -> ";
+ BasicBlock::Successors successors = block->successors();
+ comma = false;
+ for (BasicBlock::Successors::iterator j = successors.begin();
+ j != successors.end(); ++j) {
+ if (comma) os << ", ";
+ comma = true;
+ os << "B" << (*j)->id();
+ }
+ os << "\n";
+ }
+ }
+ return os;
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
new file mode 100644
index 000000000..e730f3324
--- /dev/null
+++ b/deps/v8/src/compiler/schedule.h
@@ -0,0 +1,335 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULE_H_
+#define V8_COMPILER_SCHEDULE_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Graph;
+class ConstructScheduleData;
+class CodeGenerator; // Because of a namespace bug in clang.
+
+class BasicBlockData {
+ public:
+ // Possible control nodes that can end a block.
+ enum Control {
+ kNone, // Control not initialized yet.
+ kGoto, // Goto a single successor block.
+ kBranch, // Branch if true to first successor, otherwise second.
+ kReturn, // Return a value from this method.
+ kThrow, // Throw an exception.
+ kCall, // Call to a possibly deoptimizing or throwing function.
+ kDeoptimize // Deoptimize.
+ };
+
+ int32_t rpo_number_; // special RPO number of the block.
+ BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
+ // NULL if none. For loop headers, this points to
+ // enclosing loop header.
+ int32_t loop_depth_; // loop nesting, 0 is top-level
+ int32_t loop_end_; // end of the loop, if this block is a loop header.
+ int32_t code_start_; // start index of arch-specific code.
+ int32_t code_end_; // end index of arch-specific code.
+ bool deferred_; // {true} if this block is considered the slow
+ // path.
+ Control control_; // Control at the end of the block.
+ Node* control_input_; // Input value for control.
+ NodeVector nodes_; // nodes of this block in forward order.
+
+ explicit BasicBlockData(Zone* zone)
+ : rpo_number_(-1),
+ loop_header_(NULL),
+ loop_depth_(0),
+ loop_end_(-1),
+ code_start_(-1),
+ code_end_(-1),
+ deferred_(false),
+ control_(kNone),
+ control_input_(NULL),
+ nodes_(NodeVector::allocator_type(zone)) {}
+
+ inline bool IsLoopHeader() const { return loop_end_ >= 0; }
+ inline bool LoopContains(BasicBlockData* block) const {
+ // RPO numbers must be initialized.
+ DCHECK(rpo_number_ >= 0);
+ DCHECK(block->rpo_number_ >= 0);
+ if (loop_end_ < 0) return false; // This is not a loop.
+ return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
+ }
+ int first_instruction_index() {
+ DCHECK(code_start_ >= 0);
+ DCHECK(code_end_ > 0);
+ DCHECK(code_end_ >= code_start_);
+ return code_start_;
+ }
+ int last_instruction_index() {
+ DCHECK(code_start_ >= 0);
+ DCHECK(code_end_ > 0);
+ DCHECK(code_end_ >= code_start_);
+ return code_end_ - 1;
+ }
+};
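
An editorial aside on LoopContains above: in the special RPO order used by the scheduler, a loop header with RPO number h and loop_end_ e owns exactly the blocks whose RPO numbers fall in the half-open range [h, e[. A standalone sketch of that predicate:

// Illustrative sketch only.
#include <cassert>

bool LoopContains(int header_rpo, int loop_end, int block_rpo) {
  if (loop_end < 0) return false;  // loop_end_ == -1 means "not a loop header"
  return block_rpo >= header_rpo && block_rpo < loop_end;
}

int main() {
  assert(LoopContains(3, 7, 3));    // a header is inside its own loop
  assert(LoopContains(3, 7, 6));    // last body block
  assert(!LoopContains(3, 7, 7));   // first block after the loop
  assert(!LoopContains(3, -1, 3));  // not a loop header at all
  return 0;
}
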
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
+
+// A basic block contains an ordered list of nodes and ends with a control
+// node. Note that if a basic block has phis, then all phis must appear as the
+// first nodes in the block.
+class BasicBlock V8_FINAL : public GenericNode<BasicBlockData, BasicBlock> {
+ public:
+ BasicBlock(GenericGraphBase* graph, int input_count)
+ : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
+
+ typedef Uses Successors;
+ typedef Inputs Predecessors;
+
+ Successors successors() { return static_cast<Successors>(uses()); }
+ Predecessors predecessors() { return static_cast<Predecessors>(inputs()); }
+
+ int PredecessorCount() { return InputCount(); }
+ BasicBlock* PredecessorAt(int index) { return InputAt(index); }
+
+ int SuccessorCount() { return UseCount(); }
+ BasicBlock* SuccessorAt(int index) { return UseAt(index); }
+
+ int PredecessorIndexOf(BasicBlock* predecessor) {
+ BasicBlock::Predecessors predecessors = this->predecessors();
+ for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+ i != predecessors.end(); ++i) {
+ if (*i == predecessor) return i.index();
+ }
+ return -1;
+ }
+
+ inline BasicBlock* loop_header() {
+ return static_cast<BasicBlock*>(loop_header_);
+ }
+ inline BasicBlock* ContainingLoop() {
+ if (IsLoopHeader()) return this;
+ return static_cast<BasicBlock*>(loop_header_);
+ }
+
+ typedef NodeVector::iterator iterator;
+ iterator begin() { return nodes_.begin(); }
+ iterator end() { return nodes_.end(); }
+
+ typedef NodeVector::const_iterator const_iterator;
+ const_iterator begin() const { return nodes_.begin(); }
+ const_iterator end() const { return nodes_.end(); }
+
+ typedef NodeVector::reverse_iterator reverse_iterator;
+ reverse_iterator rbegin() { return nodes_.rbegin(); }
+ reverse_iterator rend() { return nodes_.rend(); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
+};
+
+typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
+ NullBasicBlockVisitor;
+
+typedef zone_allocator<BasicBlock*> BasicBlockPtrZoneAllocator;
+typedef std::vector<BasicBlock*, BasicBlockPtrZoneAllocator> BasicBlockVector;
+typedef BasicBlockVector::iterator BasicBlockVectorIter;
+typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
+
+// A schedule represents the result of assigning nodes to basic blocks
+// and ordering them within basic blocks. Prior to computing a schedule,
+// a graph has no notion of control flow ordering other than that induced
+// by the graph's dependencies. A schedule is required to generate code.
+class Schedule : public GenericGraph<BasicBlock> {
+ public:
+ explicit Schedule(Zone* zone)
+ : GenericGraph<BasicBlock>(zone),
+ zone_(zone),
+ all_blocks_(BasicBlockVector::allocator_type(zone)),
+ nodeid_to_block_(BasicBlockVector::allocator_type(zone)),
+ rpo_order_(BasicBlockVector::allocator_type(zone)),
+ immediate_dominator_(BasicBlockVector::allocator_type(zone)) {
+ NewBasicBlock(); // entry.
+ NewBasicBlock(); // exit.
+ SetStart(entry());
+ SetEnd(exit());
+ }
+
+ // TODO(titzer): rewrite users of these methods to use start() and end().
+ BasicBlock* entry() const { return all_blocks_[0]; } // Return entry block.
+ BasicBlock* exit() const { return all_blocks_[1]; } // Return exit block.
+
+ // Return the block which contains {node}, if any.
+ BasicBlock* block(Node* node) const {
+ if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
+ return nodeid_to_block_[node->id()];
+ }
+ return NULL;
+ }
+
+ BasicBlock* dominator(BasicBlock* block) {
+ return immediate_dominator_[block->id()];
+ }
+
+ bool IsScheduled(Node* node) {
+ int length = static_cast<int>(nodeid_to_block_.size());
+ if (node->id() >= length) return false;
+ return nodeid_to_block_[node->id()] != NULL;
+ }
+
+ BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
+
+ int BasicBlockCount() const { return NodeCount(); }
+ int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
+
+ typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;
+
+ // Return a list of all the blocks in the schedule, in arbitrary order.
+ BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); }
+
+ // Check if nodes {a} and {b} are in the same block.
+ inline bool SameBasicBlock(Node* a, Node* b) const {
+ BasicBlock* block = this->block(a);
+ return block != NULL && block == this->block(b);
+ }
+
+ // BasicBlock building: create a new block.
+ inline BasicBlock* NewBasicBlock() {
+ BasicBlock* block =
+ BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL));
+ all_blocks_.push_back(block);
+ return block;
+ }
+
+ // BasicBlock building: records that a node will later be added to a block but
+ // doesn't actually add the node to the block.
+ inline void PlanNode(BasicBlock* block, Node* node) {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Planning node %d for future add to block %d\n", node->id(),
+ block->id());
+ }
+ DCHECK(this->block(node) == NULL);
+ SetBlockForNode(block, node);
+ }
+
+ // BasicBlock building: add a node to the end of the block.
+ inline void AddNode(BasicBlock* block, Node* node) {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Adding node %d to block %d\n", node->id(), block->id());
+ }
+ DCHECK(this->block(node) == NULL || this->block(node) == block);
+ block->nodes_.push_back(node);
+ SetBlockForNode(block, node);
+ }
+
+ // BasicBlock building: add a goto to the end of {block}.
+ void AddGoto(BasicBlock* block, BasicBlock* succ) {
+ DCHECK(block->control_ == BasicBlock::kNone);
+ block->control_ = BasicBlock::kGoto;
+ AddSuccessor(block, succ);
+ }
+
+ // BasicBlock building: add a (branching) call at the end of {block}.
+ void AddCall(BasicBlock* block, Node* call, BasicBlock* cont_block,
+ BasicBlock* deopt_block) {
+ DCHECK(block->control_ == BasicBlock::kNone);
+ DCHECK(call->opcode() == IrOpcode::kCall);
+ block->control_ = BasicBlock::kCall;
+ // Insert the deopt block first so that the RPO order builder picks
+ // it first (and thus it ends up late in the RPO order).
+ AddSuccessor(block, deopt_block);
+ AddSuccessor(block, cont_block);
+ SetControlInput(block, call);
+ }
+
+ // BasicBlock building: add a branch at the end of {block}.
+ void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
+ BasicBlock* fblock) {
+ DCHECK(block->control_ == BasicBlock::kNone);
+ DCHECK(branch->opcode() == IrOpcode::kBranch);
+ block->control_ = BasicBlock::kBranch;
+ AddSuccessor(block, tblock);
+ AddSuccessor(block, fblock);
+ SetControlInput(block, branch);
+ }
+
+ // BasicBlock building: add a return at the end of {block}.
+ void AddReturn(BasicBlock* block, Node* input) {
+ // TODO(titzer): require a Return node here.
+ DCHECK(block->control_ == BasicBlock::kNone);
+ block->control_ = BasicBlock::kReturn;
+ SetControlInput(block, input);
+ if (block != exit()) AddSuccessor(block, exit());
+ }
+
+ // BasicBlock building: add a throw at the end of {block}.
+ void AddThrow(BasicBlock* block, Node* input) {
+ DCHECK(block->control_ == BasicBlock::kNone);
+ block->control_ = BasicBlock::kThrow;
+ SetControlInput(block, input);
+ if (block != exit()) AddSuccessor(block, exit());
+ }
+
+ // BasicBlock building: add a deopt at the end of {block}.
+ void AddDeoptimize(BasicBlock* block, Node* state) {
+ DCHECK(block->control_ == BasicBlock::kNone);
+ block->control_ = BasicBlock::kDeoptimize;
+ SetControlInput(block, state);
+ block->deferred_ = true; // By default, consider deopts the slow path.
+ if (block != exit()) AddSuccessor(block, exit());
+ }
+
+ friend class Scheduler;
+ friend class CodeGenerator;
+
+ void AddSuccessor(BasicBlock* block, BasicBlock* succ) {
+ succ->AppendInput(zone_, block);
+ }
+
+ BasicBlockVector* rpo_order() { return &rpo_order_; }
+
+ private:
+ friend class ScheduleVisualizer;
+
+ void SetControlInput(BasicBlock* block, Node* node) {
+ block->control_input_ = node;
+ SetBlockForNode(block, node);
+ }
+
+ void SetBlockForNode(BasicBlock* block, Node* node) {
+ int length = static_cast<int>(nodeid_to_block_.size());
+ if (node->id() >= length) {
+ nodeid_to_block_.resize(node->id() + 1);
+ }
+ nodeid_to_block_[node->id()] = block;
+ }
+
+ Zone* zone_;
+ BasicBlockVector all_blocks_; // All basic blocks in the schedule.
+ BasicBlockVector nodeid_to_block_; // Map from node to containing block.
+ BasicBlockVector rpo_order_; // Reverse-post-order block list.
+ BasicBlockVector immediate_dominator_; // Maps to a block's immediate
+ // dominator, indexed by block
+ // id.
+};
+
+OStream& operator<<(OStream& os, const Schedule& s);
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_SCHEDULE_H_
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
new file mode 100644
index 000000000..6a4009169
--- /dev/null
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -0,0 +1,1048 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/scheduler.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+ : graph_(graph),
+ schedule_(schedule),
+ branches_(NodeVector::allocator_type(zone)),
+ calls_(NodeVector::allocator_type(zone)),
+ deopts_(NodeVector::allocator_type(zone)),
+ returns_(NodeVector::allocator_type(zone)),
+ loops_and_merges_(NodeVector::allocator_type(zone)),
+ node_block_placement_(BasicBlockVector::allocator_type(zone)),
+ unscheduled_uses_(IntVector::allocator_type(zone)),
+ scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
+ schedule_root_nodes_(NodeVector::allocator_type(zone)),
+ schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
+
+
+Schedule* Scheduler::ComputeSchedule(Graph* graph) {
+ Zone tmp_zone(graph->zone()->isolate());
+ Schedule* schedule = new (graph->zone()) Schedule(graph->zone());
+ Scheduler scheduler(&tmp_zone, graph, schedule);
+
+ schedule->AddNode(schedule->end(), graph->end());
+
+ scheduler.PrepareAuxiliaryNodeData();
+ scheduler.CreateBlocks();
+ scheduler.WireBlocks();
+ scheduler.PrepareAuxiliaryBlockData();
+
+ Scheduler::ComputeSpecialRPO(schedule);
+ scheduler.GenerateImmediateDominatorTree();
+
+ scheduler.PrepareUses();
+ scheduler.ScheduleEarly();
+ scheduler.ScheduleLate();
+
+ return schedule;
+}
+
+
+class CreateBlockVisitor : public NullNodeVisitor {
+ public:
+ explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {}
+
+ GenericGraphVisit::Control Post(Node* node) {
+ Schedule* schedule = scheduler_->schedule_;
+ switch (node->opcode()) {
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kContinuation:
+ case IrOpcode::kLazyDeoptimization: {
+ BasicBlock* block = schedule->NewBasicBlock();
+ schedule->AddNode(block, node);
+ break;
+ }
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge: {
+ BasicBlock* block = schedule->NewBasicBlock();
+ schedule->AddNode(block, node);
+ scheduler_->loops_and_merges_.push_back(node);
+ break;
+ }
+ case IrOpcode::kBranch: {
+ scheduler_->branches_.push_back(node);
+ break;
+ }
+ case IrOpcode::kDeoptimize: {
+ scheduler_->deopts_.push_back(node);
+ break;
+ }
+ case IrOpcode::kCall: {
+ if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
+ scheduler_->calls_.push_back(node);
+ }
+ break;
+ }
+ case IrOpcode::kReturn:
+ scheduler_->returns_.push_back(node);
+ break;
+ default:
+ break;
+ }
+
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ private:
+ Scheduler* scheduler_;
+};
+
+
+void Scheduler::CreateBlocks() {
+ CreateBlockVisitor create_blocks(this);
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("---------------- CREATING BLOCKS ------------------\n");
+ }
+ schedule_->AddNode(schedule_->entry(), graph_->start());
+ graph_->VisitNodeInputsFromEnd(&create_blocks);
+}
+
+
+void Scheduler::WireBlocks() {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("----------------- WIRING BLOCKS -------------------\n");
+ }
+ AddSuccessorsForBranches();
+ AddSuccessorsForReturns();
+ AddSuccessorsForCalls();
+ AddSuccessorsForDeopts();
+ AddPredecessorsForLoopsAndMerges();
+ // TODO(danno): Handle Throw, et al.
+}
+
+
+void Scheduler::PrepareAuxiliaryNodeData() {
+ unscheduled_uses_.resize(graph_->NodeCount(), 0);
+ schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
+}
+
+
+void Scheduler::PrepareAuxiliaryBlockData() {
+ Zone* zone = schedule_->zone();
+ scheduled_nodes_.resize(schedule_->BasicBlockCount(),
+ NodeVector(NodeVector::allocator_type(zone)));
+ schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
+}
+
+
+void Scheduler::AddPredecessorsForLoopsAndMerges() {
+ for (NodeVectorIter i = loops_and_merges_.begin();
+ i != loops_and_merges_.end(); ++i) {
+ Node* merge_or_loop = *i;
+ BasicBlock* block = schedule_->block(merge_or_loop);
+ DCHECK(block != NULL);
+    // For each of the merge's control inputs, add a goto at the end of the
+    // input's block to the merge's basic block.
+ for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
+ if (OperatorProperties::IsBasicBlockBegin((*i)->op())) {
+ BasicBlock* predecessor_block = schedule_->block(*j);
+ if ((*j)->opcode() != IrOpcode::kReturn &&
+ (*j)->opcode() != IrOpcode::kDeoptimize) {
+ DCHECK(predecessor_block != NULL);
+ if (FLAG_trace_turbo_scheduler) {
+ IrOpcode::Value opcode = (*i)->opcode();
+ PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
+ IrOpcode::Mnemonic(opcode), predecessor_block->id(),
+ block->id());
+ }
+ schedule_->AddGoto(predecessor_block, block);
+ }
+ }
+ }
+ }
+}
+
+
+void Scheduler::AddSuccessorsForCalls() {
+ for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
+ Node* call = *i;
+ DCHECK(call->opcode() == IrOpcode::kCall);
+ DCHECK(OperatorProperties::CanLazilyDeoptimize(call->op()));
+
+ Node* lazy_deopt_node = NULL;
+ Node* cont_node = NULL;
+ // Find the continuation and lazy-deopt nodes among the uses.
+ for (UseIter use_iter = call->uses().begin();
+ use_iter != call->uses().end(); ++use_iter) {
+ switch ((*use_iter)->opcode()) {
+ case IrOpcode::kContinuation: {
+ DCHECK(cont_node == NULL);
+ cont_node = *use_iter;
+ break;
+ }
+ case IrOpcode::kLazyDeoptimization: {
+ DCHECK(lazy_deopt_node == NULL);
+ lazy_deopt_node = *use_iter;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ DCHECK(lazy_deopt_node != NULL);
+ DCHECK(cont_node != NULL);
+ BasicBlock* cont_successor_block = schedule_->block(cont_node);
+ BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
+ Node* call_block_node = NodeProperties::GetControlInput(call);
+ BasicBlock* call_block = schedule_->block(call_block_node);
+ if (FLAG_trace_turbo_scheduler) {
+ IrOpcode::Value opcode = call->opcode();
+ PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
+ IrOpcode::Mnemonic(opcode), call_block->id(),
+ cont_successor_block->id());
+ PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
+ IrOpcode::Mnemonic(opcode), call_block->id(),
+ deopt_successor_block->id());
+ }
+ schedule_->AddCall(call_block, call, cont_successor_block,
+ deopt_successor_block);
+ }
+}
+
+
+void Scheduler::AddSuccessorsForDeopts() {
+ for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
+ Node* deopt_block_node = NodeProperties::GetControlInput(*i);
+ BasicBlock* deopt_block = schedule_->block(deopt_block_node);
+ DCHECK(deopt_block != NULL);
+ if (FLAG_trace_turbo_scheduler) {
+ IrOpcode::Value opcode = (*i)->opcode();
+ PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
+ IrOpcode::Mnemonic(opcode), deopt_block->id());
+ }
+ schedule_->AddDeoptimize(deopt_block, *i);
+ }
+}
+
+
+void Scheduler::AddSuccessorsForBranches() {
+ for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
+ Node* branch = *i;
+ DCHECK(branch->opcode() == IrOpcode::kBranch);
+ Node* branch_block_node = NodeProperties::GetControlInput(branch);
+ BasicBlock* branch_block = schedule_->block(branch_block_node);
+ DCHECK(branch_block != NULL);
+ UseIter use_iter = branch->uses().begin();
+ Node* first_successor = *use_iter;
+ ++use_iter;
+ DCHECK(use_iter != branch->uses().end());
+ Node* second_successor = *use_iter;
+ DCHECK(++use_iter == branch->uses().end());
+ Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
+ ? first_successor
+ : second_successor;
+ Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
+ ? second_successor
+ : first_successor;
+ DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue);
+ DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse);
+ BasicBlock* true_successor_block = schedule_->block(true_successor_node);
+ BasicBlock* false_successor_block = schedule_->block(false_successor_node);
+ DCHECK(true_successor_block != NULL);
+ DCHECK(false_successor_block != NULL);
+ if (FLAG_trace_turbo_scheduler) {
+ IrOpcode::Value opcode = branch->opcode();
+ PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
+ IrOpcode::Mnemonic(opcode), branch_block->id(),
+ true_successor_block->id());
+ PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
+ IrOpcode::Mnemonic(opcode), branch_block->id(),
+ false_successor_block->id());
+ }
+ schedule_->AddBranch(branch_block, branch, true_successor_block,
+ false_successor_block);
+ }
+}
+
+
+void Scheduler::AddSuccessorsForReturns() {
+ for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
+ Node* return_block_node = NodeProperties::GetControlInput(*i);
+ BasicBlock* return_block = schedule_->block(return_block_node);
+ DCHECK(return_block != NULL);
+ if (FLAG_trace_turbo_scheduler) {
+ IrOpcode::Value opcode = (*i)->opcode();
+ PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
+ IrOpcode::Mnemonic(opcode), return_block->id());
+ }
+ schedule_->AddReturn(return_block, *i);
+ }
+}
+
+
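+// Worked example (hypothetical blocks): given the immediate-dominator chains
+// B5 -> B3 -> B1 -> B0 and B4 -> B1 -> B0, the loop below repeatedly replaces
+// whichever block currently has the larger RPO number with its immediate
+// dominator, so GetCommonDominator(B5, B4) converges at B1, the closest block
+// that dominates both.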
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+ while (b1 != b2) {
+ int b1_rpo = GetRPONumber(b1);
+ int b2_rpo = GetRPONumber(b2);
+ DCHECK(b1_rpo != b2_rpo);
+ if (b1_rpo < b2_rpo) {
+ b2 = schedule_->immediate_dominator_[b2->id()];
+ } else {
+ b1 = schedule_->immediate_dominator_[b1->id()];
+ }
+ }
+ return b1;
+}
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+ // Build the immediate dominator tree. TODO(danno): consider using Lengauer &
+ // Tarjan's algorithm if this becomes really slow.
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
+ }
+ for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
+ BasicBlock* current_rpo = schedule_->rpo_order_[i];
+ if (current_rpo != schedule_->entry()) {
+ BasicBlock::Predecessors::iterator current_pred =
+ current_rpo->predecessors().begin();
+ BasicBlock::Predecessors::iterator end =
+ current_rpo->predecessors().end();
+ DCHECK(current_pred != end);
+ BasicBlock* dominator = *current_pred;
+ ++current_pred;
+ // For multiple predecessors, walk up the rpo ordering until a common
+ // dominator is found.
+ int current_rpo_pos = GetRPONumber(current_rpo);
+ while (current_pred != end) {
+ // Don't examine backwards edges
+ BasicBlock* pred = *current_pred;
+ if (GetRPONumber(pred) < current_rpo_pos) {
+ dominator = GetCommonDominator(dominator, *current_pred);
+ }
+ ++current_pred;
+ }
+ schedule_->immediate_dominator_[current_rpo->id()] = dominator;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
+ }
+ }
+ }
+}
+
+
+class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
+ public:
+ explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
+ : has_changed_rpo_constraints_(true),
+ scheduler_(scheduler),
+ schedule_(scheduler->schedule_) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ int id = node->id();
+ int max_rpo = 0;
+ // Fixed nodes already know their schedule early position.
+ if (IsFixedNode(node)) {
+ BasicBlock* block = schedule_->block(node);
+ DCHECK(block != NULL);
+ max_rpo = block->rpo_number_;
+ if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+ has_changed_rpo_constraints_ = true;
+ }
+ scheduler_->schedule_early_rpo_index_[id] = max_rpo;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
+ }
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ GenericGraphVisit::Control Post(Node* node) {
+ int id = node->id();
+ int max_rpo = 0;
+ // Otherwise, the node's minimum rpo is the max of its inputs' rpo limits.
+ if (!IsFixedNode(node)) {
+ DCHECK(!OperatorProperties::IsBasicBlockBegin(node->op()));
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end();
+ ++i) {
+ int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
+ if (control_rpo > max_rpo) {
+ max_rpo = control_rpo;
+ }
+ }
+ if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+ has_changed_rpo_constraints_ = true;
+ }
+ scheduler_->schedule_early_rpo_index_[id] = max_rpo;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
+ }
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ static bool IsFixedNode(Node* node) {
+ return OperatorProperties::HasFixedSchedulePosition(node->op()) ||
+ !OperatorProperties::CanBeScheduled(node->op());
+ }
+
+ // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
+ // rewritten to use a pre-order traversal from the start instead.
+ bool has_changed_rpo_constraints_;
+
+ private:
+ Scheduler* scheduler_;
+ Schedule* schedule_;
+};
+
+
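+// Illustration (hypothetical numbers): if a node's inputs carry early rpo
+// limits 0 (e.g. a Parameter fixed in the entry block) and 4, the node's own
+// schedule_early_rpo_index_ becomes max(0, 4) == 4, and ScheduleLate below
+// will only hoist it into blocks whose rpo number is still >= 4.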
+void Scheduler::ScheduleEarly() {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("------------------- SCHEDULE EARLY ----------------\n");
+ }
+
+ int fixpoint_count = 0;
+ ScheduleEarlyNodeVisitor visitor(this);
+ while (visitor.has_changed_rpo_constraints_) {
+ visitor.has_changed_rpo_constraints_ = false;
+ graph_->VisitNodeInputsFromEnd(&visitor);
+ fixpoint_count++;
+ }
+
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count);
+ }
+}
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+ explicit PrepareUsesVisitor(Scheduler* scheduler)
+ : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ // Some nodes must be scheduled explicitly to ensure they are in exactly the
+ // right place; the preparation of use counts is a convenient point at which
+ // to do so.
+ if (!schedule_->IsScheduled(node) &&
+ OperatorProperties::HasFixedSchedulePosition(node->op())) {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Fixed position node %d is unscheduled, scheduling now\n",
+ node->id());
+ }
+ IrOpcode::Value opcode = node->opcode();
+ BasicBlock* block =
+ opcode == IrOpcode::kParameter
+ ? schedule_->entry()
+ : schedule_->block(NodeProperties::GetControlInput(node));
+ DCHECK(block != NULL);
+ schedule_->AddNode(block, node);
+ }
+
+ if (OperatorProperties::IsScheduleRoot(node->op())) {
+ scheduler_->schedule_root_nodes_.push_back(node);
+ }
+
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ void PostEdge(Node* from, int index, Node* to) {
+ // If the edge is from an unscheduled node, then tally it in the use count
+ // for all of its inputs. The same criterion will be used in ScheduleLate
+ // for decrementing use counts.
+ if (!schedule_->IsScheduled(from) &&
+ OperatorProperties::CanBeScheduled(from->op())) {
+ DCHECK(!OperatorProperties::HasFixedSchedulePosition(from->op()));
+ ++scheduler_->unscheduled_uses_[to->id()];
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Incrementing uses of node %d from %d to %d\n", to->id(),
+ from->id(), scheduler_->unscheduled_uses_[to->id()]);
+ }
+ }
+ }
+
+ private:
+ Scheduler* scheduler_;
+ Schedule* schedule_;
+};
+
+
+void Scheduler::PrepareUses() {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("------------------- PREPARE USES ------------------\n");
+ }
+ // Count the uses of every node; these counts are used to ensure that all of
+ // a node's uses are scheduled before the node itself.
+ PrepareUsesVisitor prepare_uses(this);
+ graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
+
+
+class ScheduleLateNodeVisitor : public NullNodeVisitor {
+ public:
+ explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
+ : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ // Don't schedule nodes that cannot be scheduled or are already scheduled.
+ if (!OperatorProperties::CanBeScheduled(node->op()) ||
+ schedule_->IsScheduled(node)) {
+ return GenericGraphVisit::CONTINUE;
+ }
+ DCHECK(!OperatorProperties::HasFixedSchedulePosition(node->op()));
+
+ // If all the uses of a node have been scheduled, then the node itself can
+ // be scheduled.
+ bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(),
+ eligible ? "true" : "false");
+ }
+ if (!eligible) return GenericGraphVisit::DEFER;
+
+ // Determine the dominating block for all of the uses of this node. It is
+ // the latest block that this node can be scheduled in.
+ BasicBlock* block = NULL;
+ for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
+ ++i) {
+ BasicBlock* use_block = GetBlockForUse(i.edge());
+ block = block == NULL ? use_block : use_block == NULL
+ ? block
+ : scheduler_->GetCommonDominator(
+ block, use_block);
+ }
+ DCHECK(block != NULL);
+
+ int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF(
+ "Schedule late conservative for node %d is block %d at "
+ "loop depth %d, min rpo = %d\n",
+ node->id(), block->id(), block->loop_depth_, min_rpo);
+ }
+ // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+ // into enclosing loop pre-headers until they would precede their
+ // ScheduleEarly position.
+ BasicBlock* hoist_block = block;
+ while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
+ if (hoist_block->loop_depth_ < block->loop_depth_) {
+ block = hoist_block;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
+ }
+ }
+ // Try to hoist to the pre-header of the loop header.
+ hoist_block = hoist_block->loop_header();
+ if (hoist_block != NULL) {
+ BasicBlock* pre_header = schedule_->dominator(hoist_block);
+ DCHECK(pre_header == NULL ||
+ *hoist_block->predecessors().begin() == pre_header);
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF(
+ "Try hoist to pre-header block %d of loop header block %d,"
+ " depth would be %d\n",
+ pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
+ }
+ hoist_block = pre_header;
+ }
+ }
+
+ ScheduleNode(block, node);
+
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ private:
+ BasicBlock* GetBlockForUse(Node::Edge edge) {
+ Node* use = edge.from();
+ IrOpcode::Value opcode = use->opcode();
+ // If the use is a phi, forward through the phi to the basic block
+ // corresponding to the phi's input.
+ if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ int index = edge.index();
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Use %d is input %d to a phi\n", use->id(), index);
+ }
+ use = NodeProperties::GetControlInput(use, 0);
+ opcode = use->opcode();
+ DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+ use = NodeProperties::GetControlInput(use, index);
+ }
+ BasicBlock* result = schedule_->block(use);
+ if (result == NULL) return NULL;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
+ }
+ return result;
+ }
+
+ bool IsNodeEligible(Node* node) {
+ bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
+ return eligible;
+ }
+
+ void ScheduleNode(BasicBlock* block, Node* node) {
+ schedule_->PlanNode(block, node);
+ scheduler_->scheduled_nodes_[block->id()].push_back(node);
+
+ // Reduce the use count of the node's inputs to potentially make them
+ // schedulable.
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
+ --scheduler_->unscheduled_uses_[(*i)->id()];
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Decrementing use count for node %d from node %d (now %d)\n",
+ (*i)->id(), i.edge().from()->id(),
+ scheduler_->unscheduled_uses_[(*i)->id()]);
+ if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
+ PrintF("node %d is now eligible for scheduling\n", (*i)->id());
+ }
+ }
+ }
+ }
+
+ Scheduler* scheduler_;
+ Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleLate() {
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("------------------- SCHEDULE LATE -----------------\n");
+ }
+
+ // Schedule: place nodes in the dominator block of all of their uses.
+ ScheduleLateNodeVisitor schedule_late_visitor(this);
+
+ for (NodeVectorIter i = schedule_root_nodes_.begin();
+ i != schedule_root_nodes_.end(); ++i) {
+ GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
+ NodeInputIterationTraits<Node> >(
+ graph_, *i, &schedule_late_visitor);
+ }
+
+ // Add the collected nodes to their basic blocks in the right order.
+ int block_num = 0;
+ for (NodeVectorVectorIter i = scheduled_nodes_.begin();
+ i != scheduled_nodes_.end(); ++i) {
+ for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
+ schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+ }
+ block_num++;
+ }
+}
+
+
+// Numbering for BasicBlockData.rpo_number_ for this block traversal:
+static const int kBlockOnStack = -2;
+static const int kBlockVisited1 = -3;
+static const int kBlockVisited2 = -4;
+static const int kBlockUnvisited1 = -1;
+static const int kBlockUnvisited2 = kBlockVisited1;
+
+struct SpecialRPOStackFrame {
+ BasicBlock* block;
+ int index;
+};
+
+struct BlockList {
+ BasicBlock* block;
+ BlockList* next;
+
+ BlockList* Add(Zone* zone, BasicBlock* b) {
+ BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+ list->block = b;
+ list->next = this;
+ return list;
+ }
+
+ void Serialize(BasicBlockVector* final_order) {
+ for (BlockList* l = this; l != NULL; l = l->next) {
+ l->block->rpo_number_ = static_cast<int>(final_order->size());
+ final_order->push_back(l->block);
+ }
+ }
+};
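+
+// Usage sketch (mirrors how ComputeSpecialRPO builds its order list): Add()
+// prepends, so blocks added in post-order come back out in reverse post-order
+// when Serialize() walks the list from head to tail. For hypothetical blocks
+// finished in the order B2, B1, B0:
+//
+//   BlockList* order = NULL;
+//   order = order->Add(zone, B2);
+//   order = order->Add(zone, B1);
+//   order = order->Add(zone, B0);
+//   order->Serialize(final_order);  // final_order now holds [B0, B1, B2]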
+
+struct LoopInfo {
+ BasicBlock* header;
+ ZoneList<BasicBlock*>* outgoing;
+ BitVector* members;
+ LoopInfo* prev;
+ BlockList* end;
+ BlockList* start;
+
+ void AddOutgoing(Zone* zone, BasicBlock* block) {
+ if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+ outgoing->Add(block, zone);
+ }
+};
+
+
+static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+ int unvisited) {
+ if (child->rpo_number_ == unvisited) {
+ stack[depth].block = child;
+ stack[depth].index = 0;
+ child->rpo_number_ = kBlockOnStack;
+ return depth + 1;
+ }
+ return depth;
+}
+
+
+// Computes loop membership from the backedges of the control flow graph.
+static LoopInfo* ComputeLoopInfo(
+ Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
+ ZoneList<std::pair<BasicBlock*, int> >* backedges) {
+ LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
+ memset(loops, 0, num_loops * sizeof(LoopInfo));
+
+ // Compute loop membership starting from backedges.
+ // O(max(loop_depth) * max(|loop|))
+ for (int i = 0; i < backedges->length(); i++) {
+ BasicBlock* member = backedges->at(i).first;
+ BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+ int loop_num = header->loop_end_;
+ if (loops[loop_num].header == NULL) {
+ loops[loop_num].header = header;
+ loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
+ }
+
+ int queue_length = 0;
+ if (member != header) {
+ // As long as the header doesn't have a backedge to itself, push the
+ // member onto the queue and process its predecessors.
+ if (!loops[loop_num].members->Contains(member->id())) {
+ loops[loop_num].members->Add(member->id());
+ }
+ queue[queue_length++].block = member;
+ }
+
+ // Propagate loop membership backwards. All predecessors of M up to the
+ // loop header H are members of the loop too. O(|blocks between M and H|).
+ while (queue_length > 0) {
+ BasicBlock* block = queue[--queue_length].block;
+ for (int i = 0; i < block->PredecessorCount(); i++) {
+ BasicBlock* pred = block->PredecessorAt(i);
+ if (pred != header) {
+ if (!loops[loop_num].members->Contains(pred->id())) {
+ loops[loop_num].members->Add(pred->id());
+ queue[queue_length++].block = pred;
+ }
+ }
+ }
+ }
+ }
+ return loops;
+}
+
+
+#if DEBUG
+static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+ PrintF("-- RPO with %d loops ", num_loops);
+ if (num_loops > 0) {
+ PrintF("(");
+ for (int i = 0; i < num_loops; i++) {
+ if (i > 0) PrintF(" ");
+ PrintF("B%d", loops[i].header->id());
+ }
+ PrintF(") ");
+ }
+ PrintF("-- \n");
+
+ for (int i = 0; i < static_cast<int>(order->size()); i++) {
+ BasicBlock* block = (*order)[i];
+ int bid = block->id();
+ PrintF("%5d:", i);
+ for (int i = 0; i < num_loops; i++) {
+ bool membership = loops[i].members->Contains(bid);
+ bool range = loops[i].header->LoopContains(block);
+ PrintF(membership ? " |" : " ");
+ PrintF(range ? "x" : " ");
+ }
+ PrintF(" B%d: ", bid);
+ if (block->loop_end_ >= 0) {
+ PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
+ }
+ PrintF("\n");
+ }
+}
+
+
+static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
+ BasicBlockVector* order) {
+ DCHECK(order->size() > 0);
+ DCHECK((*order)[0]->id() == 0); // entry should be first.
+
+ for (int i = 0; i < num_loops; i++) {
+ LoopInfo* loop = &loops[i];
+ BasicBlock* header = loop->header;
+
+ DCHECK(header != NULL);
+ DCHECK(header->rpo_number_ >= 0);
+ DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
+ DCHECK(header->loop_end_ >= 0);
+ DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
+ DCHECK(header->loop_end_ > header->rpo_number_);
+
+ // Verify the start ... end list relationship.
+ int links = 0;
+ BlockList* l = loop->start;
+ DCHECK(l != NULL && l->block == header);
+ bool end_found;
+ while (true) {
+ if (l == NULL || l == loop->end) {
+ end_found = (loop->end == l);
+ break;
+ }
+ // The list should be in the same order as the final result.
+ DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
+ links++;
+ l = l->next;
+ DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
+ }
+ DCHECK(links > 0);
+ DCHECK(links == (header->loop_end_ - header->rpo_number_));
+ DCHECK(end_found);
+
+ // Check the contiguousness of loops.
+ int count = 0;
+ for (int j = 0; j < static_cast<int>(order->size()); j++) {
+ BasicBlock* block = order->at(j);
+ DCHECK(block->rpo_number_ == j);
+ if (j < header->rpo_number_ || j >= header->loop_end_) {
+ DCHECK(!loop->members->Contains(block->id()));
+ } else {
+ if (block == header) {
+ DCHECK(!loop->members->Contains(block->id()));
+ } else {
+ DCHECK(loop->members->Contains(block->id()));
+ }
+ count++;
+ }
+ }
+ DCHECK(links == count);
+ }
+}
+#endif // DEBUG
+
+
+// Compute the special reverse-post-order block ordering, which is essentially
+// a RPO of the graph where loop bodies are contiguous. Properties:
+// 1. If block A is a predecessor of B, then A appears before B in the order,
+// unless B is a loop header and A is in the loop headed at B
+// (i.e. A -> B is a backedge).
+// => If block A dominates block B, then A appears before B in the order.
+// => If block A is a loop header, A appears before all blocks in the loop
+// headed at A.
+// 2. All loops are contiguous in the order (i.e. no intervening blocks that
+// do not belong to the loop.)
+// Note that a simple RPO traversal satisfies (1) but not (2).
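+//
+// A small example (hypothetical CFG): B0 -> {B1, B3}, B1 -> {B2, B3} and
+// B2 -> B1 (a backedge). A plain RPO starting at B0 can produce
+// [B0, B1, B3, B2], splitting the loop {B1, B2} around B3, whereas the
+// special RPO keeps the loop body contiguous: [B0, B1, B2, B3].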
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
+ Zone tmp_zone(schedule->zone()->isolate());
+ Zone* zone = &tmp_zone;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("------------- COMPUTING SPECIAL RPO ---------------\n");
+ }
+ // RPO should not have been computed for this schedule yet.
+ CHECK_EQ(kBlockUnvisited1, schedule->entry()->rpo_number_);
+ CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
+
+ // Perform an iterative RPO traversal using an explicit stack,
+ // recording backedges that form cycles. O(|B|).
+ ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
+ SpecialRPOStackFrame* stack =
+ zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
+ BasicBlock* entry = schedule->entry();
+ BlockList* order = NULL;
+ int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
+ int num_loops = 0;
+
+ while (stack_depth > 0) {
+ int current = stack_depth - 1;
+ SpecialRPOStackFrame* frame = stack + current;
+
+ if (frame->index < frame->block->SuccessorCount()) {
+ // Process the next successor.
+ BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+ if (succ->rpo_number_ == kBlockVisited1) continue;
+ if (succ->rpo_number_ == kBlockOnStack) {
+ // The successor is on the stack, so this is a backedge (cycle).
+ backedges.Add(
+ std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
+ if (succ->loop_end_ < 0) {
+ // Assign a new loop number to the header if it doesn't have one.
+ succ->loop_end_ = num_loops++;
+ }
+ } else {
+ // Push the successor onto the stack.
+ DCHECK(succ->rpo_number_ == kBlockUnvisited1);
+ stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
+ }
+ } else {
+ // Finished with all successors; pop the stack and add the block.
+ order = order->Add(zone, frame->block);
+ frame->block->rpo_number_ = kBlockVisited1;
+ stack_depth--;
+ }
+ }
+
+ // If no loops were encountered, then the order we computed was correct.
+ LoopInfo* loops = NULL;
+ if (num_loops != 0) {
+ // Otherwise, compute the loop information from the backedges in order
+ // to perform a traversal that groups loop bodies together.
+ loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
+ &backedges);
+
+ // Initialize the "loop stack". Note the entry could be a loop header.
+ LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
+ order = NULL;
+
+ // Perform an iterative post-order traversal, visiting loop bodies before
+ // edges that lead out of loops. Visits each block once, but linking loop
+ // sections together is linear in the loop size, so the overall complexity is
+ // O(|B| + max(loop_depth) * max(|loop|))
+ stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
+ while (stack_depth > 0) {
+ SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
+ BasicBlock* block = frame->block;
+ BasicBlock* succ = NULL;
+
+ if (frame->index < block->SuccessorCount()) {
+ // Process the next normal successor.
+ succ = block->SuccessorAt(frame->index++);
+ } else if (block->IsLoopHeader()) {
+ // Process additional outgoing edges from the loop header.
+ if (block->rpo_number_ == kBlockOnStack) {
+ // Finish the loop body the first time the header is left on the
+ // stack.
+ DCHECK(loop != NULL && loop->header == block);
+ loop->start = order->Add(zone, block);
+ order = loop->end;
+ block->rpo_number_ = kBlockVisited2;
+ // Pop the loop stack and continue visiting outgoing edges within
+ // the context of the outer loop, if any.
+ loop = loop->prev;
+ // We leave the loop header on the stack; the rest of this iteration
+ // and later iterations will go through its outgoing edges list.
+ }
+
+ // Use the next outgoing edge if there is one.
+ int outgoing_index = frame->index - block->SuccessorCount();
+ LoopInfo* info = &loops[block->loop_end_];
+ DCHECK(loop != info);
+ if (info->outgoing != NULL &&
+ outgoing_index < info->outgoing->length()) {
+ succ = info->outgoing->at(outgoing_index);
+ frame->index++;
+ }
+ }
+
+ if (succ != NULL) {
+ // Process the next successor.
+ if (succ->rpo_number_ == kBlockOnStack) continue;
+ if (succ->rpo_number_ == kBlockVisited2) continue;
+ DCHECK(succ->rpo_number_ == kBlockUnvisited2);
+ if (loop != NULL && !loop->members->Contains(succ->id())) {
+ // The successor is not in the current loop or any nested loop.
+ // Add it to the outgoing edges of this loop and visit it later.
+ loop->AddOutgoing(zone, succ);
+ } else {
+ // Push the successor onto the stack.
+ stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
+ if (succ->IsLoopHeader()) {
+ // Push the inner loop onto the loop stack.
+ DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
+ LoopInfo* next = &loops[succ->loop_end_];
+ next->end = order;
+ next->prev = loop;
+ loop = next;
+ }
+ }
+ } else {
+ // Finished with all successors of the current block.
+ if (block->IsLoopHeader()) {
+ // If we are going to pop a loop header, then add its entire body.
+ LoopInfo* info = &loops[block->loop_end_];
+ for (BlockList* l = info->start; true; l = l->next) {
+ if (l->next == info->end) {
+ l->next = order;
+ info->end = order;
+ break;
+ }
+ }
+ order = info->start;
+ } else {
+ // Pop a single node off the stack and add it to the order.
+ order = order->Add(zone, block);
+ block->rpo_number_ = kBlockVisited2;
+ }
+ stack_depth--;
+ }
+ }
+ }
+
+ // Construct the final order from the list.
+ BasicBlockVector* final_order = &schedule->rpo_order_;
+ order->Serialize(final_order);
+
+ // Compute the correct loop header for every block and set the correct loop
+ // ends.
+ LoopInfo* current_loop = NULL;
+ BasicBlock* current_header = NULL;
+ int loop_depth = 0;
+ for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
+ ++i) {
+ BasicBlock* current = *i;
+ current->loop_header_ = current_header;
+ if (current->IsLoopHeader()) {
+ loop_depth++;
+ current_loop = &loops[current->loop_end_];
+ BlockList* end = current_loop->end;
+ current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
+ : end->block->rpo_number_;
+ current_header = current_loop->header;
+ if (FLAG_trace_turbo_scheduler) {
+ PrintF("Block %d is a loop header, increment loop depth to %d\n",
+ current->id(), loop_depth);
+ }
+ } else {
+ while (current_header != NULL &&
+ current->rpo_number_ >= current_header->loop_end_) {
+ DCHECK(current_header->IsLoopHeader());
+ DCHECK(current_loop != NULL);
+ current_loop = current_loop->prev;
+ current_header = current_loop == NULL ? NULL : current_loop->header;
+ --loop_depth;
+ }
+ }
+ current->loop_depth_ = loop_depth;
+ if (FLAG_trace_turbo_scheduler) {
+ if (current->loop_header_ == NULL) {
+ PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(),
+ current->loop_depth_);
+ } else {
+ PrintF("Block %d's loop header is block %d, loop depth %d\n",
+ current->id(), current->loop_header_->id(),
+ current->loop_depth_);
+ }
+ }
+ }
+
+#if DEBUG
+ if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
+ VerifySpecialRPO(num_loops, loops, final_order);
+#endif
+ return final_order;
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
new file mode 100644
index 000000000..db620edb5
--- /dev/null
+++ b/deps/v8/src/compiler/scheduler.h
@@ -0,0 +1,84 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULER_H_
+#define V8_COMPILER_SCHEDULER_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Computes a schedule from a graph, placing nodes into basic blocks and
+// ordering the basic blocks in the special RPO order.
+class Scheduler {
+ public:
+ // Create a new schedule and place all computations from the graph in it.
+ static Schedule* ComputeSchedule(Graph* graph);
+
+ // Compute the RPO of blocks in an existing schedule.
+ static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
+
+ private:
+ Graph* graph_;
+ Schedule* schedule_;
+ NodeVector branches_;
+ NodeVector calls_;
+ NodeVector deopts_;
+ NodeVector returns_;
+ NodeVector loops_and_merges_;
+ BasicBlockVector node_block_placement_;
+ IntVector unscheduled_uses_;
+ NodeVectorVector scheduled_nodes_;
+ NodeVector schedule_root_nodes_;
+ IntVector schedule_early_rpo_index_;
+
+ Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+
+ int GetRPONumber(BasicBlock* block) {
+ DCHECK(block->rpo_number_ >= 0 &&
+ block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
+ DCHECK(schedule_->rpo_order_[block->rpo_number_] == block);
+ return block->rpo_number_;
+ }
+
+ void PrepareAuxiliaryNodeData();
+ void PrepareAuxiliaryBlockData();
+
+ friend class CreateBlockVisitor;
+ void CreateBlocks();
+
+ void WireBlocks();
+
+ void AddPredecessorsForLoopsAndMerges();
+ void AddSuccessorsForBranches();
+ void AddSuccessorsForReturns();
+ void AddSuccessorsForCalls();
+ void AddSuccessorsForDeopts();
+
+ void GenerateImmediateDominatorTree();
+ BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
+ friend class ScheduleEarlyNodeVisitor;
+ void ScheduleEarly();
+
+ friend class PrepareUsesVisitor;
+ void PrepareUses();
+
+ friend class ScheduleLateNodeVisitor;
+ void ScheduleLate();
+};
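+
+// A minimal usage sketch (assuming {graph} is an already-built compiler
+// graph):
+//
+//   Schedule* schedule = Scheduler::ComputeSchedule(graph);
+//   BasicBlockVector* rpo = Scheduler::ComputeSpecialRPO(schedule);
+//
+// where {rpo} is the special reverse-post-order described above.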
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_SCHEDULER_H_
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
new file mode 100644
index 000000000..e32a51e13
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -0,0 +1,1014 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering.h"
+
+#include <deque>
+#include <queue>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Macro for outputting trace information from representation inference.
+#define TRACE(x) \
+ if (FLAG_trace_representation) PrintF x
+
+// Representation selection and lowering of {Simplified} operators to machine
+// operators are intertwined. We use a fixpoint calculation to compute both the
+// output representation and the best possible lowering for {Simplified} nodes.
+// Representation change insertion ensures that all values are in the correct
+// machine representation after this phase, as dictated by the machine
+// operators themselves.
+enum Phase {
+ // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
+ // backwards from uses to definitions, around cycles in phis, according
+ // to local rules for each operator.
+ // During this phase, the usage information for a node determines the best
+ // possible lowering for each operator so far, and that in turn determines
+ // the output representation.
+ // Therefore, to be correct, this phase must iterate to a fixpoint before
+ // the next phase can begin.
+ PROPAGATE,
+
+ // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+ // operators for some nodes, expanding some nodes to multiple nodes, or
+ // removing some (redundant) nodes.
+ // During this phase, use the {RepresentationChanger} to insert
+ // representation changes between uses that demand a particular
+ // representation and nodes that produce a different representation.
+ LOWER
+};
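+
+// For instance (paraphrasing the NumberAdd rule further below): a NumberAdd
+// node whose inputs are typed Signed32 and whose uses demand only truncated
+// int32 bits is rewritten to Int32Add during LOWER, while the same node
+// feeding a tagged use is lowered to Float64Add and relies on the
+// RepresentationChanger to insert changes on the edges to its uses.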
+
+
+class RepresentationSelector {
+ public:
+ // Information for each node tracked during the fixpoint.
+ struct NodeInfo {
+ RepTypeUnion use : 14; // Union of all usages for the node.
+ bool queued : 1; // Bookkeeping for the traversal.
+ bool visited : 1; // Bookkeeping for the traversal.
+ RepTypeUnion output : 14; // Output type of the node.
+ };
+
+ RepresentationSelector(JSGraph* jsgraph, Zone* zone,
+ RepresentationChanger* changer)
+ : jsgraph_(jsgraph),
+ count_(jsgraph->graph()->NodeCount()),
+ info_(zone->NewArray<NodeInfo>(count_)),
+ nodes_(NodeVector::allocator_type(zone)),
+ replacements_(NodeVector::allocator_type(zone)),
+ contains_js_nodes_(false),
+ phase_(PROPAGATE),
+ changer_(changer),
+ queue_(std::deque<Node*, NodePtrZoneAllocator>(
+ NodePtrZoneAllocator(zone))) {
+ memset(info_, 0, sizeof(NodeInfo) * count_);
+ }
+
+ void Run(SimplifiedLowering* lowering) {
+ // Run propagation phase to a fixpoint.
+ TRACE(("--{Propagation phase}--\n"));
+ phase_ = PROPAGATE;
+ Enqueue(jsgraph_->graph()->end());
+ // Process nodes from the queue until it is empty.
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ NodeInfo* info = GetInfo(node);
+ queue_.pop();
+ info->queued = false;
+ TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+ VisitNode(node, info->use, NULL);
+ TRACE((" ==> output "));
+ PrintInfo(info->output);
+ TRACE(("\n"));
+ }
+
+ // Run lowering and change insertion phase.
+ TRACE(("--{Simplified lowering phase}--\n"));
+ phase_ = LOWER;
+ // Process nodes from the collected {nodes_} vector.
+ for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
+ Node* node = *i;
+ TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+ // Reuse {VisitNode()} so the representation rules are in one place.
+ VisitNode(node, GetUseInfo(node), lowering);
+ }
+
+ // Perform the final replacements.
+ for (NodeVector::iterator i = replacements_.begin();
+ i != replacements_.end(); ++i) {
+ Node* node = *i;
+ Node* replacement = *(++i);
+ node->ReplaceUses(replacement);
+ }
+ }
+
+ // Enqueue {node} if the {use} contains new information for that node.
+ // Add {node} to {nodes_} if this is the first time it's been visited.
+ void Enqueue(Node* node, RepTypeUnion use = 0) {
+ if (phase_ != PROPAGATE) return;
+ NodeInfo* info = GetInfo(node);
+ if (!info->visited) {
+ // First visit of this node.
+ info->visited = true;
+ info->queued = true;
+ nodes_.push_back(node);
+ queue_.push(node);
+ TRACE((" initial: "));
+ info->use |= use;
+ PrintUseInfo(node);
+ return;
+ }
+ TRACE((" queue?: "));
+ PrintUseInfo(node);
+ if ((info->use & use) != use) {
+ // New usage information for the node is available.
+ if (!info->queued) {
+ queue_.push(node);
+ info->queued = true;
+ TRACE((" added: "));
+ } else {
+ TRACE((" inqueue: "));
+ }
+ info->use |= use;
+ PrintUseInfo(node);
+ }
+ }
+
+ bool lower() { return phase_ == LOWER; }
+
+ void Enqueue(Node* node, RepType use) {
+ Enqueue(node, static_cast<RepTypeUnion>(use));
+ }
+
+ void SetOutput(Node* node, RepTypeUnion output) {
+ // Every node should have at most one output representation. Note that
+ // phis can have 0, if they have not been used in a representation-inducing
+ // instruction.
+ DCHECK((output & rMask) == 0 || IsPowerOf2(output & rMask));
+ GetInfo(node)->output = output;
+ }
+
+ bool BothInputsAre(Node* node, Type* type) {
+ DCHECK_EQ(2, node->InputCount());
+ return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
+ NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
+ }
+
+ void ProcessInput(Node* node, int index, RepTypeUnion use) {
+ Node* input = node->InputAt(index);
+ if (phase_ == PROPAGATE) {
+ // In the propagate phase, propagate the usage information backward.
+ Enqueue(input, use);
+ } else {
+ // In the change phase, insert a change before the use if necessary.
+ if ((use & rMask) == 0) return; // No input requirement on the use.
+ RepTypeUnion output = GetInfo(input)->output;
+ if ((output & rMask & use) == 0) {
+ // Output representation doesn't match usage.
+ TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
+ node->op()->mnemonic(), index, input->id(),
+ input->op()->mnemonic()));
+ TRACE((" from "));
+ PrintInfo(output);
+ TRACE((" to "));
+ PrintInfo(use);
+ TRACE(("\n"));
+ Node* n = changer_->GetRepresentationFor(input, output, use);
+ node->ReplaceInput(index, n);
+ }
+ }
+ }
+
+ static const RepTypeUnion kFloat64 = rFloat64 | tNumber;
+ static const RepTypeUnion kInt32 = rWord32 | tInt32;
+ static const RepTypeUnion kUint32 = rWord32 | tUint32;
+ static const RepTypeUnion kInt64 = rWord64 | tInt64;
+ static const RepTypeUnion kUint64 = rWord64 | tUint64;
+ static const RepTypeUnion kAnyTagged = rTagged | tAny;
+
+ // The default, most general visitation case. For {node}, process all value,
+ // context, effect, and control inputs, assuming that value inputs should have
+ // {rTagged} representation and can observe all output values {tAny}.
+ void VisitInputs(Node* node) {
+ InputIter i = node->inputs().begin();
+ for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+ ++i, j--) {
+ ProcessInput(node, i.index(), kAnyTagged); // Value inputs
+ }
+ for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+ ++i, j--) {
+ ProcessInput(node, i.index(), kAnyTagged); // Context inputs
+ }
+ for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+ ++i, j--) {
+ Enqueue(*i); // Effect inputs: just visit
+ }
+ for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+ ++i, j--) {
+ Enqueue(*i); // Control inputs: just visit
+ }
+ SetOutput(node, kAnyTagged);
+ }
+
+ // Helper for binops of the I x I -> O variety.
+ void VisitBinop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+ DCHECK_EQ(2, node->InputCount());
+ ProcessInput(node, 0, input_use);
+ ProcessInput(node, 1, input_use);
+ SetOutput(node, output);
+ }
+
+ // Helper for unops of the I -> O variety.
+ void VisitUnop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+ DCHECK_EQ(1, node->InputCount());
+ ProcessInput(node, 0, input_use);
+ SetOutput(node, output);
+ }
+
+ // Helper for leaf nodes.
+ void VisitLeaf(Node* node, RepTypeUnion output) {
+ DCHECK_EQ(0, node->InputCount());
+ SetOutput(node, output);
+ }
+
+ // Helpers for specific types of binops.
+ void VisitFloat64Binop(Node* node) { VisitBinop(node, kFloat64, kFloat64); }
+ void VisitInt32Binop(Node* node) { VisitBinop(node, kInt32, kInt32); }
+ void VisitUint32Binop(Node* node) { VisitBinop(node, kUint32, kUint32); }
+ void VisitInt64Binop(Node* node) { VisitBinop(node, kInt64, kInt64); }
+ void VisitUint64Binop(Node* node) { VisitBinop(node, kUint64, kUint64); }
+ void VisitFloat64Cmp(Node* node) { VisitBinop(node, kFloat64, rBit); }
+ void VisitInt32Cmp(Node* node) { VisitBinop(node, kInt32, rBit); }
+ void VisitUint32Cmp(Node* node) { VisitBinop(node, kUint32, rBit); }
+ void VisitInt64Cmp(Node* node) { VisitBinop(node, kInt64, rBit); }
+ void VisitUint64Cmp(Node* node) { VisitBinop(node, kUint64, rBit); }
+
+ // Helper for handling phis.
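+ // For example, a phi with one use demanding rFloat64 and another demanding
+ // rWord32 selects rFloat64 (the stronger representation in the ordering
+ // below); the rWord32 use then receives a representation change at its
+ // input.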
+ void VisitPhi(Node* node, RepTypeUnion use) {
+ // First, propagate the usage information to inputs of the phi.
+ int values = OperatorProperties::GetValueInputCount(node->op());
+ Node::Inputs inputs = node->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter, --values) {
+ // Propagate {use} of the phi to value inputs, and 0 to control.
+ // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+ ProcessInput(node, iter.index(), values > 0 ? use : 0);
+ }
+ // Phis adapt to whatever output representation their uses demand,
+ // pushing representation changes to their inputs.
+ RepTypeUnion use_rep = GetUseInfo(node) & rMask;
+ RepTypeUnion use_type = GetUseInfo(node) & tMask;
+ RepTypeUnion rep = 0;
+ if (use_rep & rTagged) {
+ rep = rTagged; // Tagged overrides everything.
+ } else if (use_rep & rFloat64) {
+ rep = rFloat64;
+ } else if (use_rep & rWord64) {
+ rep = rWord64;
+ } else if (use_rep & rWord32) {
+ rep = rWord32;
+ } else if (use_rep & rBit) {
+ rep = rBit;
+ } else {
+ // There was no representation associated with any of the uses.
+ // TODO(titzer): Select the best rep using phi's type, not the usage type?
+ if (use_type & tAny) {
+ rep = rTagged;
+ } else if (use_type & tNumber) {
+ rep = rFloat64;
+ } else if (use_type & tInt64 || use_type & tUint64) {
+ rep = rWord64;
+ } else if (use_type & tInt32 || use_type & tUint32) {
+ rep = rWord32;
+ } else if (use_type & tBool) {
+ rep = rBit;
+ } else {
+ UNREACHABLE(); // should have at least a usage type!
+ }
+ }
+ // Preserve the usage type, but set the representation.
+ Type* upper = NodeProperties::GetBounds(node).upper;
+ SetOutput(node, rep | changer_->TypeFromUpperBound(upper));
+ }
+
+ Operator* Int32Op(Node* node) {
+ return changer_->Int32OperatorFor(node->opcode());
+ }
+
+ Operator* Uint32Op(Node* node) {
+ return changer_->Uint32OperatorFor(node->opcode());
+ }
+
+ Operator* Float64Op(Node* node) {
+ return changer_->Float64OperatorFor(node->opcode());
+ }
+
+ // Dispatching routine for visiting the node {node} with the usage {use}.
+ // Depending on the operator, propagate new usage info to the inputs.
+ void VisitNode(Node* node, RepTypeUnion use, SimplifiedLowering* lowering) {
+ switch (node->opcode()) {
+ //------------------------------------------------------------------
+ // Common operators.
+ //------------------------------------------------------------------
+ case IrOpcode::kStart:
+ case IrOpcode::kDead:
+ return VisitLeaf(node, 0);
+ case IrOpcode::kParameter: {
+ // TODO(titzer): use representation from linkage.
+ Type* upper = NodeProperties::GetBounds(node).upper;
+ ProcessInput(node, 0, 0);
+ SetOutput(node, rTagged | changer_->TypeFromUpperBound(upper));
+ return;
+ }
+ case IrOpcode::kInt32Constant:
+ return VisitLeaf(node, rWord32);
+ case IrOpcode::kInt64Constant:
+ return VisitLeaf(node, rWord64);
+ case IrOpcode::kFloat64Constant:
+ return VisitLeaf(node, rFloat64);
+ case IrOpcode::kExternalConstant:
+ return VisitLeaf(node, rPtr);
+ case IrOpcode::kNumberConstant:
+ return VisitLeaf(node, rTagged);
+ case IrOpcode::kHeapConstant:
+ return VisitLeaf(node, rTagged);
+
+ case IrOpcode::kEnd:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kReturn:
+ case IrOpcode::kMerge:
+ case IrOpcode::kThrow:
+ return VisitInputs(node); // default visit for all node inputs.
+
+ case IrOpcode::kBranch:
+ ProcessInput(node, 0, rBit);
+ Enqueue(NodeProperties::GetControlInput(node, 0));
+ break;
+ case IrOpcode::kPhi:
+ return VisitPhi(node, use);
+
+//------------------------------------------------------------------
+// JavaScript operators.
+//------------------------------------------------------------------
+// For now, we assume that all JS operators were too complex to lower
+// to Simplified and that they will always require tagged value inputs
+// and produce tagged value outputs.
+// TODO(turbofan): it might be possible to lower some JSOperators here,
+// but that responsibility really lies in the typed lowering phase.
+#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
+ JS_OP_LIST(DEFINE_JS_CASE)
+#undef DEFINE_JS_CASE
+ contains_js_nodes_ = true;
+ VisitInputs(node);
+ return SetOutput(node, rTagged);
+
+ //------------------------------------------------------------------
+ // Simplified operators.
+ //------------------------------------------------------------------
+ case IrOpcode::kBooleanNot: {
+ if (lower()) {
+ RepTypeUnion input = GetInfo(node->InputAt(0))->output;
+ if (input & rBit) {
+ // BooleanNot(x: rBit) => WordEqual(x, #0)
+ node->set_op(lowering->machine()->WordEqual());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+ } else {
+ // BooleanNot(x: rTagged) => WordEqual(x, #false)
+ node->set_op(lowering->machine()->WordEqual());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+ }
+ } else {
+ // No input representation requirement; adapt during lowering.
+ ProcessInput(node, 0, tBool);
+ SetOutput(node, rBit);
+ }
+ break;
+ }
+ case IrOpcode::kNumberEqual:
+ case IrOpcode::kNumberLessThan:
+ case IrOpcode::kNumberLessThanOrEqual: {
+ // Number comparisons reduce to integer comparisons for integer inputs.
+ if (BothInputsAre(node, Type::Signed32())) {
+ // => signed Int32Cmp
+ VisitInt32Cmp(node);
+ if (lower()) node->set_op(Int32Op(node));
+ } else if (BothInputsAre(node, Type::Unsigned32())) {
+ // => unsigned Int32Cmp
+ VisitUint32Cmp(node);
+ if (lower()) node->set_op(Uint32Op(node));
+ } else {
+ // => Float64Cmp
+ VisitFloat64Cmp(node);
+ if (lower()) node->set_op(Float64Op(node));
+ }
+ break;
+ }
+ case IrOpcode::kNumberAdd:
+ case IrOpcode::kNumberSubtract: {
+ // Add and subtract reduce to Int32Add/Sub if the inputs
+ // are already integers and all uses are truncating.
+ if (BothInputsAre(node, Type::Signed32()) &&
+ (use & (tUint32 | tNumber | tAny)) == 0) {
+ // => signed Int32Add/Sub
+ VisitInt32Binop(node);
+ if (lower()) node->set_op(Int32Op(node));
+ } else if (BothInputsAre(node, Type::Unsigned32()) &&
+ (use & (tInt32 | tNumber | tAny)) == 0) {
+ // => unsigned Int32Add/Sub
+ VisitUint32Binop(node);
+ if (lower()) node->set_op(Uint32Op(node));
+ } else {
+ // => Float64Add/Sub
+ VisitFloat64Binop(node);
+ if (lower()) node->set_op(Float64Op(node));
+ }
+ break;
+ }
+ case IrOpcode::kNumberMultiply:
+ case IrOpcode::kNumberDivide:
+ case IrOpcode::kNumberModulus: {
+ // Float64Mul/Div/Mod
+ VisitFloat64Binop(node);
+ if (lower()) node->set_op(Float64Op(node));
+ break;
+ }
+ case IrOpcode::kNumberToInt32: {
+ RepTypeUnion use_rep = use & rMask;
+ if (lower()) {
+ RepTypeUnion in = GetInfo(node->InputAt(0))->output;
+ if ((in & tMask) == tInt32 || (in & rMask) == rWord32) {
+ // If the input has type int32, or is already a word32, just change
+ // representation if necessary.
+ VisitUnop(node, tInt32 | use_rep, tInt32 | use_rep);
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ // Require the input in float64 format and perform truncation.
+ // TODO(turbofan): could also avoid the truncation with a tag check.
+ VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
+ // TODO(titzer): should be a truncation.
+ node->set_op(lowering->machine()->ChangeFloat64ToInt32());
+ }
+ } else {
+ // Propagate a type to the input, but pass through representation.
+ VisitUnop(node, tInt32, tInt32 | use_rep);
+ }
+ break;
+ }
+ case IrOpcode::kNumberToUint32: {
+ RepTypeUnion use_rep = use & rMask;
+ if (lower()) {
+ RepTypeUnion in = GetInfo(node->InputAt(0))->output;
+ if ((in & tMask) == tUint32 || (in & rMask) == rWord32) {
+ // If the input has type uint32, or is already a word32, just change
+ // representation if necessary.
+ VisitUnop(node, tUint32 | use_rep, tUint32 | use_rep);
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ // Require the input in float64 format to perform truncation.
+ // TODO(turbofan): could also avoid the truncation with a tag check.
+ VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
+ // TODO(titzer): should be a truncation.
+ node->set_op(lowering->machine()->ChangeFloat64ToUint32());
+ }
+ } else {
+ // Propagate a type to the input, but pass through representation.
+ VisitUnop(node, tUint32, tUint32 | use_rep);
+ }
+ break;
+ }
+ case IrOpcode::kReferenceEqual: {
+ VisitBinop(node, kAnyTagged, rBit);
+ if (lower()) node->set_op(lowering->machine()->WordEqual());
+ break;
+ }
+ case IrOpcode::kStringEqual: {
+ VisitBinop(node, kAnyTagged, rBit);
+ // TODO(titzer): lower StringEqual to stub/runtime call.
+ break;
+ }
+ case IrOpcode::kStringLessThan: {
+ VisitBinop(node, kAnyTagged, rBit);
+ // TODO(titzer): lower StringLessThan to stub/runtime call.
+ break;
+ }
+ case IrOpcode::kStringLessThanOrEqual: {
+ VisitBinop(node, kAnyTagged, rBit);
+ // TODO(titzer): lower StringLessThanOrEqual to stub/runtime call.
+ break;
+ }
+ case IrOpcode::kStringAdd: {
+ VisitBinop(node, kAnyTagged, kAnyTagged);
+ // TODO(titzer): lower StringAdd to stub/runtime call.
+ break;
+ }
+ case IrOpcode::kLoadField: {
+ FieldAccess access = FieldAccessOf(node->op());
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ SetOutput(node, changer_->TypeForField(access));
+ if (lower()) lowering->DoLoadField(node);
+ break;
+ }
+ case IrOpcode::kStoreField: {
+ FieldAccess access = FieldAccessOf(node->op());
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 1, changer_->TypeForField(access));
+ SetOutput(node, 0);
+ if (lower()) lowering->DoStoreField(node);
+ break;
+ }
+ case IrOpcode::kLoadElement: {
+ ElementAccess access = ElementAccessOf(node->op());
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 1, kInt32); // element index
+ SetOutput(node, changer_->TypeForElement(access));
+ if (lower()) lowering->DoLoadElement(node);
+ break;
+ }
+ case IrOpcode::kStoreElement: {
+ ElementAccess access = ElementAccessOf(node->op());
+ ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 1, kInt32); // element index
+ ProcessInput(node, 2, changer_->TypeForElement(access));
+ SetOutput(node, 0);
+ if (lower()) lowering->DoStoreElement(node);
+ break;
+ }
+
+ //------------------------------------------------------------------
+ // Machine-level operators.
+ //------------------------------------------------------------------
+ case IrOpcode::kLoad: {
+ // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+ RepType tBase = rTagged;
+ MachineType rep = OpParameter<MachineType>(node);
+ ProcessInput(node, 0, tBase); // pointer or object
+ ProcessInput(node, 1, kInt32); // index
+ SetOutput(node, changer_->TypeForMachineType(rep));
+ break;
+ }
+ case IrOpcode::kStore: {
+ // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+ RepType tBase = rTagged;
+ StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
+ ProcessInput(node, 0, tBase); // pointer or object
+ ProcessInput(node, 1, kInt32); // index
+ ProcessInput(node, 2, changer_->TypeForMachineType(rep.rep));
+ SetOutput(node, 0);
+ break;
+ }
+ case IrOpcode::kWord32Shr:
+ // We output unsigned int32 for shift right because JavaScript's unsigned
+ // shift right (>>>) produces an unsigned result.
+ return VisitBinop(node, rWord32, rWord32 | tUint32);
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord32Or:
+ case IrOpcode::kWord32Xor:
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord32Sar:
+ // We use signed int32 as the output type for these word32 operations,
+ // though the machine bits are the same for either signed or unsigned,
+ // because JavaScript considers the result from these operations signed.
+ return VisitBinop(node, rWord32, rWord32 | tInt32);
+ case IrOpcode::kWord32Equal:
+ return VisitBinop(node, rWord32, rBit);
+
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt32Sub:
+ case IrOpcode::kInt32Mul:
+ case IrOpcode::kInt32Div:
+ case IrOpcode::kInt32Mod:
+ return VisitInt32Binop(node);
+ case IrOpcode::kInt32UDiv:
+ case IrOpcode::kInt32UMod:
+ return VisitUint32Binop(node);
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ return VisitInt32Cmp(node);
+
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ return VisitUint32Cmp(node);
+
+ case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64Sub:
+ case IrOpcode::kInt64Mul:
+ case IrOpcode::kInt64Div:
+ case IrOpcode::kInt64Mod:
+ return VisitInt64Binop(node);
+ case IrOpcode::kInt64LessThan:
+ case IrOpcode::kInt64LessThanOrEqual:
+ return VisitInt64Cmp(node);
+
+ case IrOpcode::kInt64UDiv:
+ case IrOpcode::kInt64UMod:
+ return VisitUint64Binop(node);
+
+ case IrOpcode::kWord64And:
+ case IrOpcode::kWord64Or:
+ case IrOpcode::kWord64Xor:
+ case IrOpcode::kWord64Shl:
+ case IrOpcode::kWord64Shr:
+ case IrOpcode::kWord64Sar:
+ return VisitBinop(node, rWord64, rWord64);
+ case IrOpcode::kWord64Equal:
+ return VisitBinop(node, rWord64, rBit);
+
+ case IrOpcode::kConvertInt32ToInt64:
+ return VisitUnop(node, tInt32 | rWord32, tInt32 | rWord64);
+ case IrOpcode::kConvertInt64ToInt32:
+ return VisitUnop(node, tInt64 | rWord64, tInt32 | rWord32);
+
+ case IrOpcode::kChangeInt32ToFloat64:
+ return VisitUnop(node, tInt32 | rWord32, tInt32 | rFloat64);
+ case IrOpcode::kChangeUint32ToFloat64:
+ return VisitUnop(node, tUint32 | rWord32, tUint32 | rFloat64);
+ case IrOpcode::kChangeFloat64ToInt32:
+ return VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
+ case IrOpcode::kChangeFloat64ToUint32:
+ return VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
+
+ case IrOpcode::kFloat64Add:
+ case IrOpcode::kFloat64Sub:
+ case IrOpcode::kFloat64Mul:
+ case IrOpcode::kFloat64Div:
+ case IrOpcode::kFloat64Mod:
+ return VisitFloat64Binop(node);
+ case IrOpcode::kFloat64Equal:
+ case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return VisitFloat64Cmp(node);
+ default:
+ VisitInputs(node);
+ break;
+ }
+ }
+
+ void DeferReplacement(Node* node, Node* replacement) {
+ if (replacement->id() < count_) {
+ // Replace with a previously existing node eagerly.
+ node->ReplaceUses(replacement);
+ } else {
+ // Otherwise, we are replacing a node with a representation change.
+ // Such a substitution must be deferred until all lowering is done, because
+ // newly created nodes do not have {NodeInfo} entries, which would confuse
+ // representation change insertion for their uses.
+ replacements_.push_back(node);
+ replacements_.push_back(replacement);
+ }
+ // TODO(titzer) node->RemoveAllInputs(); // Node is now dead.
+ }
+
+ void PrintUseInfo(Node* node) {
+ TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
+ PrintInfo(GetUseInfo(node));
+ TRACE(("\n"));
+ }
+
+ void PrintInfo(RepTypeUnion info) {
+ if (FLAG_trace_representation) {
+ char buf[REP_TYPE_STRLEN];
+ RenderRepTypeUnion(buf, info);
+ TRACE(("%s", buf));
+ }
+ }
+
+ private:
+ JSGraph* jsgraph_;
+ int count_; // number of nodes in the graph
+ NodeInfo* info_; // node id -> usage information
+ NodeVector nodes_; // collected nodes
+ NodeVector replacements_; // replacements to be done after lowering
+ bool contains_js_nodes_; // {true} if a JS operator was seen
+ Phase phase_; // current phase of algorithm
+ RepresentationChanger* changer_; // for inserting representation changes
+
+ std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_;
+
+ NodeInfo* GetInfo(Node* node) {
+ DCHECK(node->id() >= 0);
+ DCHECK(node->id() < count_);
+ return &info_[node->id()];
+ }
+
+ RepTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
+};
+
+
+Node* SimplifiedLowering::IsTagged(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
+ return graph()->NewNode(machine()->WordAnd(), node,
+ jsgraph()->Int32Constant(kSmiTagMask));
+}
+
+
+void SimplifiedLowering::LowerAllNodes() {
+ SimplifiedOperatorBuilder simplified(graph()->zone());
+ RepresentationChanger changer(jsgraph(), &simplified, machine(),
+ graph()->zone()->isolate());
+ RepresentationSelector selector(jsgraph(), zone(), &changer);
+ selector.Run(this);
+
+ LoweringBuilder::LowerAllNodes();
+}
+
+
+Node* SimplifiedLowering::Untag(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+ return graph()->NewNode(machine()->WordSar(), node, shift_amount);
+}
+
+
+Node* SimplifiedLowering::SmiTag(Node* node) {
+ // TODO(titzer): factor this out to a TaggingScheme abstraction.
+ Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+ return graph()->NewNode(machine()->WordShl(), node, shift_amount);
+}
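+
+// For example (assuming the 32-bit layout where kSmiTagSize == 1 and
+// kSmiShiftSize == 0): SmiTag turns the word 5 into 5 << 1 == 10, Untag
+// shifts it back with an arithmetic shift, and IsTagged masks out the low
+// tag bit, which is 0 for smis and 1 for heap object pointers.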
+
+
+Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
+ return jsgraph()->Int32Constant(offset - kHeapObjectTag);
+}
+
+
+static void UpdateControlSuccessors(Node* before, Node* node) {
+ DCHECK(IrOpcode::IsControlOpcode(before->opcode()));
+ UseIter iter = before->uses().begin();
+ while (iter != before->uses().end()) {
+ if (IrOpcode::IsControlOpcode((*iter)->opcode()) &&
+ NodeProperties::IsControlEdge(iter.edge())) {
+ iter = iter.UpdateToAndIncrement(node);
+ continue;
+ }
+ ++iter;
+ }
+}
+
+
+void SimplifiedLowering::DoChangeTaggedToUI32(Node* node, Node* effect,
+ Node* control, bool is_signed) {
+ // if (IsTagged(val))
+ // ConvertFloat64To(Int32|Uint32)(Load[kMachineFloat64](input, #value_offset))
+ // else Untag(val)
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* loaded = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
+ Operator* op = is_signed ? machine()->ChangeFloat64ToInt32()
+ : machine()->ChangeFloat64ToUint32();
+ Node* converted = graph()->NewNode(op, loaded);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* untagged = Untag(val);
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), converted, untagged, merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
+
+
+void SimplifiedLowering::DoChangeTaggedToFloat64(Node* node, Node* effect,
+ Node* control) {
+ // if (IsTagged(input)) Load[kMachineFloat64](input, #value_offset)
+ // else ChangeInt32ToFloat64(Untag(input))
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), IsTagged(val), control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* loaded = graph()->NewNode(
+ machine()->Load(kMachineFloat64), val,
+ OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* untagged = Untag(val);
+ Node* converted =
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), untagged);
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), loaded, converted, merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
+
+
+void SimplifiedLowering::DoChangeUI32ToTagged(Node* node, Node* effect,
+ Node* control, bool is_signed) {
+ Node* val = node->InputAt(0);
+ Node* is_smi = NULL;
+ if (is_signed) {
+ if (SmiValuesAre32Bits()) {
+ // All int32s fit in this case.
+ DCHECK(kPointerSize == 8);
+ return node->ReplaceUses(SmiTag(val));
+ } else {
+ // TODO(turbofan): use an Int32AddWithOverflow to tag and check here.
+ Node* lt = graph()->NewNode(machine()->Int32LessThanOrEqual(), val,
+ jsgraph()->Int32Constant(Smi::kMaxValue));
+ Node* gt =
+ graph()->NewNode(machine()->Int32LessThanOrEqual(),
+ jsgraph()->Int32Constant(Smi::kMinValue), val);
+ is_smi = graph()->NewNode(machine()->Word32And(), lt, gt);
+ }
+ } else {
+ // Check if Uint32 value is in the smi range.
+ is_smi = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
+ jsgraph()->Int32Constant(Smi::kMaxValue));
+ }
+
+ // TODO(turbofan): fold smi test branch eagerly.
+ // if (IsSmi(input)) SmiTag(input);
+ // else InlineAllocAndInitHeapNumber(ConvertToFloat64(input)))
+ Node* branch = graph()->NewNode(common()->Branch(), is_smi, control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* smi_tagged = SmiTag(val);
+
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ Node* heap_num = jsgraph()->Constant(0.0); // TODO(titzer): alloc and init
+
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), smi_tagged, heap_num, merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
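+// Note that on 32-bit targets Smi::kMaxValue is 2^30 - 1, so for example the
+// uint32 value 0x7fffffff fails the range check above and takes the HeapNumber
+// path (currently still a placeholder constant, see the TODO).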
+
+
+void SimplifiedLowering::DoChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control) {
+ return; // TODO(titzer): need to call runtime to allocate in one branch
+}
+
+
+void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
+ Node* control) {
+ Node* cmp = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+ jsgraph()->TrueConstant());
+ node->ReplaceUses(cmp);
+}
+
+
+void SimplifiedLowering::DoChangeBitToBool(Node* node, Node* effect,
+ Node* control) {
+ Node* val = node->InputAt(0);
+ Node* branch = graph()->NewNode(common()->Branch(), val, control);
+
+ // true branch.
+ Node* tbranch = graph()->NewNode(common()->IfTrue(), branch);
+ // false branch.
+ Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
+ // merge.
+ Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
+ Node* phi = graph()->NewNode(common()->Phi(2), jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), merge);
+ UpdateControlSuccessors(control, merge);
+ branch->ReplaceInput(1, control);
+ node->ReplaceUses(phi);
+}
+
+
+static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineType representation,
+ Type* type) {
+ // TODO(turbofan): skip write barriers for Smis, etc.
+ if (base_is_tagged == kTaggedBase && representation == kMachineTagged) {
+ // Write barriers are only for writes into heap objects (i.e. tagged base).
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+}
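+// For example, storing a tagged value into a field of a heap object gets the
+// full write barrier, while storing an untagged float64 into the same object,
+// or any store through an untagged (off-heap) base pointer, gets none.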
+
+
+void SimplifiedLowering::DoLoadField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ node->set_op(machine_.Load(access.representation));
+ Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+ node->InsertInput(zone(), 1, offset);
+}
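+// For instance, a LoadField of a tagged-base float64 field at offset 8 lowers
+// to Load[kMachineFloat64](object, #7): kHeapObjectTag (1) is folded into the
+// constant offset operand that is inserted as input 1.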
+
+
+void SimplifiedLowering::DoStoreField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.representation, access.type);
+ node->set_op(machine_.Store(access.representation, kind));
+ Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+ node->InsertInput(zone(), 1, offset);
+}
+
+
+Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
+ Node* index) {
+ int element_size = 0;
+ switch (access.representation) {
+ case kMachineTagged:
+ element_size = kPointerSize;
+ break;
+ case kMachineWord8:
+ element_size = 1;
+ break;
+ case kMachineWord16:
+ element_size = 2;
+ break;
+ case kMachineWord32:
+ element_size = 4;
+ break;
+ case kMachineWord64:
+ case kMachineFloat64:
+ element_size = 8;
+ break;
+ case kMachineLast:
+ UNREACHABLE();
+ break;
+ }
+ if (element_size != 1) {
+ index = graph()->NewNode(machine()->Int32Mul(),
+ jsgraph()->Int32Constant(element_size), index);
+ }
+ int fixed_offset = access.header_size - access.tag();
+ if (fixed_offset == 0) return index;
+ return graph()->NewNode(machine()->Int32Add(), index,
+ jsgraph()->Int32Constant(fixed_offset));
+}
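+// Example: for kMachineFloat64 elements behind a tagged base with a header of
+// FixedDoubleArray::kHeaderSize bytes, index i becomes the byte offset
+// 8 * i + FixedDoubleArray::kHeaderSize - kHeapObjectTag, which is what the
+// machine-level Load/Store in DoLoadElement and DoStoreElement below expect.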
+
+
+void SimplifiedLowering::DoLoadElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ node->set_op(machine_.Load(access.representation));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+}
+
+
+void SimplifiedLowering::DoStoreElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.representation, access.type);
+ node->set_op(machine_.Store(access.representation, kind));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+}
+
+
+void SimplifiedLowering::Lower(Node* node) {}
+
+
+void SimplifiedLowering::LowerChange(Node* node, Node* effect, Node* control) {
+ switch (node->opcode()) {
+ case IrOpcode::kChangeTaggedToInt32:
+ DoChangeTaggedToUI32(node, effect, control, true);
+ break;
+ case IrOpcode::kChangeTaggedToUint32:
+ DoChangeTaggedToUI32(node, effect, control, false);
+ break;
+ case IrOpcode::kChangeTaggedToFloat64:
+ DoChangeTaggedToFloat64(node, effect, control);
+ break;
+ case IrOpcode::kChangeInt32ToTagged:
+ DoChangeUI32ToTagged(node, effect, control, true);
+ break;
+ case IrOpcode::kChangeUint32ToTagged:
+ DoChangeUI32ToTagged(node, effect, control, false);
+ break;
+ case IrOpcode::kChangeFloat64ToTagged:
+ DoChangeFloat64ToTagged(node, effect, control);
+ break;
+ case IrOpcode::kChangeBoolToBit:
+ DoChangeBoolToBit(node, effect, control);
+ break;
+ case IrOpcode::kChangeBitToBool:
+ DoChangeBitToBool(node, effect, control);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
new file mode 100644
index 000000000..c85515d94
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -0,0 +1,71 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
+#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/lowering-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedLowering : public LoweringBuilder {
+ public:
+ explicit SimplifiedLowering(JSGraph* jsgraph,
+ SourcePositionTable* source_positions)
+ : LoweringBuilder(jsgraph->graph(), source_positions),
+ jsgraph_(jsgraph),
+ machine_(jsgraph->zone()) {}
+ virtual ~SimplifiedLowering() {}
+
+ void LowerAllNodes();
+
+ virtual void Lower(Node* node);
+ void LowerChange(Node* node, Node* effect, Node* control);
+
+ // TODO(titzer): These are exposed for direct testing. Use a friend class.
+ void DoLoadField(Node* node);
+ void DoStoreField(Node* node);
+ void DoLoadElement(Node* node);
+ void DoStoreElement(Node* node);
+
+ private:
+ JSGraph* jsgraph_;
+ MachineOperatorBuilder machine_;
+
+ Node* SmiTag(Node* node);
+ Node* IsTagged(Node* node);
+ Node* Untag(Node* node);
+ Node* OffsetMinusTagConstant(int32_t offset);
+ Node* ComputeIndex(const ElementAccess& access, Node* index);
+
+ void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control,
+ bool is_signed);
+ void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control,
+ bool is_signed);
+ void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
+ void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
+ void DoChangeBoolToBit(Node* node, Node* effect, Node* control);
+ void DoChangeBitToBool(Node* node, Node* effect, Node* control);
+
+ friend class RepresentationSelector;
+
+ Zone* zone() { return jsgraph_->zone(); }
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph()->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
+ MachineOperatorBuilder* machine() { return &machine_; }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMPLIFIED_LOWERING_H_
diff --git a/deps/v8/src/compiler/simplified-node-factory.h b/deps/v8/src/compiler/simplified-node-factory.h
new file mode 100644
index 000000000..8660ce670
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-node-factory.h
@@ -0,0 +1,128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
+#define V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define SIMPLIFIED() static_cast<NodeFactory*>(this)->simplified()
+#define NEW_NODE_1(op, a) static_cast<NodeFactory*>(this)->NewNode(op, a)
+#define NEW_NODE_2(op, a, b) static_cast<NodeFactory*>(this)->NewNode(op, a, b)
+#define NEW_NODE_3(op, a, b, c) \
+ static_cast<NodeFactory*>(this)->NewNode(op, a, b, c)
+
+template <typename NodeFactory>
+class SimplifiedNodeFactory {
+ public:
+ Node* BooleanNot(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->BooleanNot(), a);
+ }
+
+ Node* NumberEqual(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberEqual(), a, b);
+ }
+ Node* NumberNotEqual(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberNotEqual(), a, b);
+ }
+ Node* NumberLessThan(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberLessThan(), a, b);
+ }
+ Node* NumberLessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberLessThanOrEqual(), a, b);
+ }
+ Node* NumberAdd(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberAdd(), a, b);
+ }
+ Node* NumberSubtract(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberSubtract(), a, b);
+ }
+ Node* NumberMultiply(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberMultiply(), a, b);
+ }
+ Node* NumberDivide(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberDivide(), a, b);
+ }
+ Node* NumberModulus(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->NumberModulus(), a, b);
+ }
+ Node* NumberToInt32(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->NumberToInt32(), a);
+ }
+ Node* NumberToUint32(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->NumberToUint32(), a);
+ }
+
+ Node* ReferenceEqual(Type* type, Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->ReferenceEqual(), a, b);
+ }
+
+ Node* StringEqual(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->StringEqual(), a, b);
+ }
+ Node* StringLessThan(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->StringLessThan(), a, b);
+ }
+ Node* StringLessThanOrEqual(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->StringLessThanOrEqual(), a, b);
+ }
+ Node* StringAdd(Node* a, Node* b) {
+ return NEW_NODE_2(SIMPLIFIED()->StringAdd(), a, b);
+ }
+
+ Node* ChangeTaggedToInt32(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToInt32(), a);
+ }
+ Node* ChangeTaggedToUint32(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToUint32(), a);
+ }
+ Node* ChangeTaggedToFloat64(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeTaggedToFloat64(), a);
+ }
+ Node* ChangeInt32ToTagged(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeInt32ToTagged(), a);
+ }
+ Node* ChangeUint32ToTagged(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeUint32ToTagged(), a);
+ }
+ Node* ChangeFloat64ToTagged(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeFloat64ToTagged(), a);
+ }
+ Node* ChangeBoolToBit(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeBoolToBit(), a);
+ }
+ Node* ChangeBitToBool(Node* a) {
+ return NEW_NODE_1(SIMPLIFIED()->ChangeBitToBool(), a);
+ }
+
+ Node* LoadField(const FieldAccess& access, Node* object) {
+ return NEW_NODE_1(SIMPLIFIED()->LoadField(access), object);
+ }
+ Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
+ return NEW_NODE_2(SIMPLIFIED()->StoreField(access), object, value);
+ }
+ Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
+ return NEW_NODE_2(SIMPLIFIED()->LoadElement(access), object, index);
+ }
+ Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
+ Node* value) {
+ return NEW_NODE_3(SIMPLIFIED()->StoreElement(access), object, index, value);
+ }
+};
+
+#undef NEW_NODE_1
+#undef NEW_NODE_2
+#undef NEW_NODE_3
+#undef SIMPLIFIED
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMPLIFIED_NODE_FACTORY_H_
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
new file mode 100644
index 000000000..9cf08c370
--- /dev/null
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -0,0 +1,189 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+
+// An access descriptor for loads/stores of fixed structures like field
+// accesses of heap objects. Accesses from either tagged or untagged base
+// pointers are supported; untagging is done automatically during lowering.
+struct FieldAccess {
+ BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
+ int offset; // offset of the field, without tag.
+ Handle<Name> name; // debugging only.
+ Type* type; // type of the field.
+ MachineType representation; // machine representation of field.
+
+ int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
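+// For example, the float64 payload of a HeapNumber could be described as
+// (illustrative only; zone, name handle and type are whatever the caller has):
+//
+//   FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
+//                         Handle<Name>(), Type::Number(zone), kMachineFloat64};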
+
+
+// An access descriptor for loads/stores of indexed structures like characters
+// in strings or off-heap backing stores. Accesses from either tagged or
+// untagged base pointers are supported; untagging is done automatically during
+// lowering.
+struct ElementAccess {
+ BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
+ int header_size; // size of the header, without tag.
+ Type* type; // type of the element.
+ MachineType representation; // machine representation of element.
+
+ int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
+
+
+// If the accessed object is not a heap object, add this to the header_size.
+static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
+
+
+// Specialization for static parameters of type {FieldAccess}.
+template <>
+struct StaticParameterTraits<const FieldAccess> {
+ static OStream& PrintTo(OStream& os, const FieldAccess& val) { // NOLINT
+ return os << val.offset;
+ }
+ static int HashCode(const FieldAccess& val) {
+ return (val.offset << 16) | (val.representation & 0xffff);
+ }
+ static bool Equals(const FieldAccess& a, const FieldAccess& b) {
+ return a.base_is_tagged == b.base_is_tagged && a.offset == b.offset &&
+ a.representation == b.representation && a.type->Is(b.type);
+ }
+};
+
+
+// Specialization for static parameters of type {ElementAccess}.
+template <>
+struct StaticParameterTraits<const ElementAccess> {
+ static OStream& PrintTo(OStream& os, const ElementAccess& val) { // NOLINT
+ return os << val.header_size;
+ }
+ static int HashCode(const ElementAccess& val) {
+ return (val.header_size << 16) | (val.representation & 0xffff);
+ }
+ static bool Equals(const ElementAccess& a, const ElementAccess& b) {
+ return a.base_is_tagged == b.base_is_tagged &&
+ a.header_size == b.header_size &&
+ a.representation == b.representation && a.type->Is(b.type);
+ }
+};
+
+
+inline const FieldAccess FieldAccessOf(Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kLoadField ||
+ op->opcode() == IrOpcode::kStoreField);
+ return static_cast<Operator1<FieldAccess>*>(op)->parameter();
+}
+
+
+inline const ElementAccess ElementAccessOf(Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kLoadElement ||
+ op->opcode() == IrOpcode::kStoreElement);
+ return static_cast<Operator1<ElementAccess>*>(op)->parameter();
+}
+
+
+// Interface for building simplified operators, which represent the
+// medium-level operations of V8, including adding numbers, allocating objects,
+// indexing into objects and arrays, etc.
+// All operators are typed but many are representation independent.
+
+// Number values from JS can be in one of these representations:
+// - Tagged: word-sized integer that is either
+// - a signed small integer (31 or 32 bits plus a tag)
+// - a tagged pointer to a HeapNumber object that has a float64 field
+// - Int32: an untagged signed 32-bit integer
+// - Uint32: an untagged unsigned 32-bit integer
+// - Float64: an untagged float64
+
+// Additional representations for intermediate code or non-JS code:
+// - Int64: an untagged signed 64-bit integer
+// - Uint64: an untagged unsigned 64-bit integer
+// - Float32: an untagged float32
+
+// Boolean values can be:
+// - Bool: a tagged pointer to either the canonical JS #false or
+// the canonical JS #true object
+// - Bit: an untagged integer 0 or 1, but word-sized
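+//
+// A typical use (illustrative) is to instantiate the builder over a zone and
+// feed its operators to Graph::NewNode:
+//
+//   SimplifiedOperatorBuilder simplified(graph->zone());
+//   Node* sum = graph->NewNode(simplified.NumberAdd(), left, right);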
+class SimplifiedOperatorBuilder {
+ public:
+ explicit inline SimplifiedOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define SIMPLE(name, properties, inputs, outputs) \
+ return new (zone_) \
+ SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define OP1(name, ptype, pname, properties, inputs, outputs) \
+ return new (zone_) \
+ Operator1<ptype>(IrOpcode::k##name, properties | Operator::kNoThrow, \
+ inputs, outputs, #name, pname)
+
+#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
+#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+
+ Operator* BooleanNot() const { UNOP(BooleanNot); }
+
+ Operator* NumberEqual() const { BINOP(NumberEqual); }
+ Operator* NumberLessThan() const { BINOP(NumberLessThan); }
+ Operator* NumberLessThanOrEqual() const { BINOP(NumberLessThanOrEqual); }
+ Operator* NumberAdd() const { BINOP(NumberAdd); }
+ Operator* NumberSubtract() const { BINOP(NumberSubtract); }
+ Operator* NumberMultiply() const { BINOP(NumberMultiply); }
+ Operator* NumberDivide() const { BINOP(NumberDivide); }
+ Operator* NumberModulus() const { BINOP(NumberModulus); }
+ Operator* NumberToInt32() const { UNOP(NumberToInt32); }
+ Operator* NumberToUint32() const { UNOP(NumberToUint32); }
+
+ Operator* ReferenceEqual(Type* type) const { BINOP(ReferenceEqual); }
+
+ Operator* StringEqual() const { BINOP(StringEqual); }
+ Operator* StringLessThan() const { BINOP(StringLessThan); }
+ Operator* StringLessThanOrEqual() const { BINOP(StringLessThanOrEqual); }
+ Operator* StringAdd() const { BINOP(StringAdd); }
+
+ Operator* ChangeTaggedToInt32() const { UNOP(ChangeTaggedToInt32); }
+ Operator* ChangeTaggedToUint32() const { UNOP(ChangeTaggedToUint32); }
+ Operator* ChangeTaggedToFloat64() const { UNOP(ChangeTaggedToFloat64); }
+ Operator* ChangeInt32ToTagged() const { UNOP(ChangeInt32ToTagged); }
+ Operator* ChangeUint32ToTagged() const { UNOP(ChangeUint32ToTagged); }
+ Operator* ChangeFloat64ToTagged() const { UNOP(ChangeFloat64ToTagged); }
+ Operator* ChangeBoolToBit() const { UNOP(ChangeBoolToBit); }
+ Operator* ChangeBitToBool() const { UNOP(ChangeBitToBool); }
+
+ Operator* LoadField(const FieldAccess& access) const {
+ OP1(LoadField, FieldAccess, access, Operator::kNoWrite, 1, 1);
+ }
+ Operator* StoreField(const FieldAccess& access) const {
+ OP1(StoreField, FieldAccess, access, Operator::kNoRead, 2, 0);
+ }
+ Operator* LoadElement(const ElementAccess& access) const {
+ OP1(LoadElement, ElementAccess, access, Operator::kNoWrite, 2, 1);
+ }
+ Operator* StoreElement(const ElementAccess& access) const {
+ OP1(StoreElement, ElementAccess, access, Operator::kNoRead, 3, 0);
+ }
+
+#undef BINOP
+#undef UNOP
+#undef OP1
+#undef SIMPLE
+
+ private:
+ Zone* zone_;
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMPLIFIED_OPERATOR_H_
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
new file mode 100644
index 000000000..11783900a
--- /dev/null
+++ b/deps/v8/src/compiler/source-position.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/source-position.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-aux-data-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SourcePositionTable::Decorator : public GraphDecorator {
+ public:
+ explicit Decorator(SourcePositionTable* source_positions)
+ : source_positions_(source_positions) {}
+
+ virtual void Decorate(Node* node) {
+ DCHECK(!source_positions_->current_position_.IsInvalid());
+ source_positions_->table_.Set(node, source_positions_->current_position_);
+ }
+
+ private:
+ SourcePositionTable* source_positions_;
+};
+
+
+SourcePositionTable::SourcePositionTable(Graph* graph)
+ : graph_(graph),
+ decorator_(NULL),
+ current_position_(SourcePosition::Invalid()),
+ table_(graph->zone()) {}
+
+
+void SourcePositionTable::AddDecorator() {
+ DCHECK(decorator_ == NULL);
+ decorator_ = new (graph_->zone()) Decorator(this);
+ graph_->AddDecorator(decorator_);
+}
+
+
+void SourcePositionTable::RemoveDecorator() {
+ DCHECK(decorator_ != NULL);
+ graph_->RemoveDecorator(decorator_);
+ decorator_ = NULL;
+}
+
+
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+ return table_.Get(node);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/source-position.h b/deps/v8/src/compiler/source-position.h
new file mode 100644
index 000000000..b81582fd9
--- /dev/null
+++ b/deps/v8/src/compiler/source-position.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SOURCE_POSITION_H_
+#define V8_COMPILER_SOURCE_POSITION_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Encapsulates encoding and decoding of sources positions from which Nodes
+// originated.
+class SourcePosition V8_FINAL {
+ public:
+ explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
+
+ static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
+ bool IsUnknown() const { return raw() == kUnknownPosition; }
+
+ static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
+ bool IsInvalid() const { return raw() == kInvalidPosition; }
+
+ int raw() const { return raw_; }
+
+ private:
+ static const int kInvalidPosition = -2;
+ static const int kUnknownPosition = RelocInfo::kNoPosition;
+ STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
+ int raw_;
+};
+
+
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+ return lhs.raw() == rhs.raw();
+}
+
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+ return !(lhs == rhs);
+}
+
+
+class SourcePositionTable V8_FINAL {
+ public:
+ class Scope {
+ public:
+ Scope(SourcePositionTable* source_positions, SourcePosition position)
+ : source_positions_(source_positions),
+ prev_position_(source_positions->current_position_) {
+ Init(position);
+ }
+ Scope(SourcePositionTable* source_positions, Node* node)
+ : source_positions_(source_positions),
+ prev_position_(source_positions->current_position_) {
+ Init(source_positions_->GetSourcePosition(node));
+ }
+ ~Scope() { source_positions_->current_position_ = prev_position_; }
+
+ private:
+ void Init(SourcePosition position) {
+ if (!position.IsUnknown() || prev_position_.IsInvalid()) {
+ source_positions_->current_position_ = position;
+ }
+ }
+
+ SourcePositionTable* source_positions_;
+ SourcePosition prev_position_;
+ DISALLOW_COPY_AND_ASSIGN(Scope);
+ };
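+
+ // Typical use (illustrative): with the table's decorator added to the graph,
+ // a Scope makes every node created inside it inherit the given position:
+ //
+ //   SourcePositionTable::Scope pos(source_positions, node);
+ //   // ... replacement nodes built here are tagged with node's position ...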
+
+ explicit SourcePositionTable(Graph* graph);
+ ~SourcePositionTable() {
+ if (decorator_ != NULL) RemoveDecorator();
+ }
+
+ void AddDecorator();
+ void RemoveDecorator();
+
+ SourcePosition GetSourcePosition(Node* node);
+
+ private:
+ class Decorator;
+
+ Graph* graph_;
+ Decorator* decorator_;
+ SourcePosition current_position_;
+ NodeAuxData<SourcePosition> table_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SOURCE_POSITION_H_
diff --git a/deps/v8/src/compiler/structured-machine-assembler.cc b/deps/v8/src/compiler/structured-machine-assembler.cc
new file mode 100644
index 000000000..dbf2134a1
--- /dev/null
+++ b/deps/v8/src/compiler/structured-machine-assembler.cc
@@ -0,0 +1,664 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/structured-machine-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* Variable::Get() const { return smasm_->GetVariable(offset_); }
+
+
+void Variable::Set(Node* value) const { smasm_->SetVariable(offset_, value); }
+
+
+StructuredMachineAssembler::StructuredMachineAssembler(
+ Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+ MachineType word)
+ : GraphBuilder(graph),
+ schedule_(new (zone()) Schedule(zone())),
+ machine_(zone(), word),
+ common_(zone()),
+ call_descriptor_builder_(call_descriptor_builder),
+ parameters_(NULL),
+ current_environment_(new (zone())
+ Environment(zone(), schedule()->entry(), false)),
+ number_of_variables_(0) {
+ Node* s = graph->NewNode(common_.Start(parameter_count()));
+ graph->SetStart(s);
+ if (parameter_count() == 0) return;
+ parameters_ = zone()->NewArray<Node*>(parameter_count());
+ for (int i = 0; i < parameter_count(); ++i) {
+ parameters_[i] = NewNode(common()->Parameter(i), graph->start());
+ }
+}
+
+
+Schedule* StructuredMachineAssembler::Export() {
+ // Compute the correct codegen order.
+ DCHECK(schedule_->rpo_order()->empty());
+ Scheduler::ComputeSpecialRPO(schedule_);
+ // Invalidate MachineAssembler.
+ Schedule* schedule = schedule_;
+ schedule_ = NULL;
+ return schedule;
+}
+
+
+Node* StructuredMachineAssembler::Parameter(int index) {
+ DCHECK(0 <= index && index < parameter_count());
+ return parameters_[index];
+}
+
+
+Node* StructuredMachineAssembler::MakeNode(Operator* op, int input_count,
+ Node** inputs) {
+ DCHECK(ScheduleValid());
+ DCHECK(current_environment_ != NULL);
+ Node* node = graph()->NewNode(op, input_count, inputs);
+ BasicBlock* block = NULL;
+ switch (op->opcode()) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kExternalConstant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ // Parameters and constants must be in start.
+ block = schedule()->start();
+ break;
+ default:
+ // Verify all leaf nodes handled above.
+ DCHECK((op->OutputCount() == 0) == (op->opcode() == IrOpcode::kStore));
+ block = current_environment_->block_;
+ break;
+ }
+ if (block != NULL) {
+ schedule()->AddNode(block, node);
+ }
+ return node;
+}
+
+
+Variable StructuredMachineAssembler::NewVariable(Node* initial_value) {
+ CHECK(initial_value != NULL);
+ int offset = number_of_variables_++;
+ // Extend current environment to correct number of values.
+ NodeVector* variables = CurrentVars();
+ size_t to_add = number_of_variables_ - variables->size();
+ if (to_add != 0) {
+ variables->reserve(number_of_variables_);
+ variables->insert(variables->end(), to_add, NULL);
+ }
+ variables->at(offset) = initial_value;
+ return Variable(this, offset);
+}
+
+
+Node* StructuredMachineAssembler::GetVariable(int offset) {
+ DCHECK(ScheduleValid());
+ return VariableAt(current_environment_, offset);
+}
+
+
+void StructuredMachineAssembler::SetVariable(int offset, Node* value) {
+ DCHECK(ScheduleValid());
+ Node*& ref = VariableAt(current_environment_, offset);
+ ref = value;
+}
+
+
+Node*& StructuredMachineAssembler::VariableAt(Environment* environment,
+ int32_t offset) {
+ // Variable used out of scope.
+ CHECK(static_cast<size_t>(offset) < environment->variables_.size());
+ Node*& value = environment->variables_.at(offset);
+ CHECK(value != NULL); // Variable used out of scope.
+ return value;
+}
+
+
+void StructuredMachineAssembler::Return(Node* value) {
+ BasicBlock* block = current_environment_->block_;
+ if (block != NULL) {
+ schedule()->AddReturn(block, value);
+ }
+ CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::CopyCurrentAsDead() {
+ DCHECK(current_environment_ != NULL);
+ bool is_dead = current_environment_->is_dead_;
+ current_environment_->is_dead_ = true;
+ Environment* next = Copy(current_environment_);
+ current_environment_->is_dead_ = is_dead;
+ current_environment_ = next;
+}
+
+
+StructuredMachineAssembler::Environment* StructuredMachineAssembler::Copy(
+ Environment* env, int truncate_at) {
+ Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
+ if (!new_env->is_dead_) {
+ new_env->block_ = schedule()->NewBasicBlock();
+ }
+ new_env->variables_.reserve(truncate_at);
+ NodeVectorIter end = env->variables_.end();
+ DCHECK(truncate_at <= static_cast<int>(env->variables_.size()));
+ end -= static_cast<int>(env->variables_.size()) - truncate_at;
+ new_env->variables_.insert(new_env->variables_.begin(),
+ env->variables_.begin(), end);
+ return new_env;
+}
+
+
+StructuredMachineAssembler::Environment*
+StructuredMachineAssembler::CopyForLoopHeader(Environment* env) {
+ Environment* new_env = new (zone()) Environment(zone(), NULL, env->is_dead_);
+ if (!new_env->is_dead_) {
+ new_env->block_ = schedule()->NewBasicBlock();
+ }
+ new_env->variables_.reserve(env->variables_.size());
+ for (NodeVectorIter i = env->variables_.begin(); i != env->variables_.end();
+ ++i) {
+ Node* phi = NULL;
+ if (*i != NULL) {
+ phi = graph()->NewNode(common()->Phi(1), *i);
+ if (new_env->block_ != NULL) {
+ schedule()->AddNode(new_env->block_, phi);
+ }
+ }
+ new_env->variables_.push_back(phi);
+ }
+ return new_env;
+}
+
+
+void StructuredMachineAssembler::MergeBackEdgesToLoopHeader(
+ Environment* header, EnvironmentVector* environments) {
+ // Only merge as many variables as were declared before this loop.
+ int n = static_cast<int>(header->variables_.size());
+ // TODO(dcarney): invert loop order and extend phis once.
+ for (EnvironmentVector::iterator i = environments->begin();
+ i != environments->end(); ++i) {
+ Environment* from = *i;
+ if (from->is_dead_) continue;
+ AddGoto(from, header);
+ for (int i = 0; i < n; ++i) {
+ Node* phi = header->variables_[i];
+ if (phi == NULL) continue;
+ phi->set_op(common()->Phi(phi->InputCount() + 1));
+ phi->AppendInput(zone(), VariableAt(from, i));
+ }
+ }
+}
+
+
+void StructuredMachineAssembler::Merge(EnvironmentVector* environments,
+ int truncate_at) {
+ DCHECK(current_environment_ == NULL || current_environment_->is_dead_);
+ Environment* next = new (zone()) Environment(zone(), NULL, false);
+ current_environment_ = next;
+ size_t n_vars = number_of_variables_;
+ NodeVector& vars = next->variables_;
+ vars.reserve(n_vars);
+ Node** scratch = NULL;
+ size_t n_envs = environments->size();
+ Environment** live_environments = reinterpret_cast<Environment**>(
+ alloca(sizeof(environments->at(0)) * n_envs));
+ size_t n_live = 0;
+ for (size_t i = 0; i < n_envs; i++) {
+ if (environments->at(i)->is_dead_) continue;
+ live_environments[n_live++] = environments->at(i);
+ }
+ n_envs = n_live;
+ if (n_live == 0) next->is_dead_ = true;
+ if (!next->is_dead_) {
+ next->block_ = schedule()->NewBasicBlock();
+ }
+ for (size_t j = 0; j < n_vars; ++j) {
+ Node* resolved = NULL;
+ // Find first non equal variable.
+ size_t i = 0;
+ for (; i < n_envs; i++) {
+ DCHECK(live_environments[i]->variables_.size() <= n_vars);
+ Node* val = NULL;
+ if (j < static_cast<size_t>(truncate_at)) {
+ val = live_environments[i]->variables_.at(j);
+ // TODO(dcarney): record start position at time of split.
+ // all variables after this should not be NULL.
+ if (val != NULL) {
+ val = VariableAt(live_environments[i], static_cast<int>(j));
+ }
+ }
+ if (val == resolved) continue;
+ if (i != 0) break;
+ resolved = val;
+ }
+ // Have to generate a phi.
+ if (i < n_envs) {
+ // All values thus far uninitialized, variable used out of scope.
+ CHECK(resolved != NULL);
+ // Init scratch buffer.
+ if (scratch == NULL) {
+ scratch = static_cast<Node**>(alloca(n_envs * sizeof(resolved)));
+ }
+ for (size_t k = 0; k < i; k++) {
+ scratch[k] = resolved;
+ }
+ for (; i < n_envs; i++) {
+ scratch[i] = live_environments[i]->variables_[j];
+ }
+ resolved = graph()->NewNode(common()->Phi(static_cast<int>(n_envs)),
+ static_cast<int>(n_envs), scratch);
+ if (next->block_ != NULL) {
+ schedule()->AddNode(next->block_, resolved);
+ }
+ }
+ vars.push_back(resolved);
+ }
+}
+
+
+void StructuredMachineAssembler::AddGoto(Environment* from, Environment* to) {
+ if (to->is_dead_) {
+ DCHECK(from->is_dead_);
+ return;
+ }
+ DCHECK(!from->is_dead_);
+ schedule()->AddGoto(from->block_, to->block_);
+}
+
+
+// TODO(dcarney): add pass before rpo to schedule to compute these.
+BasicBlock* StructuredMachineAssembler::TrampolineFor(BasicBlock* block) {
+ BasicBlock* trampoline = schedule()->NewBasicBlock();
+ schedule()->AddGoto(trampoline, block);
+ return trampoline;
+}
+
+
+void StructuredMachineAssembler::AddBranch(Environment* environment,
+ Node* condition,
+ Environment* true_val,
+ Environment* false_val) {
+ DCHECK(environment->is_dead_ == true_val->is_dead_);
+ DCHECK(environment->is_dead_ == false_val->is_dead_);
+ if (true_val->block_ == false_val->block_) {
+ if (environment->is_dead_) return;
+ AddGoto(environment, true_val);
+ return;
+ }
+ Node* branch = graph()->NewNode(common()->Branch(), condition);
+ if (environment->is_dead_) return;
+ BasicBlock* true_block = TrampolineFor(true_val->block_);
+ BasicBlock* false_block = TrampolineFor(false_val->block_);
+ schedule()->AddBranch(environment->block_, branch, true_block, false_block);
+}
+
+
+StructuredMachineAssembler::Environment::Environment(Zone* zone,
+ BasicBlock* block,
+ bool is_dead)
+ : block_(block),
+ variables_(NodeVector::allocator_type(zone)),
+ is_dead_(is_dead) {}
+
+
+StructuredMachineAssembler::IfBuilder::IfBuilder(
+ StructuredMachineAssembler* smasm)
+ : smasm_(smasm),
+ if_clauses_(IfClauses::allocator_type(smasm_->zone())),
+ pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
+ DCHECK(smasm_->current_environment_ != NULL);
+ PushNewIfClause();
+ DCHECK(!IsDone());
+}
+
+
+StructuredMachineAssembler::IfBuilder&
+StructuredMachineAssembler::IfBuilder::If() {
+ DCHECK(smasm_->current_environment_ != NULL);
+ IfClause* clause = CurrentClause();
+ if (clause->then_environment_ != NULL || clause->else_environment_ != NULL) {
+ PushNewIfClause();
+ }
+ return *this;
+}
+
+
+StructuredMachineAssembler::IfBuilder&
+StructuredMachineAssembler::IfBuilder::If(Node* condition) {
+ If();
+ IfClause* clause = CurrentClause();
+ // Store branch for future resolution.
+ UnresolvedBranch* next = new (smasm_->zone())
+ UnresolvedBranch(smasm_->current_environment_, condition, NULL);
+ if (clause->unresolved_list_tail_ != NULL) {
+ clause->unresolved_list_tail_->next_ = next;
+ }
+ clause->unresolved_list_tail_ = next;
+ // Push onto merge queues.
+ clause->pending_else_merges_.push_back(next);
+ clause->pending_then_merges_.push_back(next);
+ smasm_->current_environment_ = NULL;
+ return *this;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::And() {
+ CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionTerm);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Or() {
+ CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionTerm);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Then() {
+ CurrentClause()->ResolvePendingMerges(smasm_, kCombineThen, kExpressionDone);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::Else() {
+ AddCurrentToPending();
+ CurrentClause()->ResolvePendingMerges(smasm_, kCombineElse, kExpressionDone);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::AddCurrentToPending() {
+ if (smasm_->current_environment_ != NULL &&
+ !smasm_->current_environment_->is_dead_) {
+ pending_exit_merges_.push_back(smasm_->current_environment_);
+ }
+ smasm_->current_environment_ = NULL;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::PushNewIfClause() {
+ int curr_size =
+ static_cast<int>(smasm_->current_environment_->variables_.size());
+ IfClause* clause = new (smasm_->zone()) IfClause(smasm_->zone(), curr_size);
+ if_clauses_.push_back(clause);
+}
+
+
+StructuredMachineAssembler::IfBuilder::IfClause::IfClause(
+ Zone* zone, int initial_environment_size)
+ : unresolved_list_tail_(NULL),
+ initial_environment_size_(initial_environment_size),
+ expression_states_(ExpressionStates::allocator_type(zone)),
+ pending_then_merges_(PendingMergeStack::allocator_type(zone)),
+ pending_else_merges_(PendingMergeStack::allocator_type(zone)),
+ then_environment_(NULL),
+ else_environment_(NULL) {
+ PushNewExpressionState();
+}
+
+
+StructuredMachineAssembler::IfBuilder::PendingMergeStackRange
+StructuredMachineAssembler::IfBuilder::IfClause::ComputeRelevantMerges(
+ CombineType combine_type) {
+ DCHECK(!expression_states_.empty());
+ PendingMergeStack* stack;
+ int start;
+ if (combine_type == kCombineThen) {
+ stack = &pending_then_merges_;
+ start = expression_states_.back().pending_then_size_;
+ } else {
+ DCHECK(combine_type == kCombineElse);
+ stack = &pending_else_merges_;
+ start = expression_states_.back().pending_else_size_;
+ }
+ PendingMergeStackRange data;
+ data.merge_stack_ = stack;
+ data.start_ = start;
+ data.size_ = static_cast<int>(stack->size()) - start;
+ return data;
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::ResolvePendingMerges(
+ StructuredMachineAssembler* smasm, CombineType combine_type,
+ ResolutionType resolution_type) {
+ DCHECK(smasm->current_environment_ == NULL);
+ PendingMergeStackRange data = ComputeRelevantMerges(combine_type);
+ DCHECK_EQ(data.merge_stack_->back(), unresolved_list_tail_);
+ DCHECK(data.size_ > 0);
+ // TODO(dcarney): assert no new variables created during expression building.
+ int truncate_at = initial_environment_size_;
+ if (data.size_ == 1) {
+ // Just copy environment in common case.
+ smasm->current_environment_ =
+ smasm->Copy(unresolved_list_tail_->environment_, truncate_at);
+ } else {
+ EnvironmentVector environments(
+ EnvironmentVector::allocator_type(smasm->zone()));
+ environments.reserve(data.size_);
+ CopyEnvironments(data, &environments);
+ DCHECK(static_cast<int>(environments.size()) == data.size_);
+ smasm->Merge(&environments, truncate_at);
+ }
+ Environment* then_environment = then_environment_;
+ Environment* else_environment = NULL;
+ if (resolution_type == kExpressionDone) {
+ DCHECK(expression_states_.size() == 1);
+ // Set the current then_ or else_environment_ to the new merged environment.
+ if (combine_type == kCombineThen) {
+ DCHECK(then_environment_ == NULL && else_environment_ == NULL);
+ this->then_environment_ = smasm->current_environment_;
+ } else {
+ DCHECK(else_environment_ == NULL);
+ this->else_environment_ = smasm->current_environment_;
+ }
+ } else {
+ DCHECK(resolution_type == kExpressionTerm);
+ DCHECK(then_environment_ == NULL && else_environment_ == NULL);
+ }
+ if (combine_type == kCombineThen) {
+ then_environment = smasm->current_environment_;
+ } else {
+ DCHECK(combine_type == kCombineElse);
+ else_environment = smasm->current_environment_;
+ }
+ // Finalize branches and clear the pending stack.
+ FinalizeBranches(smasm, data, combine_type, then_environment,
+ else_environment);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::CopyEnvironments(
+ const PendingMergeStackRange& data, EnvironmentVector* environments) {
+ PendingMergeStack::iterator i = data.merge_stack_->begin();
+ PendingMergeStack::iterator end = data.merge_stack_->end();
+ for (i += data.start_; i != end; ++i) {
+ environments->push_back((*i)->environment_);
+ }
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::PushNewExpressionState() {
+ ExpressionState next;
+ next.pending_then_size_ = static_cast<int>(pending_then_merges_.size());
+ next.pending_else_size_ = static_cast<int>(pending_else_merges_.size());
+ expression_states_.push_back(next);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::PopExpressionState() {
+ expression_states_.pop_back();
+ DCHECK(!expression_states_.empty());
+}
+
+
+void StructuredMachineAssembler::IfBuilder::IfClause::FinalizeBranches(
+ StructuredMachineAssembler* smasm, const PendingMergeStackRange& data,
+ CombineType combine_type, Environment* const then_environment,
+ Environment* const else_environment) {
+ DCHECK(unresolved_list_tail_ != NULL);
+ DCHECK(smasm->current_environment_ != NULL);
+ if (data.size_ == 0) return;
+ PendingMergeStack::iterator curr = data.merge_stack_->begin();
+ PendingMergeStack::iterator end = data.merge_stack_->end();
+ // Finalize everything but the head first,
+ // in the order the branches enter the merge block.
+ end -= 1;
+ Environment* true_val = then_environment;
+ Environment* false_val = else_environment;
+ Environment** next;
+ if (combine_type == kCombineThen) {
+ next = &false_val;
+ } else {
+ DCHECK(combine_type == kCombineElse);
+ next = &true_val;
+ }
+ for (curr += data.start_; curr != end; ++curr) {
+ UnresolvedBranch* branch = *curr;
+ *next = branch->next_->environment_;
+ smasm->AddBranch(branch->environment_, branch->condition_, true_val,
+ false_val);
+ }
+ DCHECK(curr + 1 == data.merge_stack_->end());
+ // Now finalize the tail if possible.
+ if (then_environment != NULL && else_environment != NULL) {
+ UnresolvedBranch* branch = *curr;
+ smasm->AddBranch(branch->environment_, branch->condition_, then_environment,
+ else_environment);
+ }
+ // Clear the merge stack.
+ PendingMergeStack::iterator begin = data.merge_stack_->begin();
+ begin += data.start_;
+ data.merge_stack_->erase(begin, data.merge_stack_->end());
+ DCHECK_EQ(static_cast<int>(data.merge_stack_->size()), data.start_);
+}
+
+
+void StructuredMachineAssembler::IfBuilder::End() {
+ DCHECK(!IsDone());
+ AddCurrentToPending();
+ size_t current_pending = pending_exit_merges_.size();
+ // All unresolved branch edges are now set to pending.
+ for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
+ ++i) {
+ IfClause* clause = *i;
+ DCHECK(clause->expression_states_.size() == 1);
+ PendingMergeStackRange data;
+ // Copy then environments.
+ data = clause->ComputeRelevantMerges(kCombineThen);
+ clause->CopyEnvironments(data, &pending_exit_merges_);
+ Environment* head = NULL;
+ // Will resolve the head node in the else merge below.
+ if (data.size_ > 0 && clause->then_environment_ == NULL &&
+ clause->else_environment_ == NULL) {
+ head = pending_exit_merges_.back();
+ pending_exit_merges_.pop_back();
+ }
+ // Copy else environments.
+ data = clause->ComputeRelevantMerges(kCombineElse);
+ clause->CopyEnvironments(data, &pending_exit_merges_);
+ if (head != NULL) {
+ // Must have data to merge, or else head will never get a branch.
+ DCHECK(data.size_ != 0);
+ pending_exit_merges_.push_back(head);
+ }
+ }
+ smasm_->Merge(&pending_exit_merges_,
+ if_clauses_[0]->initial_environment_size_);
+ // Anything initially pending jumps into the new environment.
+ for (size_t i = 0; i < current_pending; ++i) {
+ smasm_->AddGoto(pending_exit_merges_[i], smasm_->current_environment_);
+ }
+ // Resolve all branches.
+ for (IfClauses::iterator i = if_clauses_.begin(); i != if_clauses_.end();
+ ++i) {
+ IfClause* clause = *i;
+ // Must finalize all environments, so ensure they are set correctly.
+ Environment* then_environment = clause->then_environment_;
+ if (then_environment == NULL) {
+ then_environment = smasm_->current_environment_;
+ }
+ Environment* else_environment = clause->else_environment_;
+ PendingMergeStackRange data;
+ // Finalize then environments.
+ data = clause->ComputeRelevantMerges(kCombineThen);
+ clause->FinalizeBranches(smasm_, data, kCombineThen, then_environment,
+ else_environment);
+ // Finalize else environments.
+ // Now set the else environment so head is finalized for edge case above.
+ if (else_environment == NULL) {
+ else_environment = smasm_->current_environment_;
+ }
+ data = clause->ComputeRelevantMerges(kCombineElse);
+ clause->FinalizeBranches(smasm_, data, kCombineElse, then_environment,
+ else_environment);
+ }
+ // Future accesses to this builder should crash immediately.
+ pending_exit_merges_.clear();
+ if_clauses_.clear();
+ DCHECK(IsDone());
+}
+
+
+StructuredMachineAssembler::LoopBuilder::LoopBuilder(
+ StructuredMachineAssembler* smasm)
+ : smasm_(smasm),
+ header_environment_(NULL),
+ pending_header_merges_(EnvironmentVector::allocator_type(smasm_->zone())),
+ pending_exit_merges_(EnvironmentVector::allocator_type(smasm_->zone())) {
+ DCHECK(smasm_->current_environment_ != NULL);
+ // Create header environment.
+ header_environment_ = smasm_->CopyForLoopHeader(smasm_->current_environment_);
+ smasm_->AddGoto(smasm_->current_environment_, header_environment_);
+ // Create body environment.
+ Environment* body = smasm_->Copy(header_environment_);
+ smasm_->AddGoto(header_environment_, body);
+ smasm_->current_environment_ = body;
+ DCHECK(!IsDone());
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::Continue() {
+ DCHECK(!IsDone());
+ pending_header_merges_.push_back(smasm_->current_environment_);
+ smasm_->CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::Break() {
+ DCHECK(!IsDone());
+ pending_exit_merges_.push_back(smasm_->current_environment_);
+ smasm_->CopyCurrentAsDead();
+}
+
+
+void StructuredMachineAssembler::LoopBuilder::End() {
+ DCHECK(!IsDone());
+ if (smasm_->current_environment_ != NULL) {
+ Continue();
+ }
+ // Do loop header merges.
+ smasm_->MergeBackEdgesToLoopHeader(header_environment_,
+ &pending_header_merges_);
+ int initial_size = static_cast<int>(header_environment_->variables_.size());
+ // Do loop exit merges, truncating loop variables away.
+ smasm_->Merge(&pending_exit_merges_, initial_size);
+ for (EnvironmentVector::iterator i = pending_exit_merges_.begin();
+ i != pending_exit_merges_.end(); ++i) {
+ smasm_->AddGoto(*i, smasm_->current_environment_);
+ }
+ pending_header_merges_.clear();
+ pending_exit_merges_.clear();
+ header_environment_ = NULL;
+ DCHECK(IsDone());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/structured-machine-assembler.h b/deps/v8/src/compiler/structured-machine-assembler.h
new file mode 100644
index 000000000..a6cb8ca88
--- /dev/null
+++ b/deps/v8/src/compiler/structured-machine-assembler.h
@@ -0,0 +1,311 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-node-factory.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+class StructuredMachineAssembler;
+
+
+class Variable : public ZoneObject {
+ public:
+ Node* Get() const;
+ void Set(Node* value) const;
+
+ private:
+ Variable(StructuredMachineAssembler* smasm, int offset)
+ : smasm_(smasm), offset_(offset) {}
+
+ friend class StructuredMachineAssembler;
+ friend class StructuredMachineAssemblerFriend;
+ StructuredMachineAssembler* const smasm_;
+ const int offset_;
+};
+
+
+class StructuredMachineAssembler
+ : public GraphBuilder,
+ public MachineNodeFactory<StructuredMachineAssembler> {
+ public:
+ class Environment : public ZoneObject {
+ public:
+ Environment(Zone* zone, BasicBlock* block, bool is_dead);
+
+ private:
+ BasicBlock* block_;
+ NodeVector variables_;
+ bool is_dead_;
+ friend class StructuredMachineAssembler;
+ DISALLOW_COPY_AND_ASSIGN(Environment);
+ };
+
+ class IfBuilder;
+ friend class IfBuilder;
+ class LoopBuilder;
+ friend class LoopBuilder;
+
+ StructuredMachineAssembler(
+ Graph* graph, MachineCallDescriptorBuilder* call_descriptor_builder,
+ MachineType word = MachineOperatorBuilder::pointer_rep());
+ virtual ~StructuredMachineAssembler() {}
+
+ Isolate* isolate() const { return zone()->isolate(); }
+ Zone* zone() const { return graph()->zone(); }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ CallDescriptor* call_descriptor() const {
+ return call_descriptor_builder_->BuildCallDescriptor(zone());
+ }
+ int parameter_count() const {
+ return call_descriptor_builder_->parameter_count();
+ }
+ const MachineType* parameter_types() const {
+ return call_descriptor_builder_->parameter_types();
+ }
+
+ // Parameters.
+ Node* Parameter(int index);
+ // Variables.
+ Variable NewVariable(Node* initial_value);
+ // Control flow.
+ void Return(Node* value);
+
+ // MachineAssembler is invalid after export.
+ Schedule* Export();
+
+ protected:
+ virtual Node* MakeNode(Operator* op, int input_count, Node** inputs);
+
+ Schedule* schedule() {
+ DCHECK(ScheduleValid());
+ return schedule_;
+ }
+
+ private:
+ bool ScheduleValid() { return schedule_ != NULL; }
+
+ typedef std::vector<Environment*, zone_allocator<Environment*> >
+ EnvironmentVector;
+
+ NodeVector* CurrentVars() { return &current_environment_->variables_; }
+ Node*& VariableAt(Environment* environment, int offset);
+ Node* GetVariable(int offset);
+ void SetVariable(int offset, Node* value);
+
+ void AddBranch(Environment* environment, Node* condition,
+ Environment* true_val, Environment* false_val);
+ void AddGoto(Environment* from, Environment* to);
+ BasicBlock* TrampolineFor(BasicBlock* block);
+
+ void CopyCurrentAsDead();
+ Environment* Copy(Environment* environment) {
+ return Copy(environment, static_cast<int>(environment->variables_.size()));
+ }
+ Environment* Copy(Environment* environment, int truncate_at);
+ void Merge(EnvironmentVector* environments, int truncate_at);
+ Environment* CopyForLoopHeader(Environment* environment);
+ void MergeBackEdgesToLoopHeader(Environment* header,
+ EnvironmentVector* environments);
+
+ typedef std::vector<MachineType, zone_allocator<MachineType> >
+ RepresentationVector;
+
+ Schedule* schedule_;
+ MachineOperatorBuilder machine_;
+ CommonOperatorBuilder common_;
+ MachineCallDescriptorBuilder* call_descriptor_builder_;
+ Node** parameters_;
+ Environment* current_environment_;
+ int number_of_variables_;
+
+ friend class Variable;
+ // For testing only.
+ friend class StructuredMachineAssemblerFriend;
+ DISALLOW_COPY_AND_ASSIGN(StructuredMachineAssembler);
+};
+
+// IfBuilder constructs nested if-else expressions which more or less follow
+// C semantics. For example:
+//
+// if (x) {do_x} else if (y) {do_y} else {do_z}
+//
+// would look like this:
+//
+// IfBuilder b;
+// b.If(x).Then();
+// do_x
+// b.Else();
+// b.If().Then();
+// do_y
+// b.Else();
+// do_z
+// b.End();
+//
+// Then() and Else() can be skipped, representing an empty block in C.
+// Combinations like If(x).Then().If(x).Then() are legitimate, but
+// Else().Else() is not. That is, once you've nested an If(), you can't get to a
+// higher level If() branch.
+// TODO(dcarney): describe expressions once the api is finalized.
+class StructuredMachineAssembler::IfBuilder {
+ public:
+ explicit IfBuilder(StructuredMachineAssembler* smasm);
+ ~IfBuilder() {
+ if (!IsDone()) End();
+ }
+
+ IfBuilder& If(); // TODO(dcarney): this should take an expression.
+ IfBuilder& If(Node* condition);
+ void Then();
+ void Else();
+ void End();
+
+ // The next 4 functions are exposed for expression support.
+ // They will be private once I have a nice expression api.
+ void And();
+ void Or();
+ IfBuilder& OpenParen() {
+ DCHECK(smasm_->current_environment_ != NULL);
+ CurrentClause()->PushNewExpressionState();
+ return *this;
+ }
+ IfBuilder& CloseParen() {
+ DCHECK(smasm_->current_environment_ == NULL);
+ CurrentClause()->PopExpressionState();
+ return *this;
+ }
+
+ private:
+ // UnresolvedBranch represents the chain of environments created while
+ // generating an expression. At this point, a branch Node
+ // cannot be created, as the target environments of the branch are not yet
+ // available, so everything required to create the branch Node is
+ // stored in this structure until the target environments are resolved.
+ struct UnresolvedBranch : public ZoneObject {
+ UnresolvedBranch(Environment* environment, Node* condition,
+ UnresolvedBranch* next)
+ : environment_(environment), condition_(condition), next_(next) {}
+ // environment_ will eventually be terminated by a branch on condition_.
+ Environment* environment_;
+ Node* condition_;
+ // next_ is the next link in the UnresolvedBranch chain, and will be
+ // either the true or false branch jumped to from environment_.
+ UnresolvedBranch* next_;
+ };
+
+ struct ExpressionState {
+ int pending_then_size_;
+ int pending_else_size_;
+ };
+
+ typedef std::vector<ExpressionState, zone_allocator<ExpressionState> >
+ ExpressionStates;
+ typedef std::vector<UnresolvedBranch*, zone_allocator<UnresolvedBranch*> >
+ PendingMergeStack;
+ struct IfClause;
+ typedef std::vector<IfClause*, zone_allocator<IfClause*> > IfClauses;
+
+ struct PendingMergeStackRange {
+ PendingMergeStack* merge_stack_;
+ int start_;
+ int size_;
+ };
+
+ enum CombineType { kCombineThen, kCombineElse };
+ enum ResolutionType { kExpressionTerm, kExpressionDone };
+
+ // IfClause represents one level of if-then-else nesting plus the associated
+ // expression.
+ // A call to If() triggers creation of a new nesting level once expression
+ // creation is complete, i.e. once Then() or Else() has been called.
+ struct IfClause : public ZoneObject {
+ IfClause(Zone* zone, int initial_environment_size);
+ void CopyEnvironments(const PendingMergeStackRange& data,
+ EnvironmentVector* environments);
+ void ResolvePendingMerges(StructuredMachineAssembler* smasm,
+ CombineType combine_type,
+ ResolutionType resolution_type);
+ PendingMergeStackRange ComputeRelevantMerges(CombineType combine_type);
+ void FinalizeBranches(StructuredMachineAssembler* smasm,
+ const PendingMergeStackRange& offset_data,
+ CombineType combine_type,
+ Environment* then_environment,
+ Environment* else_environment);
+ void PushNewExpressionState();
+ void PopExpressionState();
+
+ // Each invocation of And or Or creates a new UnresolvedBranch.
+ // These form a singly-linked list, of which we only need to keep track of
+ // the tail. On creation of an UnresolvedBranch, pending_then_merges_ and
+ // pending_else_merges_ each push a copy, which are removed on merges to the
+ // respective environment.
+ UnresolvedBranch* unresolved_list_tail_;
+ int initial_environment_size_;
+ // expression_states_ keeps track of the state of pending_*_merges_,
+ // pushing and popping the lengths of these on
+ // OpenParen() and CloseParen() respectively.
+ ExpressionStates expression_states_;
+ PendingMergeStack pending_then_merges_;
+ PendingMergeStack pending_else_merges_;
+ // then_environment_ is created iff there is a call to Then(), otherwise
+ // branches which would merge to it merge to the exit environment instead.
+ // Likewise for else_environment_.
+ Environment* then_environment_;
+ Environment* else_environment_;
+ };
+
+ IfClause* CurrentClause() { return if_clauses_.back(); }
+ void AddCurrentToPending();
+ void PushNewIfClause();
+ bool IsDone() { return if_clauses_.empty(); }
+
+ StructuredMachineAssembler* smasm_;
+ IfClauses if_clauses_;
+ EnvironmentVector pending_exit_merges_;
+ DISALLOW_COPY_AND_ASSIGN(IfBuilder);
+};
+
+
+class StructuredMachineAssembler::LoopBuilder {
+ public:
+ explicit LoopBuilder(StructuredMachineAssembler* smasm);
+ ~LoopBuilder() {
+ if (!IsDone()) End();
+ }
+
+ void Break();
+ void Continue();
+ void End();
+
+ private:
+ friend class StructuredMachineAssembler;
+ bool IsDone() { return header_environment_ == NULL; }
+
+ StructuredMachineAssembler* smasm_;
+ Environment* header_environment_;
+ EnvironmentVector pending_header_merges_;
+ EnvironmentVector pending_exit_merges_;
+ DISALLOW_COPY_AND_ASSIGN(LoopBuilder);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STRUCTURED_MACHINE_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
new file mode 100644
index 000000000..2aa18699d
--- /dev/null
+++ b/deps/v8/src/compiler/typer.cc
@@ -0,0 +1,842 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Typer::Typer(Zone* zone) : zone_(zone) {
+ Type* number = Type::Number(zone);
+ Type* signed32 = Type::Signed32(zone);
+ Type* unsigned32 = Type::Unsigned32(zone);
+ Type* integral32 = Type::Integral32(zone);
+ Type* object = Type::Object(zone);
+ Type* undefined = Type::Undefined(zone);
+ number_fun0_ = Type::Function(number, zone);
+ number_fun1_ = Type::Function(number, number, zone);
+ number_fun2_ = Type::Function(number, number, number, zone);
+ imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
+
+#define NATIVE_TYPE(sem, rep) \
+ Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
+ // TODO(rossberg): Use range types for more precision, once we have them.
+ Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8);
+ Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16);
+ Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
+ Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8);
+ Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16);
+ Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
+ Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
+ Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
+#undef NATIVE_TYPE
+ Type* buffer = Type::Buffer(zone);
+ Type* int8_array = Type::Array(int8, zone);
+ Type* int16_array = Type::Array(int16, zone);
+ Type* int32_array = Type::Array(int32, zone);
+ Type* uint8_array = Type::Array(uint8, zone);
+ Type* uint16_array = Type::Array(uint16, zone);
+ Type* uint32_array = Type::Array(uint32, zone);
+ Type* float32_array = Type::Array(float32, zone);
+ Type* float64_array = Type::Array(float64, zone);
+ Type* arg1 = Type::Union(unsigned32, object, zone);
+ Type* arg2 = Type::Union(unsigned32, undefined, zone);
+ Type* arg3 = arg2;
+ array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
+ int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
+ int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
+ int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
+ uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
+ uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
+ uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
+ float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
+ float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+}
+
+
+class Typer::Visitor : public NullNodeVisitor {
+ public:
+ Visitor(Typer* typer, MaybeHandle<Context> context)
+ : typer_(typer), context_(context) {}
+
+ Bounds TypeNode(Node* node) {
+ switch (node->opcode()) {
+#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
+ VALUE_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+ CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+ break;
+ }
+ return Bounds(Type::None(zone()));
+ }
+
+ Type* TypeConstant(Handle<Object> value);
+
+ protected:
+#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+ VALUE_OP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+ Bounds OperandType(Node* node, int i) {
+ return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i));
+ }
+
+ Type* ContextType(Node* node) {
+ Bounds result =
+ NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
+ DCHECK(result.upper->Is(Type::Internal()));
+ DCHECK(result.lower->Equals(result.upper));
+ return result.upper;
+ }
+
+ Zone* zone() { return typer_->zone(); }
+ Isolate* isolate() { return typer_->isolate(); }
+ MaybeHandle<Context> context() { return context_; }
+
+ private:
+ Typer* typer_;
+ MaybeHandle<Context> context_;
+};
+
+
+class Typer::RunVisitor : public Typer::Visitor {
+ public:
+ RunVisitor(Typer* typer, MaybeHandle<Context> context)
+ : Visitor(typer, context),
+ phis(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ return NodeProperties::IsControl(node)
+ && node->opcode() != IrOpcode::kEnd
+ && node->opcode() != IrOpcode::kMerge
+ && node->opcode() != IrOpcode::kReturn
+ ? GenericGraphVisit::SKIP : GenericGraphVisit::CONTINUE;
+ }
+
+ GenericGraphVisit::Control Post(Node* node) {
+ Bounds bounds = TypeNode(node);
+ if (node->opcode() == IrOpcode::kPhi) {
+ // Remember phis for least fixpoint iteration.
+ phis.insert(node);
+ } else {
+ NodeProperties::SetBounds(node, bounds);
+ }
+ return GenericGraphVisit::CONTINUE;
+ }
+
+ NodeSet phis;
+};
+
+
+class Typer::NarrowVisitor : public Typer::Visitor {
+ public:
+ NarrowVisitor(Typer* typer, MaybeHandle<Context> context)
+ : Visitor(typer, context) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ Bounds previous = NodeProperties::GetBounds(node);
+ Bounds bounds = TypeNode(node);
+ NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
+ DCHECK(bounds.Narrows(previous));
+ // Stop when nothing changed (but allow reentry in case it does later).
+ return previous.Narrows(bounds)
+ ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ }
+
+ GenericGraphVisit::Control Post(Node* node) {
+ return GenericGraphVisit::REENTER;
+ }
+};
+
+
+class Typer::WidenVisitor : public Typer::Visitor {
+ public:
+ WidenVisitor(Typer* typer, MaybeHandle<Context> context)
+ : Visitor(typer, context) {}
+
+ GenericGraphVisit::Control Pre(Node* node) {
+ Bounds previous = NodeProperties::GetBounds(node);
+ Bounds bounds = TypeNode(node);
+ DCHECK(previous.lower->Is(bounds.lower));
+ DCHECK(previous.upper->Is(bounds.upper));
+ NodeProperties::SetBounds(node, bounds); // TODO(rossberg): Either?
+ // Stop when nothing changed (but allow reentry in case it does later).
+ return bounds.Narrows(previous)
+ ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+ }
+
+ GenericGraphVisit::Control Post(Node* node) {
+ return GenericGraphVisit::REENTER;
+ }
+};
+
+
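+// Typing proceeds in two steps: a single pass over the graph assigns bounds
+// to all non-phi nodes (RunVisitor below), and the phis collected during that
+// pass are then widened repeatedly until their bounds stop changing, i.e.
+// until a least fixpoint is reached.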
+void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
+ RunVisitor typing(this, context);
+ graph->VisitNodeInputsFromEnd(&typing);
+ // Find least fixpoint.
+ for (NodeSetIter i = typing.phis.begin(); i != typing.phis.end(); ++i) {
+ Widen(graph, *i, context);
+ }
+}
+
+
+void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) {
+ NarrowVisitor typing(this, context);
+ graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
+ WidenVisitor typing(this, context);
+ graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Init(Node* node) {
+ Visitor typing(this, MaybeHandle<Context>());
+ Bounds bounds = typing.TypeNode(node);
+ NodeProperties::SetBounds(node, bounds);
+}
+
+
+// Common operators.
+Bounds Typer::Visitor::TypeParameter(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
+ // TODO(titzer): only call Type::Of() if the type is not already known.
+ return Bounds(Type::Of(ValueOf<int32_t>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+ // TODO(titzer): only call Type::Of() if the type is not already known.
+ return Bounds(
+ Type::Of(static_cast<double>(ValueOf<int64_t>(node->op())), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
+ // TODO(titzer): only call Type::Of() if the type is not already known.
+ return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
+ // TODO(titzer): only call Type::Of() if the type is not already known.
+ return Bounds(Type::Of(ValueOf<double>(node->op()), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
+ return Bounds(TypeConstant(ValueOf<Handle<Object> >(node->op())));
+}
+
+
+Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
+ return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypePhi(Node* node) {
+ int arity = OperatorProperties::GetValueInputCount(node->op());
+ Bounds bounds = OperandType(node, 0);
+ for (int i = 1; i < arity; ++i) {
+ bounds = Bounds::Either(bounds, OperandType(node, i), zone());
+ }
+ return bounds;
+}
+
+
+Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFrameState(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStateValues(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeCall(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeProjection(Node* node) {
+ // TODO(titzer): use the output type of the input to determine the bounds.
+ return Bounds::Unbounded(zone());
+}
+
+
+// JS comparison operators.
+
+#define DEFINE_METHOD(x) \
+ Bounds Typer::Visitor::Type##x(Node* node) { \
+ return Bounds(Type::Boolean(zone())); \
+ }
+JS_COMPARE_BINOP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// JS bitwise operators.
+
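+// In JS, the bitwise and shift operators coerce their result to a 32-bit
+// integer: |, &, ^, << and >> produce values in the Signed32 range, while >>>
+// produces Unsigned32. The bounds below reflect this, with SignedSmall
+// (resp. UnsignedSmall) used as a conservative lower bound.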
+Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) {
+ Bounds left = OperandType(node, 0);
+ Bounds right = OperandType(node, 1);
+ Type* upper = Type::Union(left.upper, right.upper, zone());
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+ Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) {
+ Bounds left = OperandType(node, 0);
+ Bounds right = OperandType(node, 1);
+ Type* upper = Type::Union(left.upper, right.upper, zone());
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+ Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRight(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) {
+ return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone()));
+}
+
+
+// JS arithmetic operators.
+
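+// The bounds computed for JSAdd below follow the addition semantics case by
+// case; as an illustration (not exhaustive): Number + Number is typed
+// [SignedSmall, Number], an operand whose upper bound is String forces the
+// result's upper bound to String, and completely unknown operands fall back
+// to [None, NumberOrString].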
+Bounds Typer::Visitor::TypeJSAdd(Node* node) {
+ Bounds left = OperandType(node, 0);
+ Bounds right = OperandType(node, 1);
+ Type* lower =
+ left.lower->Is(Type::None()) || right.lower->Is(Type::None()) ?
+ Type::None(zone()) :
+ left.lower->Is(Type::Number()) && right.lower->Is(Type::Number()) ?
+ Type::SignedSmall(zone()) :
+ left.lower->Is(Type::String()) || right.lower->Is(Type::String()) ?
+ Type::String(zone()) : Type::None(zone());
+ Type* upper =
+ left.upper->Is(Type::None()) && right.upper->Is(Type::None()) ?
+ Type::None(zone()) :
+ left.upper->Is(Type::Number()) && right.upper->Is(Type::Number()) ?
+ Type::Number(zone()) :
+ left.upper->Is(Type::String()) || right.upper->Is(Type::String()) ?
+ Type::String(zone()) : Type::NumberOrString(zone());
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSSubtract(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSMultiply(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDivide(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSModulus(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+// JS unary operators.
+
+Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
+ return Bounds(Type::InternalizedString(zone()));
+}
+
+
+// JS conversion operators.
+
+Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+ return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToString(Node* node) {
+ return Bounds(Type::None(zone()), Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToName(Node* node) {
+ return Bounds(Type::None(zone()), Type::Name(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToObject(Node* node) {
+ return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+// JS object operators.
+
+Bounds Typer::Visitor::TypeJSCreate(Node* node) {
+ return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+ Bounds object = OperandType(node, 0);
+ Bounds name = OperandType(node, 1);
+ Bounds result = Bounds::Unbounded(zone());
+ // TODO(rossberg): Use range types and sized array types to filter undefined.
+ if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) {
+ result.lower = Type::Union(
+ object.lower->AsArray()->Element(), Type::Undefined(zone()), zone());
+ }
+ if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) {
+ result.upper = Type::Union(
+ object.upper->AsArray()->Element(), Type::Undefined(zone()), zone());
+ }
+ return result;
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+// JS context operators.
+
+Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
+ Bounds outer = OperandType(node, 0);
+ DCHECK(outer.upper->Is(Type::Internal()));
+ DCHECK(outer.lower->Equals(outer.upper));
+ ContextAccess access = OpParameter<ContextAccess>(node);
+ Type* context_type = outer.upper;
+ MaybeHandle<Context> context;
+ if (context_type->IsConstant()) {
+ context = Handle<Context>::cast(context_type->AsConstant()->Value());
+ }
+ // Walk context chain (as far as known), mirroring dynamic lookup.
+ // Since contexts are mutable, the information is only useful as a lower
+ // bound.
+ // TODO(rossberg): Could use scope info to fix upper bounds for constant
+ // bindings if we know that this code is never shared.
+ for (int i = access.depth(); i > 0; --i) {
+ if (context_type->IsContext()) {
+ context_type = context_type->AsContext()->Outer();
+ if (context_type->IsConstant()) {
+ context = Handle<Context>::cast(context_type->AsConstant()->Value());
+ }
+ } else {
+ context = handle(context.ToHandleChecked()->previous(), isolate());
+ }
+ }
+ if (context.is_null()) {
+ return Bounds::Unbounded(zone());
+ } else {
+ Handle<Object> value =
+ handle(context.ToHandleChecked()->get(access.index()), isolate());
+ Type* lower = TypeConstant(value);
+ return Bounds(lower, Type::Any(zone()));
+ }
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+ return Bounds(Type::None(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+ // TODO(rossberg): this is probably incorrect
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
+ Type* outer = ContextType(node);
+ return Bounds(Type::Context(outer, zone()));
+}
+
+
+// JS other operators.
+
+Bounds Typer::Visitor::TypeJSYield(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
+ return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
+ Bounds fun = OperandType(node, 0);
+ Type* lower = fun.lower->IsFunction()
+ ? fun.lower->AsFunction()->Result() : Type::None(zone());
+ Type* upper = fun.upper->IsFunction()
+ ? fun.upper->AsFunction()->Result() : Type::Any(zone());
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
+ return Bounds::Unbounded(zone());
+}
+
+
+// Simplified operators.
+
+Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
+ return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
+ Bounds arg = OperandType(node, 0);
+ Type* s32 = Type::Signed32(zone());
+ Type* lower = arg.lower->Is(s32) ? arg.lower : s32;
+ Type* upper = arg.upper->Is(s32) ? arg.upper : s32;
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
+ Bounds arg = OperandType(node, 0);
+ Type* u32 = Type::Unsigned32(zone());
+ Type* lower = arg.lower->Is(u32) ? arg.lower : u32;
+ Type* upper = arg.upper->Is(u32) ? arg.upper : u32;
+ return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringEqual(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+ return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringAdd(Node* node) {
+ return Bounds(Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+ // TODO(titzer): type is type of input, representation is Word32.
+ return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+ return Bounds(Type::Integral32()); // TODO(titzer): add appropriate rep
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+ // TODO(titzer): type is type of input, representation is Float64.
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+ // TODO(titzer): type is type of input, representation is Tagged.
+ return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+ // TODO(titzer): type is type of input, representation is Tagged.
+ return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+ // TODO(titzer): type is type of input, representation is Tagged.
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+ // TODO(titzer): type is type of input, representation is Bit.
+ return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
+ // TODO(titzer): type is type of input, representation is Tagged.
+ return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeLoadField(Node* node) {
+ return Bounds(FieldAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeLoadElement(Node* node) {
+ return Bounds(ElementAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeStoreField(Node* node) {
+ return Bounds(Type::None());
+}
+
+
+Bounds Typer::Visitor::TypeStoreElement(Node* node) {
+ return Bounds(Type::None());
+}
+
+
+// Machine operators.
+
+// TODO(rossberg): implement
+#define DEFINE_METHOD(x) \
+ Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); }
+MACHINE_OP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// Heap constants.
+
+Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
+ if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() &&
+ !context().is_null()) {
+ Handle<Context> native =
+ handle(context().ToHandleChecked()->native_context(), isolate());
+ if (*value == native->math_abs_fun()) {
+ return typer_->number_fun1_; // TODO(rossberg): can't express overloading
+ } else if (*value == native->math_acos_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_asin_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_atan_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_atan2_fun()) {
+ return typer_->number_fun2_;
+ } else if (*value == native->math_ceil_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_cos_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_exp_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_floor_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_imul_fun()) {
+ return typer_->imul_fun_;
+ } else if (*value == native->math_log_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_pow_fun()) {
+ return typer_->number_fun2_;
+ } else if (*value == native->math_random_fun()) {
+ return typer_->number_fun0_;
+ } else if (*value == native->math_round_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_sin_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_sqrt_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->math_tan_fun()) {
+ return typer_->number_fun1_;
+ } else if (*value == native->array_buffer_fun()) {
+ return typer_->array_buffer_fun_;
+ } else if (*value == native->int8_array_fun()) {
+ return typer_->int8_array_fun_;
+ } else if (*value == native->int16_array_fun()) {
+ return typer_->int16_array_fun_;
+ } else if (*value == native->int32_array_fun()) {
+ return typer_->int32_array_fun_;
+ } else if (*value == native->uint8_array_fun()) {
+ return typer_->uint8_array_fun_;
+ } else if (*value == native->uint16_array_fun()) {
+ return typer_->uint16_array_fun_;
+ } else if (*value == native->uint32_array_fun()) {
+ return typer_->uint32_array_fun_;
+ } else if (*value == native->float32_array_fun()) {
+ return typer_->float32_array_fun_;
+ } else if (*value == native->float64_array_fun()) {
+ return typer_->float64_array_fun_;
+ }
+ }
+ return Type::Constant(value, zone());
+}
+
+
+namespace {
+
+class TyperDecorator : public GraphDecorator {
+ public:
+ explicit TyperDecorator(Typer* typer) : typer_(typer) {}
+ virtual void Decorate(Node* node) { typer_->Init(node); }
+
+ private:
+ Typer* typer_;
+};
+
+}
+
+
+void Typer::DecorateGraph(Graph* graph) {
+ graph->AddDecorator(new (zone()) TyperDecorator(this));
+}
+
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
new file mode 100644
index 000000000..2957e4b4a
--- /dev/null
+++ b/deps/v8/src/compiler/typer.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPER_H_
+#define V8_COMPILER_TYPER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/opcodes.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer {
+ public:
+ explicit Typer(Zone* zone);
+
+ void Init(Node* node);
+ void Run(Graph* graph, MaybeHandle<Context> context);
+ void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context);
+ void Widen(Graph* graph, Node* node, MaybeHandle<Context> context);
+
+ void DecorateGraph(Graph* graph);
+
+ Zone* zone() { return zone_; }
+ Isolate* isolate() { return zone_->isolate(); }
+
+ private:
+ class Visitor;
+ class RunVisitor;
+ class NarrowVisitor;
+ class WidenVisitor;
+
+ Zone* zone_;
+ Type* number_fun0_;
+ Type* number_fun1_;
+ Type* number_fun2_;
+ Type* imul_fun_;
+ Type* array_buffer_fun_;
+ Type* int8_array_fun_;
+ Type* int16_array_fun_;
+ Type* int32_array_fun_;
+ Type* uint8_array_fun_;
+ Type* uint16_array_fun_;
+ Type* uint32_array_fun_;
+ Type* float32_array_fun_;
+ Type* float64_array_fun_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_TYPER_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
new file mode 100644
index 000000000..97bb762af
--- /dev/null
+++ b/deps/v8/src/compiler/verifier.cc
@@ -0,0 +1,245 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/verifier.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
+ Node::Uses uses = def->uses();
+ for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+ if (*it == use) return true;
+ }
+ return false;
+}
+
+
+static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
+ Node::Inputs inputs = use->inputs();
+ for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
+ if (*it == def) return true;
+ }
+ return false;
+}
+
+
+class Verifier::Visitor : public NullNodeVisitor {
+ public:
+ explicit Visitor(Zone* zone)
+ : reached_from_start(NodeSet::key_compare(),
+ NodeSet::allocator_type(zone)),
+ reached_from_end(NodeSet::key_compare(),
+ NodeSet::allocator_type(zone)) {}
+
+ // Fulfills the PreNodeCallback interface.
+ GenericGraphVisit::Control Pre(Node* node);
+
+ bool from_start;
+ NodeSet reached_from_start;
+ NodeSet reached_from_end;
+};
+
+
+GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
+ int value_count = OperatorProperties::GetValueInputCount(node->op());
+ int context_count = OperatorProperties::GetContextInputCount(node->op());
+ int effect_count = OperatorProperties::GetEffectInputCount(node->op());
+ int control_count = OperatorProperties::GetControlInputCount(node->op());
+
+ // Verify number of inputs matches up.
+ int input_count = value_count + context_count + effect_count + control_count;
+ CHECK_EQ(input_count, node->InputCount());
+
+ // Verify all value inputs actually produce a value.
+ for (int i = 0; i < value_count; ++i) {
+ Node* value = NodeProperties::GetValueInput(node, i);
+ CHECK(OperatorProperties::HasValueOutput(value->op()));
+ CHECK(IsDefUseChainLinkPresent(value, node));
+ CHECK(IsUseDefChainLinkPresent(value, node));
+ }
+
+ // Verify all context inputs are value nodes.
+ for (int i = 0; i < context_count; ++i) {
+ Node* context = NodeProperties::GetContextInput(node);
+ CHECK(OperatorProperties::HasValueOutput(context->op()));
+ CHECK(IsDefUseChainLinkPresent(context, node));
+ CHECK(IsUseDefChainLinkPresent(context, node));
+ }
+
+ // Verify all effect inputs actually have an effect.
+ for (int i = 0; i < effect_count; ++i) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ CHECK(OperatorProperties::HasEffectOutput(effect->op()));
+ CHECK(IsDefUseChainLinkPresent(effect, node));
+ CHECK(IsUseDefChainLinkPresent(effect, node));
+ }
+
+ // Verify all control inputs are control nodes.
+ for (int i = 0; i < control_count; ++i) {
+ Node* control = NodeProperties::GetControlInput(node, i);
+ CHECK(OperatorProperties::HasControlOutput(control->op()));
+ CHECK(IsDefUseChainLinkPresent(control, node));
+ CHECK(IsUseDefChainLinkPresent(control, node));
+ }
+
+ // Verify all successors are projections if multiple value outputs exist.
+ if (OperatorProperties::GetValueOutputCount(node->op()) > 1) {
+ Node::Uses uses = node->uses();
+ for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+ CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
+ (*it)->opcode() == IrOpcode::kProjection ||
+ (*it)->opcode() == IrOpcode::kParameter);
+ }
+ }
+
+ switch (node->opcode()) {
+ case IrOpcode::kStart:
+ // Start has no inputs.
+ CHECK_EQ(0, input_count);
+ break;
+ case IrOpcode::kEnd:
+ // End has no outputs.
+ CHECK(!OperatorProperties::HasValueOutput(node->op()));
+ CHECK(!OperatorProperties::HasEffectOutput(node->op()));
+ CHECK(!OperatorProperties::HasControlOutput(node->op()));
+ break;
+ case IrOpcode::kDead:
+ // Dead is never connected to the graph.
+ UNREACHABLE();
+ case IrOpcode::kBranch: {
+ // Branch uses are IfTrue and IfFalse.
+ Node::Uses uses = node->uses();
+ bool got_true = false, got_false = false;
+ for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+ CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
+ ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
+ if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
+ if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
+ }
+ // TODO(rossberg): Currently fails for various tests.
+ // CHECK(got_true && got_false);
+ break;
+ }
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ CHECK_EQ(IrOpcode::kBranch,
+ NodeProperties::GetControlInput(node, 0)->opcode());
+ break;
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ break;
+ case IrOpcode::kReturn:
+ // TODO(rossberg): check successor is End
+ break;
+ case IrOpcode::kThrow:
+ // TODO(rossberg): what are the constraints on these?
+ break;
+ case IrOpcode::kParameter: {
+ // Parameters have the start node as inputs.
+ CHECK_EQ(1, input_count);
+ CHECK_EQ(IrOpcode::kStart,
+ NodeProperties::GetValueInput(node, 0)->opcode());
+ // Parameter has an input that produces enough values.
+ int index = static_cast<Operator1<int>*>(node->op())->parameter();
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // Currently, parameter indices start at -1 instead of 0.
+ CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1);
+ break;
+ }
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kExternalConstant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ // Constants have no inputs.
+ CHECK_EQ(0, input_count);
+ break;
+ case IrOpcode::kPhi: {
+ // Phi input count matches parent control node.
+ CHECK_EQ(1, control_count);
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(value_count,
+ OperatorProperties::GetControlInputCount(control->op()));
+ break;
+ }
+ case IrOpcode::kEffectPhi: {
+ // EffectPhi input count matches parent control node.
+ CHECK_EQ(1, control_count);
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(effect_count,
+ OperatorProperties::GetControlInputCount(control->op()));
+ break;
+ }
+ case IrOpcode::kLazyDeoptimization:
+ // TODO(jarin): what are the constraints on these?
+ break;
+ case IrOpcode::kDeoptimize:
+ // TODO(jarin): what are the constraints on these?
+ break;
+ case IrOpcode::kFrameState:
+ // TODO(jarin): what are the constraints on these?
+ break;
+ case IrOpcode::kCall:
+ // TODO(rossberg): what are the constraints on these?
+ break;
+ case IrOpcode::kContinuation:
+ // TODO(jarin): what are the constraints on these?
+ break;
+ case IrOpcode::kProjection: {
+ // Projection has an input that produces enough values.
+ int index = static_cast<Operator1<int>*>(node->op())->parameter();
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index);
+ break;
+ }
+ default:
+ // TODO(rossberg): Check other node kinds.
+ break;
+ }
+
+ if (from_start) {
+ reached_from_start.insert(node);
+ } else {
+ reached_from_end.insert(node);
+ }
+
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+void Verifier::Run(Graph* graph) {
+ Visitor visitor(graph->zone());
+
+ CHECK_NE(NULL, graph->start());
+ visitor.from_start = true;
+ graph->VisitNodeUsesFromStart(&visitor);
+ CHECK_NE(NULL, graph->end());
+ visitor.from_start = false;
+ graph->VisitNodeInputsFromEnd(&visitor);
+
+ // All control nodes reachable from end are reachable from start.
+ for (NodeSet::iterator it = visitor.reached_from_end.begin();
+ it != visitor.reached_from_end.end(); ++it) {
+ CHECK(!NodeProperties::IsControl(*it) ||
+ visitor.reached_from_start.count(*it));
+ }
+}
+}
+}
+} // namespace v8::internal::compiler
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
new file mode 100644
index 000000000..788c6a565
--- /dev/null
+++ b/deps/v8/src/compiler/verifier.h
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VERIFIER_H_
+#define V8_COMPILER_VERIFIER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Verifier {
+ public:
+ static void Run(Graph* graph);
+
+ private:
+ class Visitor;
+ DISALLOW_COPY_AND_ASSIGN(Verifier);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_VERIFIER_H_
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
new file mode 100644
index 000000000..9f278ad89
--- /dev/null
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -0,0 +1,1001 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(turbofan): Cleanup these hacks.
+enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
+
+
+struct Immediate64 {
+ uint64_t value;
+ Handle<Object> handle;
+ ExternalReference reference;
+ Immediate64Type type;
+};
+
+
+enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
+
+
+struct RegisterOrOperand {
+ RegisterOrOperand() : operand(no_reg, 0) {}
+ Register reg;
+ DoubleRegister double_reg;
+ Operand operand;
+ RegisterOrOperandType type;
+};
+
+
+// Adds X64 specific methods for decoding operands.
+class X64OperandConverter : public InstructionOperandConverter {
+ public:
+ X64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ RegisterOrOperand InputRegisterOrOperand(int index) {
+ return ToRegisterOrOperand(instr_->InputAt(index));
+ }
+
+ Immediate InputImmediate(int index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ RegisterOrOperand OutputRegisterOrOperand() {
+ return ToRegisterOrOperand(instr_->Output());
+ }
+
+ Immediate64 InputImmediate64(int index) {
+ return ToImmediate64(instr_->InputAt(index));
+ }
+
+ Immediate64 ToImmediate64(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ Immediate64 immediate;
+ immediate.value = 0xbeefdeaddeefbeed;
+ immediate.type = kImm64Value;
+ switch (constant.type()) {
+ case Constant::kInt32:
+ case Constant::kInt64:
+ immediate.value = constant.ToInt64();
+ return immediate;
+ case Constant::kFloat64:
+ immediate.type = kImm64Handle;
+ immediate.handle =
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
+ return immediate;
+ case Constant::kExternalReference:
+ immediate.type = kImm64Reference;
+ immediate.reference = constant.ToExternalReference();
+ return immediate;
+ case Constant::kHeapObject:
+ immediate.type = kImm64Handle;
+ immediate.handle = constant.ToHeapObject();
+ return immediate;
+ }
+ UNREACHABLE();
+ return immediate;
+ }
+
+ Immediate ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Immediate(constant.ToInt32());
+ case Constant::kInt64:
+ case Constant::kFloat64:
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ break;
+ }
+ UNREACHABLE();
+ return Immediate(-1);
+ }
+
+ Operand ToOperand(InstructionOperand* op, int extra = 0) {
+ RegisterOrOperand result = ToRegisterOrOperand(op, extra);
+ DCHECK_EQ(kOperand, result.type);
+ return result.operand;
+ }
+
+ RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
+ RegisterOrOperand result;
+ if (op->IsRegister()) {
+ DCHECK(extra == 0);
+ result.type = kRegister;
+ result.reg = ToRegister(op);
+ return result;
+ } else if (op->IsDoubleRegister()) {
+ DCHECK(extra == 0);
+ result.type = kDoubleRegister;
+ result.double_reg = ToDoubleRegister(op);
+ return result;
+ }
+
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+
+ result.type = kOperand;
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+ result.operand =
+ Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+ return result;
+ }
+
+ Operand MemoryOperand(int* first_input) {
+ const int offset = *first_input;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_MR1I: {
+ *first_input += 2;
+ Register index = InputRegister(offset + 1);
+ return Operand(InputRegister(offset + 0), index, times_1,
+ 0); // TODO(dcarney): K != 0
+ }
+ case kMode_MRI:
+ *first_input += 2;
+ return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
+ default:
+ UNREACHABLE();
+ return Operand(no_reg, 0);
+ }
+ }
+
+ Operand MemoryOperand() {
+ int first_input = 0;
+ return MemoryOperand(&first_input);
+ }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+
+#define ASSEMBLE_BINOP(asm_instr) \
+ do { \
+ if (HasImmediateInput(instr, 1)) { \
+ RegisterOrOperand input = i.InputRegisterOrOperand(0); \
+ if (input.type == kRegister) { \
+ __ asm_instr(input.reg, i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(input.operand, i.InputImmediate(1)); \
+ } \
+ } else { \
+ RegisterOrOperand input = i.InputRegisterOrOperand(1); \
+ if (input.type == kRegister) { \
+ __ asm_instr(i.InputRegister(0), input.reg); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), input.operand); \
+ } \
+ } \
+ } while (0)
+
+
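+// ASSEMBLE_SHIFT emits either an immediate-count or a CL-count shift. The
+// width argument (5 or 6) selects the InputInt5/InputInt6 accessor, which is
+// presumably what truncates the immediate shift count to the 0-31 or 0-63
+// range accepted by the hardware.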
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (HasImmediateInput(instr, 1)) { \
+ __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+ } else { \
+ __ asm_instr##_cl(i.OutputRegister()); \
+ } \
+ } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ jmp(code_->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ break;
+ }
+ case kX64Add32:
+ ASSEMBLE_BINOP(addl);
+ break;
+ case kX64Add:
+ ASSEMBLE_BINOP(addq);
+ break;
+ case kX64Sub32:
+ ASSEMBLE_BINOP(subl);
+ break;
+ case kX64Sub:
+ ASSEMBLE_BINOP(subq);
+ break;
+ case kX64And32:
+ ASSEMBLE_BINOP(andl);
+ break;
+ case kX64And:
+ ASSEMBLE_BINOP(andq);
+ break;
+ case kX64Cmp32:
+ ASSEMBLE_BINOP(cmpl);
+ break;
+ case kX64Cmp:
+ ASSEMBLE_BINOP(cmpq);
+ break;
+ case kX64Test32:
+ ASSEMBLE_BINOP(testl);
+ break;
+ case kX64Test:
+ ASSEMBLE_BINOP(testq);
+ break;
+ case kX64Imul32:
+ if (HasImmediateInput(instr, 1)) {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
+ } else {
+ __ movq(kScratchRegister, input.operand);
+ __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+ }
+ } else {
+ RegisterOrOperand input = i.InputRegisterOrOperand(1);
+ if (input.type == kRegister) {
+ __ imull(i.OutputRegister(), input.reg);
+ } else {
+ __ imull(i.OutputRegister(), input.operand);
+ }
+ }
+ break;
+ case kX64Imul:
+ if (HasImmediateInput(instr, 1)) {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
+ } else {
+ __ movq(kScratchRegister, input.operand);
+ __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+ }
+ } else {
+ RegisterOrOperand input = i.InputRegisterOrOperand(1);
+ if (input.type == kRegister) {
+ __ imulq(i.OutputRegister(), input.reg);
+ } else {
+ __ imulq(i.OutputRegister(), input.operand);
+ }
+ }
+ break;
+ case kX64Idiv32:
+ __ cdq();
+ __ idivl(i.InputRegister(1));
+ break;
+ case kX64Idiv:
+ __ cqo();
+ __ idivq(i.InputRegister(1));
+ break;
+ case kX64Udiv32:
+ __ xorl(rdx, rdx);
+ __ divl(i.InputRegister(1));
+ break;
+ case kX64Udiv:
+ __ xorq(rdx, rdx);
+ __ divq(i.InputRegister(1));
+ break;
+ case kX64Not: {
+ RegisterOrOperand output = i.OutputRegisterOrOperand();
+ if (output.type == kRegister) {
+ __ notq(output.reg);
+ } else {
+ __ notq(output.operand);
+ }
+ break;
+ }
+ case kX64Not32: {
+ RegisterOrOperand output = i.OutputRegisterOrOperand();
+ if (output.type == kRegister) {
+ __ notl(output.reg);
+ } else {
+ __ notl(output.operand);
+ }
+ break;
+ }
+ case kX64Neg: {
+ RegisterOrOperand output = i.OutputRegisterOrOperand();
+ if (output.type == kRegister) {
+ __ negq(output.reg);
+ } else {
+ __ negq(output.operand);
+ }
+ break;
+ }
+ case kX64Neg32: {
+ RegisterOrOperand output = i.OutputRegisterOrOperand();
+ if (output.type == kRegister) {
+ __ negl(output.reg);
+ } else {
+ __ negl(output.operand);
+ }
+ break;
+ }
+ case kX64Or32:
+ ASSEMBLE_BINOP(orl);
+ break;
+ case kX64Or:
+ ASSEMBLE_BINOP(orq);
+ break;
+ case kX64Xor32:
+ ASSEMBLE_BINOP(xorl);
+ break;
+ case kX64Xor:
+ ASSEMBLE_BINOP(xorq);
+ break;
+ case kX64Shl32:
+ ASSEMBLE_SHIFT(shll, 5);
+ break;
+ case kX64Shl:
+ ASSEMBLE_SHIFT(shlq, 6);
+ break;
+ case kX64Shr32:
+ ASSEMBLE_SHIFT(shrl, 5);
+ break;
+ case kX64Shr:
+ ASSEMBLE_SHIFT(shrq, 6);
+ break;
+ case kX64Sar32:
+ ASSEMBLE_SHIFT(sarl, 5);
+ break;
+ case kX64Sar:
+ ASSEMBLE_SHIFT(sarq, 6);
+ break;
+ case kX64Push: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ pushq(input.reg);
+ } else {
+ __ pushq(input.operand);
+ }
+ break;
+ }
+ case kX64PushI:
+ __ pushq(i.InputImmediate(0));
+ break;
+ case kX64CallCodeObject: {
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ Call(Operand(reg, entry));
+ }
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ AddNopForSmiCodeInlining();
+ break;
+ }
+ case kX64CallAddress:
+ if (HasImmediateInput(instr, 0)) {
+ Immediate64 imm = i.InputImmediate64(0);
+ DCHECK_EQ(kImm64Value, imm.type);
+ __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
+ } else {
+ __ call(i.InputRegister(0));
+ }
+ break;
+ case kPopStack: {
+ int words = MiscField::decode(instr->opcode());
+ __ addq(rsp, Immediate(kPointerSize * words));
+ break;
+ }
+ case kX64CallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ movp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ break;
+ }
+ case kSSEFloat64Cmp: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(1);
+ if (input.type == kDoubleRegister) {
+ __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
+ } else {
+ __ ucomisd(i.InputDoubleRegister(0), input.operand);
+ }
+ break;
+ }
+ case kSSEFloat64Add:
+ __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Sub:
+ __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mul:
+ __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Div:
+ __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mod: {
+ __ subq(rsp, Immediate(kDoubleSize));
+ // Move values to st(0) and st(1).
+ __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ __ fld_d(Operand(rsp, 0));
+ __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ __ fld_d(Operand(rsp, 0));
+ // Loop while fprem isn't done.
+ Label mod_loop;
+ __ bind(&mod_loop);
+ // This instruction traps on all kinds of inputs, but we are assuming the
+ // floating point control word is set to ignore them all.
+ __ fprem();
+ // The following 2 instructions implicitly use rax.
+ __ fnstsw_ax();
+ if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+ __ sahf();
+ } else {
+ __ shrl(rax, Immediate(8));
+ __ andl(rax, Immediate(0xFF));
+ __ pushq(rax);
+ __ popfq();
+ }
+ __ j(parity_even, &mod_loop);
+ // Move output to stack and clean up.
+ __ fstp(1);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ break;
+ }
+ case kX64Int32ToInt64:
+ __ movzxwq(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kX64Int64ToInt32:
+ __ Move(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kSSEFloat64ToInt32: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kDoubleRegister) {
+ __ cvttsd2si(i.OutputRegister(), input.double_reg);
+ } else {
+ __ cvttsd2si(i.OutputRegister(), input.operand);
+ }
+ break;
+ }
+ case kSSEFloat64ToUint32: {
+ // TODO(turbofan): X64 SSE cvttsd2siq should support operands.
+ __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits.
+ // TODO(turbofan): generated code should not look at the upper 32 bits
+ // of the result, but those bits could escape to the outside world.
+ break;
+ }
+ case kSSEInt32ToFloat64: {
+ RegisterOrOperand input = i.InputRegisterOrOperand(0);
+ if (input.type == kRegister) {
+ __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+ } else {
+ __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+ }
+ break;
+ }
+ case kSSEUint32ToFloat64: {
+ // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
+ __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+
+ case kSSELoad:
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kSSEStore: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ break;
+ }
+ case kX64LoadWord8:
+ __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX64StoreWord8: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movb(operand, i.InputRegister(index));
+ break;
+ }
+ case kX64StoreWord8I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movb(operand, Immediate(i.InputInt8(index)));
+ break;
+ }
+ case kX64LoadWord16:
+ __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX64StoreWord16: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movw(operand, i.InputRegister(index));
+ break;
+ }
+ case kX64StoreWord16I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movw(operand, Immediate(i.InputInt16(index)));
+ break;
+ }
+ case kX64LoadWord32:
+ __ movl(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX64StoreWord32: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movl(operand, i.InputRegister(index));
+ break;
+ }
+ case kX64StoreWord32I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movl(operand, i.InputImmediate(index));
+ break;
+ }
+ case kX64LoadWord64:
+ __ movq(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX64StoreWord64: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movq(operand, i.InputRegister(index));
+ break;
+ }
+ case kX64StoreWord64I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movq(operand, i.InputImmediate(index));
+ break;
+ }
+ case kX64StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ movsxlq(index, index);
+ __ movq(Operand(object, index, times_1, 0), value);
+ __ leaq(index, Operand(object, index, times_1, 0));
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ __ RecordWrite(object, index, value, mode);
+ break;
+ }
+ }
+}
+
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ X64OperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+ BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kEqual:
+ __ j(equal, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ j(not_equal, tlabel);
+ break;
+ case kSignedLessThan:
+ __ j(less, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ j(greater_equal, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ j(less_equal, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ j(greater, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ j(below, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ j(above_equal, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ j(below_equal, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ j(above, tlabel);
+ break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
+ }
+ if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
+ __ bind(&done);
+}
+
+
+// Assembles boolean materializations after this instruction.
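+// For the kUnordered* conditions, the parity flag signals a NaN comparand. In
+// that case the expected boolean is loaded directly and the setcc at the end
+// is skipped; for ordered results control jumps straight to the shared setcc.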
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ X64OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 64-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
+ Condition cc = no_condition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kEqual:
+ cc = equal;
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kNotEqual:
+ cc = not_equal;
+ break;
+ case kSignedLessThan:
+ cc = less;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = greater_equal;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = less_equal;
+ break;
+ case kSignedGreaterThan:
+ cc = greater;
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = below;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = above_equal;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = below_equal;
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = above;
+ break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
+ }
+ __ bind(&check);
+ __ setcc(cc, reg);
+ __ movzxbl(reg, reg);
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ pushq(rbp);
+ __ movq(rbp, rsp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ pushq(Register::from_code(i));
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
+ __ movp(rcx, args.GetReceiverOperand());
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &ok, Label::kNear);
+ __ movp(rcx, GlobalObjectOperand());
+ __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
+ __ movp(args.GetReceiverOperand(), rcx);
+ __ bind(&ok);
+ }
+
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ if (stack_slots > 0) {
+ __ subq(rsp, Immediate(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ addq(rsp, Immediate(stack_slots * kPointerSize));
+ }
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ popq(Register::from_code(i));
+ }
+ }
+ __ popq(rbp); // Pop caller's frame pointer.
+ __ ret(0);
+ } else {
+ // No saved registers.
+ __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
+ __ popq(rbp); // Pop caller's frame pointer.
+ __ ret(0);
+ }
+ } else {
+ __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
+ __ popq(rbp); // Pop caller's frame pointer.
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ ret(pop_count * kPointerSize);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ movq(g.ToRegister(destination), src);
+ } else {
+ __ movq(g.ToOperand(destination), src);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+      // Use the fixed scratch register as a temporary for memory-to-memory
+      // moves.
+ Register tmp = kScratchRegister;
+ Operand dst = g.ToOperand(destination);
+ __ movq(tmp, src);
+ __ movq(dst, tmp);
+ }
+ } else if (source->IsConstant()) {
+ ConstantOperand* constant_source = ConstantOperand::cast(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst = destination->IsRegister() ? g.ToRegister(destination)
+ : kScratchRegister;
+ Immediate64 imm = g.ToImmediate64(constant_source);
+ switch (imm.type) {
+ case kImm64Value:
+ __ Set(dst, imm.value);
+ break;
+ case kImm64Reference:
+ __ Move(dst, imm.reference);
+ break;
+ case kImm64Handle:
+ __ Move(dst, imm.handle);
+ break;
+ }
+ if (destination->IsStackSlot()) {
+ __ movq(g.ToOperand(destination), kScratchRegister);
+ }
+ } else {
+ __ movq(kScratchRegister,
+ BitCast<uint64_t, double>(g.ToDouble(constant_source)));
+ if (destination->IsDoubleRegister()) {
+ __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ movq(g.ToOperand(destination), kScratchRegister);
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movsd(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movsd(dst, src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movsd(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = g.ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+ } else if (source->IsRegister() && destination->IsStackSlot()) {
+ Register src = g.ToRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ xchgq(src, dst);
+ } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+ (source->IsDoubleStackSlot() &&
+ destination->IsDoubleStackSlot())) {
+    // Memory-memory. A plain 64-bit integer move also covers double stack
+    // slots, since a double occupies exactly 64 bits.
+ Register tmp = kScratchRegister;
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ __ movq(tmp, dst);
+ __ xchgq(tmp, src);
+ __ movq(dst, tmp);
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // XMM register-register swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movsd(xmm0, src);
+ __ movsd(src, dst);
+ __ movsd(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ // XMM register-memory swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister src = g.ToDoubleRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(src, dst);
+ __ movsd(dst, xmm0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+#undef __
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ if (start_pc + 1 != end_pc) {
+ return false;
+ }
+ return *(code->instruction_start() + start_pc) ==
+ v8::internal::Assembler::kNopByte;
+}
+
+#endif
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
new file mode 100644
index 000000000..8ba33ab10
--- /dev/null
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Push) \
+ V(X64PushI) \
+ V(X64CallCodeObject) \
+ V(X64CallAddress) \
+ V(PopStack) \
+ V(X64CallJSFunction) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(X64Int32ToInt64) \
+ V(X64Int64ToInt32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSELoad) \
+ V(SSEStore) \
+ V(X64LoadWord8) \
+ V(X64StoreWord8) \
+ V(X64StoreWord8I) \
+ V(X64LoadWord16) \
+ V(X64StoreWord16) \
+ V(X64StoreWord16I) \
+ V(X64LoadWord32) \
+ V(X64StoreWord32) \
+ V(X64StoreWord32I) \
+ V(X64LoadWord64) \
+ V(X64StoreWord64) \
+ V(X64StoreWord64I) \
+ V(X64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MR) /* [%r1] */ \
+ V(MRI) /* [%r1 + K] */ \
+ V(MR1I) /* [%r1 + %r2 + K] */ \
+ V(MR2I) /* [%r1 + %r2*2 + K] */ \
+ V(MR4I) /* [%r1 + %r2*4 + K] */ \
+ V(MR8I) /* [%r1 + %r2*8 + K] */
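+
+// As a rough sketch of how a mode is consumed, the instruction selector ORs
+// the encoded mode into the opcode when emitting, e.g.
+//
+//   Emit(kX64LoadWord64 | AddressingModeField::encode(kMode_MRI),
+//        output, g.UseRegister(base), g.UseImmediate(index));
+//
+// and the code generator decodes it again to choose the operand shape.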
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
new file mode 100644
index 000000000..965e612e2
--- /dev/null
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -0,0 +1,722 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X64-specific methods for generating operands.
+class X64OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit X64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* TempRegister(Register reg) {
+ return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+ }
+
+ InstructionOperand* UseByteRegister(Node* node) {
+ // TODO(dcarney): relax constraint.
+ return UseFixed(node, rdx);
+ }
+
+ InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
+
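+  // Most x64 instructions accept only 32-bit sign-extended immediates, so the
+  // general predicate below admits just Int32Constant; the 64-bit form
+  // additionally admits numbers and heap constants outside new space.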
+ bool CanBeImmediate(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool CanBeImmediate64(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return true;
+ case IrOpcode::kNumberConstant:
+ return true;
+ case IrOpcode::kHeapConstant: {
+ // Constants in new space cannot be used as immediates in V8 because
+ // the GC does not scan code objects when collecting the new generation.
+ Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
+ return !isolate()->heap()->InNewSpace(*value);
+ }
+ default:
+ return false;
+ }
+ }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = OpParameter<MachineType>(node);
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* output = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSELoad;
+ break;
+ case kMachineWord8:
+ opcode = kX64LoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kX64LoadWord16;
+ break;
+ case kMachineWord32:
+ opcode = kX64LoadWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = kX64LoadWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ // load [#base + %index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(index), g.UseImmediate(base));
+ } else if (g.CanBeImmediate(index)) { // load [%base + #index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else { // load [%base + %index + K]
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ DCHECK(rep == kMachineTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
+ Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
+ g.UseFixed(index, rcx), g.UseFixed(value, rdx), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ bool is_immediate = false;
+ InstructionOperand* val;
+ if (rep == kMachineFloat64) {
+ val = g.UseDoubleRegister(value);
+ } else {
+ is_immediate = g.CanBeImmediate(value);
+ if (is_immediate) {
+ val = g.UseImmediate(value);
+ } else if (rep == kMachineWord8) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+ }
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSEStore;
+ break;
+ case kMachineWord8:
+ opcode = is_immediate ? kX64StoreWord8I : kX64StoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16;
+ break;
+ case kMachineWord32:
+ opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ // store [#base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else { // store [%base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // operand, since this might be its last use and its register can then be
+  // reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(ARRAY_SIZE(inputs), input_count);
+ DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kX64And32);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ VisitBinop(this, node, kX64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kX64Or32);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kX64Or);
+}
+
+
+template <typename T>
+static void VisitXor(InstructionSelector* selector, Node* node,
+ ArchOpcode xor_opcode, ArchOpcode not_opcode) {
+ X64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.right().Is(-1)) {
+ selector->Emit(not_opcode, g.DefineSameAsFirst(node),
+ g.Use(m.left().node()));
+ } else {
+ VisitBinop(selector, node, xor_opcode);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ VisitXor<int32_t>(this, node, kX64Xor32, kX64Not32);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ VisitXor<int64_t>(this, node, kX64Xor, kX64Not);
+}
+
+
+// Shared routine for multiple 32-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
+static void VisitWord32Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // TODO(turbofan): assembler only supports some addressing modes for shifts.
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ Int32BinopMatcher m(node);
+ if (m.right().IsWord32And()) {
+ Int32BinopMatcher mright(right);
+ if (mright.right().Is(0x1F)) {
+ right = mright.left().node();
+ }
+ }
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, rcx));
+ }
+}
+
+
+// Shared routine for multiple 64-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
+static void VisitWord64Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // TODO(turbofan): assembler only supports some addressing modes for shifts.
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ Int64BinopMatcher m(node);
+ if (m.right().IsWord64And()) {
+ Int64BinopMatcher mright(right);
+ if (mright.right().Is(0x3F)) {
+ right = mright.left().node();
+ }
+ }
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, rcx));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitWord32Shift(this, node, kX64Shl32);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ VisitWord64Shift(this, node, kX64Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitWord32Shift(this, node, kX64Shr32);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitWord64Shift(this, node, kX64Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitWord32Shift(this, node, kX64Sar32);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitWord64Shift(this, node, kX64Sar);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kX64Add32);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop(this, node, kX64Add);
+}
+
+
+template <typename T>
+static void VisitSub(InstructionSelector* selector, Node* node,
+ ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
+ X64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.left().Is(0)) {
+ selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
+ g.Use(m.right().node()));
+ } else {
+ VisitBinop(selector, node, sub_opcode);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
+}
+
+
+static void VisitMul(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else if (g.CanBeImmediate(left)) {
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
+ g.UseImmediate(left));
+ } else {
+ // TODO(turbofan): select better left operand.
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitMul(this, node, kX64Imul32);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitMul(this, node, kX64Imul);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(rdx)};
+ selector->Emit(
+ opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+ g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitDiv(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitDiv(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+ VisitDiv(this, node, kX64Udiv);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
+ selector->Emit(
+ opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+ g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitMod(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitMod(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+ VisitMod(this, node, kX64Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
+ Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(turbofan): X64 SSE cvttsd2siq should support operands.
+ Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand* temps[] = {g.TempRegister(rax)};
+ Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+}
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(dcarney): other modes
+ Emit(kX64Int64ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ // TODO(dcarney): other modes
+ Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kX64Add32, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation* cont) {
+ VisitBinop(this, node, kX64Sub32, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand* left, InstructionOperand* right,
+ FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right)) {
+ VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+ } else if (g.CanBeImmediate(left)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kX64Cmp32, cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kX64Test32, cont, true);
+ default:
+ break;
+ }
+
+ X64OperandGenerator g(this);
+ VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWordCompare(this, node, kX64Cmp, cont, false);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, node, kX64Test, cont, true);
+ default:
+ break;
+ }
+
+ X64OperandGenerator g(this);
+ VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kX64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kX64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ X64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
+ cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ X64OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);
+
+  // TODO(dcarney): stack alignment for C calls.
+  // TODO(dcarney): shadow space on Windows for C calls.
+ // Push any stack arguments.
+ for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+ Node* input = buffer.pushed_nodes[i];
+ // TODO(titzer): handle pushing double parameters.
+ if (g.CanBeImmediate(input)) {
+ Emit(kX64PushI, NULL, g.UseImmediate(input));
+ } else {
+ Emit(kX64Push, NULL, g.Use(input));
+ }
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kX64CallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kX64CallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ DCHECK(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+  // Caller cleans up the stack for C-style calls.
+ if (descriptor->kind() == CallDescriptor::kCallAddress &&
+ buffer.pushed_count > 0) {
+ DCHECK(deoptimization == NULL && continuation == NULL);
+ Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
new file mode 100644
index 000000000..84c01e654
--- /dev/null
+++ b/deps/v8/src/compiler/x64/linkage-x64.cc
@@ -0,0 +1,83 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef _WIN64
+const bool kWin64 = true;
+#else
+const bool kWin64 = false;
+#endif
+
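+// Calling-convention details for x64: Win64 passes the first four integer
+// arguments in rcx, rdx, r8 and r9, while the System V ABI (Linux/Mac) passes
+// the first six in rdi, rsi, rdx, rcx, r8 and r9, as CRegisterParameter()
+// reflects below.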
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return rax; }
+ static Register ReturnValue2Reg() { return rdx; }
+ static Register JSCallFunctionReg() { return rdi; }
+ static Register ContextReg() { return rsi; }
+ static Register RuntimeCallFunctionReg() { return rbx; }
+ static Register RuntimeCallArgCountReg() { return rax; }
+ static RegList CCalleeSaveRegisters() {
+ if (kWin64) {
+ return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
+ r14.bit() | r15.bit();
+ } else {
+ return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
+ }
+ }
+ static Register CRegisterParameter(int i) {
+ if (kWin64) {
+ static Register register_parameters[] = {rcx, rdx, r8, r9};
+ return register_parameters[i];
+ } else {
+ static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
+ return register_parameters[i];
+ }
+ }
+ static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ zone, descriptor, stack_parameter_count, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineType return_type,
+ const MachineType* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8