author    Michaël Zasso <targos@protonmail.com>  2016-05-27 16:37:42 +0200
committer Michaël Zasso <targos@protonmail.com>  2016-06-29 09:04:28 +0200
commit    2cc29517966de7257a2f1b34c58c77225a21e05d (patch)
tree      210bd177df2f06eec16e1e22edafdbcbffe66f8a /deps/v8/src/crankshaft
parent    bbf3838c70aaec1dd296fa75ae334fd1c7866df3 (diff)
deps: update V8 to 5.1.281.69
Pick up the latest branch-head for V8 5.1. This branch brings in
improved language support and performance improvements. For full
details: http://v8project.blogspot.com/2016/04/v8-release-51.html

* Picks up the latest branch head for 5.1 [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/dc81244
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/c8c8665

PR-URL: https://github.com/nodejs/node/pull/7016
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
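As a quick sanity check after a V8 update like this one, the version string compiled into the library can be printed through V8's public API. The snippet below is a minimal embedder sketch, not part of this commit; it assumes deps/v8/include is on the include path.

    // Minimal sketch (not part of this commit): print the V8 version string
    // the binary was built against ("5.1.281.69" after this update).
    #include <cstdio>

    #include "include/v8.h"  // assumes deps/v8/include is on the include path

    int main() {
      // v8::V8::GetVersion() returns a static, NUL-terminated version string.
      std::printf("V8 version: %s\n", v8::V8::GetVersion());
      return 0;
    }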
Diffstat (limited to 'deps/v8/src/crankshaft')
-rw-r--r--  deps/v8/src/crankshaft/arm/lithium-arm.cc | 112
-rw-r--r--  deps/v8/src/crankshaft/arm/lithium-arm.h | 79
-rw-r--r--  deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc | 236
-rw-r--r--  deps/v8/src/crankshaft/arm/lithium-codegen-arm.h | 5
-rw-r--r--  deps/v8/src/crankshaft/arm64/lithium-arm64.cc | 109
-rw-r--r--  deps/v8/src/crankshaft/arm64/lithium-arm64.h | 79
-rw-r--r--  deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc | 243
-rw-r--r--  deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h | 5
-rw-r--r--  deps/v8/src/crankshaft/compilation-phase.cc | 44
-rw-r--r--  deps/v8/src/crankshaft/compilation-phase.h | 41
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-bch.cc | 379
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-bch.h | 33
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-environment-liveness.cc | 4
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-instructions.cc | 600
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-instructions.h | 660
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-osr.cc | 2
-rw-r--r--  deps/v8/src/crankshaft/hydrogen-types.cc | 11
-rw-r--r--  deps/v8/src/crankshaft/hydrogen.cc | 881
-rw-r--r--  deps/v8/src/crankshaft/hydrogen.h | 131
-rw-r--r--  deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc | 488
-rw-r--r--  deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h | 11
-rw-r--r--  deps/v8/src/crankshaft/ia32/lithium-ia32.cc | 151
-rw-r--r--  deps/v8/src/crankshaft/ia32/lithium-ia32.h | 122
-rw-r--r--  deps/v8/src/crankshaft/lithium-allocator-inl.h | 2
-rw-r--r--  deps/v8/src/crankshaft/lithium-allocator.cc | 5
-rw-r--r--  deps/v8/src/crankshaft/lithium-allocator.h | 1
-rw-r--r--  deps/v8/src/crankshaft/lithium-codegen.cc | 25
-rw-r--r--  deps/v8/src/crankshaft/lithium-inl.h | 2
-rw-r--r--  deps/v8/src/crankshaft/lithium.cc | 93
-rw-r--r--  deps/v8/src/crankshaft/lithium.h | 11
-rw-r--r--  deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc | 238
-rw-r--r--  deps/v8/src/crankshaft/mips/lithium-codegen-mips.h | 5
-rw-r--r--  deps/v8/src/crankshaft/mips/lithium-mips.cc | 113
-rw-r--r--  deps/v8/src/crankshaft/mips/lithium-mips.h | 79
-rw-r--r--  deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc | 296
-rw-r--r--  deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h | 5
-rw-r--r--  deps/v8/src/crankshaft/mips64/lithium-mips64.cc | 113
-rw-r--r--  deps/v8/src/crankshaft/mips64/lithium-mips64.h | 79
-rw-r--r--  deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc | 242
-rw-r--r--  deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h | 5
-rw-r--r--  deps/v8/src/crankshaft/ppc/lithium-ppc.cc | 109
-rw-r--r--  deps/v8/src/crankshaft/ppc/lithium-ppc.h | 84
-rw-r--r--  deps/v8/src/crankshaft/s390/OWNERS | 5
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc | 5668
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-codegen-s390.h | 359
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc | 280
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h | 58
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-s390.cc | 2290
-rw-r--r--  deps/v8/src/crankshaft/s390/lithium-s390.h | 2414
-rw-r--r--  deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc | 310
-rw-r--r--  deps/v8/src/crankshaft/x64/lithium-codegen-x64.h | 5
-rw-r--r--  deps/v8/src/crankshaft/x64/lithium-x64.cc | 138
-rw-r--r--  deps/v8/src/crankshaft/x64/lithium-x64.h | 122
-rw-r--r--  deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc | 478
-rw-r--r--  deps/v8/src/crankshaft/x87/lithium-codegen-x87.h | 13
-rw-r--r--  deps/v8/src/crankshaft/x87/lithium-x87.cc | 125
-rw-r--r--  deps/v8/src/crankshaft/x87/lithium-x87.h | 79
57 files changed, 13605 insertions(+), 4672 deletions(-)
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index d5590f5c05..4072982513 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -248,27 +248,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -567,12 +546,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -897,22 +871,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -925,14 +893,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1059,16 +1027,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1092,6 +1050,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1100,6 +1061,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1216,22 +1180,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(r3);
- vector = FixedTemp(r2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
@@ -1843,13 +1791,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2488,13 +2429,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r3);
@@ -2531,11 +2465,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2596,11 +2528,5 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 91435cf785..60fe79d402 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -133,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +143,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -227,6 +223,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1721,23 +1727,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1800,29 +1789,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2426,19 +2392,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2555,18 +2508,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index 8b4e6c9904..c64aac3cc8 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -263,18 +263,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ PushFixedFrame();
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(ip);
- __ PopFixedFrame();
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -327,7 +324,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ PushFixedFrame();
+ __ PushCommonFrame();
__ bl(&needs_frame);
} else {
__ bl(&call_deopt_entry);
@@ -342,10 +339,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(ip);
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -2070,29 +2066,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ cmp(reg, Operand::Zero());
__ b(eq, instr->FalseLabel(chunk_));
@@ -2115,13 +2112,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ b(ge, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2133,19 +2130,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2365,11 +2362,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(r1));
DCHECK(ToRegister(instr->right()).is(r0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand::Zero());
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
@@ -3057,17 +3053,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ sub(result, sp, Operand(2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ ldr(result, MemOperand(scratch,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ mov(result, fp, LeaveCC, ne);
__ mov(result, scratch, LeaveCC, eq);
+ } else {
+ __ mov(result, fp);
}
}
@@ -3189,15 +3188,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ b(ne, &loop);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r0);
+ // It is safe to use r3, r4 and r5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r3, r4, r5);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3244,10 +3254,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3265,17 +3274,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ mov(r0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(ip);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(ip);
+ } else {
+ __ Call(ip);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3562,22 +3589,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use r3, r4 and r5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r3, r4, r5);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3640,56 +3722,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- // Change context.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ mov(r0, Operand(instr->arity()));
-
- // Load the code entry address
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(r3));
- DCHECK(vector_register.is(r2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Move(vector_register, vector);
- __ mov(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r1));
@@ -5152,13 +5184,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(r0));
- __ push(r0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -5517,13 +5542,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 67925ccdf6..8bbacc3c58 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -218,11 +218,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index c5d42082bd..6cfc846548 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -66,13 +66,6 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -135,20 +128,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
function()->PrintTo(stream);
@@ -735,33 +714,22 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -817,13 +785,6 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = NULL;
@@ -993,7 +954,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
}
- ToBooleanStub::Types expected = instr->expected_input_types();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp1 = needs_temps ? TempRegister() : NULL;
LOperand* temp2 = needs_temps ? TempRegister() : NULL;
@@ -1011,16 +972,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), x1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1045,23 +996,10 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
ops,
zone());
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), x1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(x3);
- vector = FixedTemp(x2);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
}
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, x0), instr);
+ return MarkAsCall(DefineFixed(result, x0), instr);
}
@@ -1454,11 +1392,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if ((instr->arguments_var() != NULL) &&
instr->arguments_object()->IsLinked()) {
@@ -1553,6 +1489,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
LOperand* function = UseFixed(instr->function(), x1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2457,13 +2396,6 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
@@ -2688,12 +2620,5 @@ LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
return AssignEnvironment(DefineAsRegister(result));
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 14abeb0ba6..237487ff88 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -32,8 +32,6 @@ class LCodeGen;
V(BitS) \
V(BoundsCheck) \
V(Branch) \
- V(CallFunction) \
- V(CallJSFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CallWithDescriptor) \
@@ -142,7 +140,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -157,7 +154,6 @@ class LCodeGen;
V(SubS) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(TruncateDoubleToIntOrSmi) \
@@ -238,6 +234,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -265,6 +268,8 @@ class LInstruction : public ZoneObject {
private:
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -635,6 +640,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -801,46 +807,6 @@ class LBranch final : public LControlInstruction<1, 2> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2734,19 +2700,6 @@ class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
@@ -2888,18 +2841,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 855cac14c0..9bbc8b87e8 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -359,38 +359,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
}
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).Is(x1));
- DCHECK(ToRegister(instr->result()).Is(x0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(x3));
- DCHECK(vector_register.is(x2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Mov(vector_register, vector);
- __ Mov(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Mov(x0, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
- RecordPushedArgumentsDelta(hinstr->argument_delta());
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->context()).is(cp));
@@ -606,19 +574,20 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(
+ StackFrame::STUB,
+ GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
}
frame_is_built_ = true;
}
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ Claim(slots, kPointerSize);
- }
-
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
@@ -740,11 +709,11 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ Push(lr, fp, cp);
+ __ Push(lr, fp);
__ Mov(fp, Smi::FromInt(StackFrame::STUB));
__ Push(fp);
__ Add(fp, __ StackPointer(),
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ TypedFrameConstants::kFixedFrameSizeFromFp);
Comment(";;; Deferred code");
}
@@ -753,7 +722,7 @@ bool LCodeGen::GenerateDeferredCode() {
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ Pop(xzr, cp, fp, lr);
+ __ Pop(xzr, fp, lr);
frame_is_built_ = false;
}
@@ -1560,14 +1529,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ B(ne, &loop);
__ Bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(x0);
+ // It is safe to use x3, x4 and x5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) x3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, x3, x4, x5);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in argc (receiver) which is x0, as
// expected by InvokeFunction.
ParameterCount actual(argc);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -1585,16 +1566,18 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// get a pointer which will work well with LAccessArgumentsAt.
DCHECK(masm()->StackPointer().Is(jssp));
__ Sub(result, jssp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
DCHECK(instr->temp() != NULL);
Register previous_fp = ToRegister(instr->temp());
__ Ldr(previous_fp,
MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result,
- MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Ldr(result, MemOperand(previous_fp,
+ CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ Csel(result, fp, previous_fp, ne);
+ } else {
+ __ Mov(result, fp);
}
}
@@ -1763,17 +1746,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
EmitCompareAndBranch(instr, ne, temp, 0);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ JumpIfRoot(
value, Heap::kUndefinedValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ JumpIfRoot(
value, Heap::kTrueValueRootIndex, true_label);
@@ -1781,13 +1765,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
value, Heap::kFalseValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ JumpIfRoot(
value, Heap::kNullValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
DCHECK(Smi::FromInt(0) == 0);
__ Cbz(value, false_label);
@@ -1815,13 +1799,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
__ B(ge, true_label);
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
@@ -1832,19 +1816,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, scratch, SYMBOL_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -1867,10 +1851,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -1898,21 +1881,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
__ Mov(arity_reg, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(x10);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(x10);
+ } else {
+ __ Call(x10);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
-
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -1959,26 +1959,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->function()).is(x1));
-
- // Change context.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
- __ Mov(x0, instr->arity());
-
- // Load the code entry address
- __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ Call(x10);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -2838,23 +2818,79 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
__ Scvtf(result, value);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ Ldr(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ B(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ Mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
// The function is required to be in x1.
DCHECK(ToRegister(instr->function()).is(x1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use x3, x4 and x5 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) x3 (new.target) will be initialized below.
+ PrepareForTailCall(actual, x3, x4, x5);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(x1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@@ -5245,10 +5281,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(x1));
DCHECK(ToRegister(instr->right()).is(x0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
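
The two changed lines reflect the new contract of the StringCompare stub: it is now specialized on the comparison op and returns the canonical true/false values directly, so the caller branches on equality with the true root instead of re-deriving a condition from the op via TokenToCondition. A hedged sketch of the old versus new contract (plain C++, illustrative only):

    #include <string>

    // Models the old and new stub contracts; not V8 code.
    enum class Op { kLT, kLTE, kGT, kGTE };

    // Old contract: return a signed comparison result; the caller still
    // has to apply the op (via TokenToCondition) to get a boolean.
    int OldStringCompare(const std::string& a, const std::string& b) {
      return a.compare(b);  // <0, 0, >0
    }

    // New contract: the stub is specialized on the op and returns the
    // final boolean, so the caller just branches on "result == true".
    bool NewStringCompare(Op op, const std::string& a, const std::string& b) {
      int c = a.compare(b);
      switch (op) {
        case Op::kLT:  return c < 0;
        case Op::kLTE: return c <= 0;
        case Op::kGT:  return c > 0;
        case Op::kGTE: return c >= 0;
      }
      return false;
    }
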
@@ -5375,14 +5411,6 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).Is(x0));
- DCHECK(ToRegister(instr->result()).Is(x0));
- __ Push(x0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object = ToRegister(instr->object());
@@ -5703,12 +5731,5 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index cf7de10394..f67ad5ab5d 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -322,11 +322,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in x1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) override;
diff --git a/deps/v8/src/crankshaft/compilation-phase.cc b/deps/v8/src/crankshaft/compilation-phase.cc
new file mode 100644
index 0000000000..9b40ccaec4
--- /dev/null
+++ b/deps/v8/src/crankshaft/compilation-phase.cc
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/compilation-phase.h"
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
+ : name_(name), info_(info), zone_(info->isolate()->allocator()) {
+ if (FLAG_hydrogen_stats) {
+ info_zone_start_allocation_size_ = info->zone()->allocation_size();
+ timer_.Start();
+ }
+}
+
+CompilationPhase::~CompilationPhase() {
+ if (FLAG_hydrogen_stats) {
+ size_t size = zone()->allocation_size();
+ size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
+ }
+}
+
+bool CompilationPhase::ShouldProduceTraceOutput() const {
+ // Trace if the appropriate trace flag is set and the phase name's first
+ // character is in the FLAG_trace_phase command line parameter.
+ AllowHandleDereference allow_deref;
+ bool tracing_on =
+ info()->IsStub()
+ ? FLAG_trace_hydrogen_stubs
+ : (FLAG_trace_hydrogen &&
+ info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter));
+ return (tracing_on &&
+ base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) !=
+ NULL);
+}
+
+} // namespace internal
+} // namespace v8
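
CompilationPhase is an RAII measurement scope: the constructor snapshots the CompilationInfo zone's allocation size and starts a timer, and the destructor charges the elapsed time plus the observed zone growth to the named phase when --hydrogen-stats is enabled. The same pattern in a self-contained sketch (ScopedPhase and Stats are illustrative names, not V8 types):

    #include <chrono>
    #include <cstddef>
    #include <iostream>
    #include <string>

    struct Stats {
      void SaveTiming(const std::string& name,
                      std::chrono::microseconds elapsed, size_t bytes) {
        std::cout << name << ": " << elapsed.count() << "us, "
                  << bytes << " bytes\n";
      }
    };

    class ScopedPhase {
     public:
      ScopedPhase(std::string name, Stats* stats, const size_t* allocated)
          : name_(std::move(name)), stats_(stats), allocated_(allocated),
            start_size_(*allocated),
            start_(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_);
        // Charge only the allocation growth observed during this phase.
        stats_->SaveTiming(name_, elapsed, *allocated_ - start_size_);
      }

     private:
      std::string name_;
      Stats* stats_;
      const size_t* allocated_;
      size_t start_size_;
      std::chrono::steady_clock::time_point start_;
    };

    int main() {
      Stats stats;
      size_t allocated = 0;
      {
        ScopedPhase phase("H_Example", &stats, &allocated);
        allocated += 4096;  // simulate zone allocation inside the phase
      }  // destructor reports ~4096 bytes for "H_Example"
    }
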
diff --git a/deps/v8/src/crankshaft/compilation-phase.h b/deps/v8/src/crankshaft/compilation-phase.h
new file mode 100644
index 0000000000..99e24c72c8
--- /dev/null
+++ b/deps/v8/src/crankshaft/compilation-phase.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_
+#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
+
+#include "src/allocation.h"
+#include "src/compiler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationPhase BASE_EMBEDDED {
+ public:
+ CompilationPhase(const char* name, CompilationInfo* info);
+ ~CompilationPhase();
+
+ protected:
+ bool ShouldProduceTraceOutput() const;
+
+ const char* name() const { return name_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
+ Zone* zone() { return &zone_; }
+
+ private:
+ const char* name_;
+ CompilationInfo* info_;
+ Zone zone_;
+ size_t info_zone_start_allocation_size_;
+ base::ElapsedTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_COMPILATION_PHASE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-bch.cc b/deps/v8/src/crankshaft/hydrogen-bch.cc
deleted file mode 100644
index 060e0bcdab..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bch.cc
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-bch.h"
-
-namespace v8 {
-namespace internal {
-
-/*
- * This class is a table with one element for each basic block.
- *
- * It is used to check if, inside one loop, all execution paths contain
- * a bounds check for a particular [index, length] combination.
- * The reason is that if there is a path that stays in the loop without
- * executing a check then the check cannot be hoisted out of the loop (it
- * would likely fail and cause a deopt for no good reason).
- * We also check if there are paths that exit the loop early, and if so we
- * perform the hoisting only if graph()->use_optimistic_licm() is true.
- * The reason is that such paths are relatively common and harmless (like in
- * a "search" method that scans an array until an element is found), but in
- * some cases they could cause a deopt if we hoist the check, so this is a
- * situation we need to detect.
- */
-class InductionVariableBlocksTable BASE_EMBEDDED {
- public:
- class Element {
- public:
- static const int kNoBlock = -1;
-
- HBasicBlock* block() { return block_; }
- void set_block(HBasicBlock* block) { block_ = block; }
- bool is_start() { return is_start_; }
- bool is_proper_exit() { return is_proper_exit_; }
- bool is_in_loop() { return is_in_loop_; }
- bool has_check() { return has_check_; }
- void set_has_check() { has_check_ = true; }
- InductionVariableLimitUpdate* additional_limit() {
- return &additional_limit_;
- }
-
- /*
- * Initializes the table element for a given loop (identified by its
- * induction variable).
- */
- void InitializeLoop(InductionVariableData* data) {
- DCHECK(data->limit() != NULL);
- HLoopInformation* loop = data->phi()->block()->current_loop();
- is_start_ = (block() == loop->loop_header());
- is_proper_exit_ = (block() == data->induction_exit_target());
- is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
- has_check_ = false;
- }
-
- // Utility methods to iterate over dominated blocks.
- void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
- HBasicBlock* CurrentDominatedBlock() {
- DCHECK(current_dominated_block_ != kNoBlock);
- return current_dominated_block_ < block()->dominated_blocks()->length() ?
- block()->dominated_blocks()->at(current_dominated_block_) : NULL;
- }
- HBasicBlock* NextDominatedBlock() {
- current_dominated_block_++;
- return CurrentDominatedBlock();
- }
-
- Element()
- : block_(NULL), is_start_(false), is_proper_exit_(false),
- has_check_(false), additional_limit_(),
- current_dominated_block_(kNoBlock) {}
-
- private:
- HBasicBlock* block_;
- bool is_start_;
- bool is_proper_exit_;
- bool is_in_loop_;
- bool has_check_;
- InductionVariableLimitUpdate additional_limit_;
- int current_dominated_block_;
- };
-
- HGraph* graph() const { return graph_; }
- Counters* counters() const { return graph()->isolate()->counters(); }
- HBasicBlock* loop_header() const { return loop_header_; }
- Element* at(int index) const { return &(elements_.at(index)); }
- Element* at(HBasicBlock* block) const { return at(block->block_id()); }
-
- void AddCheckAt(HBasicBlock* block) {
- at(block->block_id())->set_has_check();
- }
-
- /*
- * Initializes the table for a given loop (identified by its induction
- * variable).
- */
- void InitializeLoop(InductionVariableData* data) {
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- at(i)->InitializeLoop(data);
- }
- loop_header_ = data->phi()->block()->current_loop()->loop_header();
- }
-
-
- enum Hoistability {
- HOISTABLE,
- OPTIMISTICALLY_HOISTABLE,
- NOT_HOISTABLE
- };
-
- /*
- * This method checks if it is appropriate to hoist the bounds checks on an
- * induction variable out of the loop.
- * The problem is that in the loop code graph there could be execution paths
- * where the check is not performed, but hoisting the check has the same
- * semantics as performing it at every loop iteration, which could cause
- * unnecessary check failures (which would mean unnecessary deoptimizations).
- * The method returns OK if there are no paths that perform an iteration
- * (loop back to the header) without meeting a check, or UNSAFE is set if
- * early exit paths are found.
- */
- Hoistability CheckHoistability() {
- for (int i = 0; i < elements_.length(); i++) {
- at(i)->ResetCurrentDominatedBlock();
- }
- bool unsafe = false;
-
- HBasicBlock* current = loop_header();
- while (current != NULL) {
- HBasicBlock* next;
-
- if (at(current)->has_check() || !at(current)->is_in_loop()) {
- // We found a check or we reached a dominated block out of the loop,
- // therefore this block is safe and we can backtrack.
- next = NULL;
- } else {
- for (int i = 0; i < current->end()->SuccessorCount(); i ++) {
- Element* successor = at(current->end()->SuccessorAt(i));
-
- if (!successor->is_in_loop()) {
- if (!successor->is_proper_exit()) {
- // We found a path that exits the loop early, and is not the exit
- // related to the induction limit, therefore hoisting checks is
- // an optimistic assumption.
- unsafe = true;
- }
- }
-
- if (successor->is_start()) {
- // We found a path that does one loop iteration without meeting any
- // check, therefore hoisting checks would be likely to cause
- // unnecessary deopts.
- return NOT_HOISTABLE;
- }
- }
-
- next = at(current)->NextDominatedBlock();
- }
-
- // If we have no next block we need to backtrack the tree traversal.
- while (next == NULL) {
- current = current->dominator();
- if (current != NULL) {
- next = at(current)->NextDominatedBlock();
- } else {
- // We reached the root: next stays NULL.
- next = NULL;
- break;
- }
- }
-
- current = next;
- }
-
- return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
- }
-
- explicit InductionVariableBlocksTable(HGraph* graph)
- : graph_(graph), loop_header_(NULL),
- elements_(graph->blocks()->length(), graph->zone()) {
- for (int i = 0; i < graph->blocks()->length(); i++) {
- Element element;
- element.set_block(graph->blocks()->at(i));
- elements_.Add(element, graph->zone());
- DCHECK(at(i)->block()->block_id() == i);
- }
- }
-
- // Tries to hoist a check out of its induction loop.
- void ProcessRelatedChecks(
- InductionVariableData::InductionVariableCheck* check,
- InductionVariableData* data) {
- HValue* length = check->check()->length();
- check->set_processed();
- HBasicBlock* header =
- data->phi()->block()->current_loop()->loop_header();
- HBasicBlock* pre_header = header->predecessors()->at(0);
- // Check that the limit is defined in the loop preheader.
- if (!data->limit()->IsInteger32Constant()) {
- HBasicBlock* limit_block = data->limit()->block();
- if (limit_block != pre_header &&
- !limit_block->Dominates(pre_header)) {
- return;
- }
- }
- // Check that the length and limit have compatible representations.
- if (!(data->limit()->representation().Equals(
- length->representation()) ||
- data->limit()->IsInteger32Constant())) {
- return;
- }
- // Check that the length is defined in the loop preheader.
- if (check->check()->length()->block() != pre_header &&
- !check->check()->length()->block()->Dominates(pre_header)) {
- return;
- }
-
- // Add checks to the table.
- for (InductionVariableData::InductionVariableCheck* current_check = check;
- current_check != NULL;
- current_check = current_check->next()) {
- if (current_check->check()->length() != length) continue;
-
- AddCheckAt(current_check->check()->block());
- current_check->set_processed();
- }
-
- // Check that we will not cause unwanted deoptimizations.
- Hoistability hoistability = CheckHoistability();
- if (hoistability == NOT_HOISTABLE ||
- (hoistability == OPTIMISTICALLY_HOISTABLE &&
- !graph()->use_optimistic_licm())) {
- return;
- }
-
- // We will do the hoisting, but we must see if the limit is "limit" or if
- // all checks are done on constants: if all checks are done against the same
- // constant limit we will use that instead of the induction limit.
- bool has_upper_constant_limit = true;
- int32_t upper_constant_limit =
- check->HasUpperLimit() ? check->upper_limit() : 0;
- for (InductionVariableData::InductionVariableCheck* current_check = check;
- current_check != NULL;
- current_check = current_check->next()) {
- has_upper_constant_limit =
- has_upper_constant_limit && current_check->HasUpperLimit() &&
- current_check->upper_limit() == upper_constant_limit;
- counters()->bounds_checks_eliminated()->Increment();
- current_check->check()->set_skip_check();
- }
-
- // Choose the appropriate limit.
- Zone* zone = graph()->zone();
- HValue* context = graph()->GetInvalidContext();
- HValue* limit = data->limit();
- if (has_upper_constant_limit) {
- HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
- upper_constant_limit);
- new_limit->InsertBefore(pre_header->end());
- limit = new_limit;
- }
-
- // If necessary, redefine the limit in the preheader.
- if (limit->IsInteger32Constant() &&
- limit->block() != pre_header &&
- !limit->block()->Dominates(pre_header)) {
- HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
- limit->GetInteger32Constant());
- new_limit->InsertBefore(pre_header->end());
- limit = new_limit;
- }
-
- // Do the hoisting.
- HBoundsCheck* hoisted_check = HBoundsCheck::New(
- graph()->isolate(), zone, context, limit, check->check()->length());
- hoisted_check->InsertBefore(pre_header->end());
- hoisted_check->set_allow_equality(true);
- counters()->bounds_checks_hoisted()->Increment();
- }
-
- void CollectInductionVariableData(HBasicBlock* bb) {
- bool additional_limit = false;
-
- for (int i = 0; i < bb->phis()->length(); i++) {
- HPhi* phi = bb->phis()->at(i);
- phi->DetectInductionVariable();
- }
-
- additional_limit = InductionVariableData::ComputeInductionVariableLimit(
- bb, at(bb)->additional_limit());
-
- if (additional_limit) {
- at(bb)->additional_limit()->updated_variable->
- UpdateAdditionalLimit(at(bb)->additional_limit());
- }
-
- for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
- if (!i->IsBoundsCheck()) continue;
- HBoundsCheck* check = HBoundsCheck::cast(i);
- InductionVariableData::BitwiseDecompositionResult decomposition;
- InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
- if (!decomposition.base->IsPhi()) continue;
- HPhi* phi = HPhi::cast(decomposition.base);
-
- if (!phi->IsInductionVariable()) continue;
- InductionVariableData* data = phi->induction_variable_data();
-
- // For now ignore loops decrementing the index.
- if (data->increment() <= 0) continue;
- if (!data->LowerLimitIsNonNegativeConstant()) continue;
-
- // TODO(mmassi): skip OSR values for check->length().
- if (check->length() == data->limit() ||
- check->length() == data->additional_upper_limit()) {
- counters()->bounds_checks_eliminated()->Increment();
- check->set_skip_check();
- continue;
- }
-
- if (!phi->IsLimitedInductionVariable()) continue;
-
- int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
- decomposition.or_mask);
- phi->induction_variable_data()->AddCheck(check, limit);
- }
-
- for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
- CollectInductionVariableData(bb->dominated_blocks()->at(i));
- }
-
- if (additional_limit) {
- at(bb->block_id())->additional_limit()->updated_variable->
- UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
- }
- }
-
- void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
- for (int i = 0; i < bb->phis()->length(); i++) {
- HPhi* phi = bb->phis()->at(i);
- if (!phi->IsLimitedInductionVariable()) continue;
-
- InductionVariableData* induction_data = phi->induction_variable_data();
- InductionVariableData::ChecksRelatedToLength* current_length_group =
- induction_data->checks();
- while (current_length_group != NULL) {
- current_length_group->CloseCurrentBlock();
- InductionVariableData::InductionVariableCheck* current_base_check =
- current_length_group->checks();
- InitializeLoop(induction_data);
-
- while (current_base_check != NULL) {
- ProcessRelatedChecks(current_base_check, induction_data);
- while (current_base_check != NULL &&
- current_base_check->processed()) {
- current_base_check = current_base_check->next();
- }
- }
-
- current_length_group = current_length_group->next();
- }
- }
- }
-
- private:
- HGraph* graph_;
- HBasicBlock* loop_header_;
- ZoneList<Element> elements_;
-};
-
-
-void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
- InductionVariableBlocksTable table(graph());
- table.CollectInductionVariableData(graph()->entry_block());
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
- }
-}
-
-} // namespace internal
-} // namespace v8
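
For context on what this deletion removes: the pass implemented bounds-check hoisting for induction variables. When every path through a loop body performs a check of the form i < length for an induction variable with a positive increment and a limit defined in the preheader, a single preheader check of limit against length (with equality allowed, matching the set_allow_equality(true) call above) subsumes the per-iteration checks. An illustrative C++ analogue of the before/after shapes, not the pass's actual IR transformation:

    #include <stdexcept>
    #include <vector>

    // Before: a bounds check runs on every iteration.
    int SumChecked(const std::vector<int>& a, int limit) {
      int sum = 0;
      for (int i = 0; i < limit; ++i) {
        sum += a.at(i);  // per-iteration check, may throw
      }
      return sum;
    }

    // After hoisting: one preheader check "limit <= a.size()" (equality
    // allowed, since i stays strictly below limit) makes the body checks
    // redundant, so the accesses can go unchecked.
    int SumHoisted(const std::vector<int>& a, int limit) {
      if (limit > static_cast<int>(a.size())) throw std::out_of_range("a");
      int sum = 0;
      for (int i = 0; i < limit; ++i) {
        sum += a[i];  // check eliminated
      }
      return sum;
    }
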
diff --git a/deps/v8/src/crankshaft/hydrogen-bch.h b/deps/v8/src/crankshaft/hydrogen-bch.h
deleted file mode 100644
index cdcd407a09..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bch.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
-#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HBoundsCheckHoistingPhase : public HPhase {
- public:
- explicit HBoundsCheckHoistingPhase(HGraph* graph)
- : HPhase("H_Bounds checks hoisting", graph) { }
-
- void Run() {
- HoistRedundantBoundsChecks();
- }
-
- private:
- void HoistRedundantBoundsChecks();
-
- DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif  // V8_CRANKSHAFT_HYDROGEN_BCH_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
index ae0bd08837..7965a9432a 100644
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -37,9 +37,9 @@ void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
int index, HSimulate* simulate) {
int operand_index = simulate->ToOperandIndex(index);
if (operand_index == -1) {
- simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
+ simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut());
} else {
- simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
+ simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut());
}
}
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 729fc588bc..b57bebd8fc 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -25,6 +25,8 @@
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
@@ -773,8 +775,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kArgumentsLength:
case HValue::kArgumentsObject:
case HValue::kBlockEntry:
- case HValue::kBoundsCheckBaseIndexInformation:
- case HValue::kCallFunction:
case HValue::kCallNewArray:
case HValue::kCapturedObject:
case HValue::kClassOfTestAndBranch:
@@ -815,7 +815,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kReturn:
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
- case HValue::kStoreFrameContext:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
@@ -832,7 +831,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBitwise:
case HValue::kBoundsCheck:
case HValue::kBranch:
- case HValue::kCallJSFunction:
case HValue::kCallRuntime:
case HValue::kCallWithDescriptor:
case HValue::kChange:
@@ -873,7 +871,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kStringAdd:
case HValue::kStringCompareAndBranch:
case HValue::kSub:
- case HValue::kToFastProperties:
case HValue::kTransitionElementsKind:
case HValue::kTrapAllocationMemento:
case HValue::kTypeof:
@@ -907,97 +904,24 @@ std::ostream& HUnaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
}
-std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(function()) << " #" << argument_count();
-}
-
-
-HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* function,
- int argument_count) {
- bool has_stack_check = false;
- if (function->IsConstant()) {
- HConstant* fun_const = HConstant::cast(function);
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate));
- has_stack_check = !jsfun.is_null() &&
- (jsfun->code()->kind() == Code::FUNCTION ||
- jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
- }
-
- return new (zone) HCallJSFunction(function, argument_count, has_stack_check);
-}
-
-
std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
return os << NameOf(first()) << " " << NameOf(second()) << " #"
<< argument_count();
}
-
-std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(context()) << " " << NameOf(function());
- if (HasVectorAndSlot()) {
- os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
- }
- os << " (convert mode" << convert_mode() << ")";
- return os;
+std::ostream& HInvokeFunction::PrintTo(std::ostream& os) const { // NOLINT
+ if (tail_call_mode() == TailCallMode::kAllow) os << "Tail";
+ return HBinaryCall::PrintTo(os);
}
-
-void HBoundsCheck::ApplyIndexChange() {
- if (skip_check()) return;
-
- DecompositionResult decomposition;
- bool index_is_decomposable = index()->TryDecompose(&decomposition);
- if (index_is_decomposable) {
- DCHECK(decomposition.base() == base());
- if (decomposition.offset() == offset() &&
- decomposition.scale() == scale()) return;
- } else {
- return;
- }
-
- ReplaceAllUsesWith(index());
-
- HValue* current_index = decomposition.base();
- int actual_offset = decomposition.offset() + offset();
- int actual_scale = decomposition.scale() + scale();
-
- HGraph* graph = block()->graph();
- Isolate* isolate = graph->isolate();
- Zone* zone = graph->zone();
- HValue* context = graph->GetInvalidContext();
- if (actual_offset != 0) {
- HConstant* add_offset =
- HConstant::New(isolate, zone, context, actual_offset);
- add_offset->InsertBefore(this);
- HInstruction* add =
- HAdd::New(isolate, zone, context, current_index, add_offset);
- add->InsertBefore(this);
- add->AssumeRepresentation(index()->representation());
- add->ClearFlag(kCanOverflow);
- current_index = add;
- }
-
- if (actual_scale != 0) {
- HConstant* sar_scale = HConstant::New(isolate, zone, context, actual_scale);
- sar_scale->InsertBefore(this);
- HInstruction* sar =
- HSar::New(isolate, zone, context, current_index, sar_scale);
- sar->InsertBefore(this);
- sar->AssumeRepresentation(index()->representation());
- current_index = sar;
+std::ostream& HInvokeFunction::PrintDataTo(std::ostream& os) const { // NOLINT
+ HBinaryCall::PrintDataTo(os);
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
}
-
- SetOperandAt(0, current_index);
-
- base_ = NULL;
- offset_ = 0;
- scale_ = 0;
+ return os;
}
-
std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(index()) << " " << NameOf(length());
if (base() != NULL && (offset() != 0 || scale() != 0)) {
@@ -1053,20 +977,16 @@ Range* HBoundsCheck::InferRange(Zone* zone) {
}
-std::ostream& HBoundsCheckBaseIndexInformation::PrintDataTo(
- std::ostream& os) const { // NOLINT
- // TODO(svenpanne) This 2nd base_index() looks wrong...
- return os << "base: " << NameOf(base_index())
- << ", check: " << NameOf(base_index());
-}
-
-
std::ostream& HCallWithDescriptor::PrintDataTo(
std::ostream& os) const { // NOLINT
for (int i = 0; i < OperandCount(); i++) {
os << NameOf(OperandAt(i)) << " ";
}
- return os << "#" << argument_count();
+ os << "#" << argument_count();
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
+ }
+ return os;
}
@@ -1129,23 +1049,23 @@ std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT
Representation HBranch::observed_input_representation(int index) {
- if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
- expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
- expected_input_types_.Contains(ToBooleanStub::STRING) ||
- expected_input_types_.Contains(ToBooleanStub::SYMBOL) ||
- expected_input_types_.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::NULL_TYPE) ||
+ expected_input_types_.Contains(ToBooleanICStub::SPEC_OBJECT) ||
+ expected_input_types_.Contains(ToBooleanICStub::STRING) ||
+ expected_input_types_.Contains(ToBooleanICStub::SYMBOL) ||
+ expected_input_types_.Contains(ToBooleanICStub::SIMD_VALUE)) {
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
- if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
return Representation::Double();
}
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
return Representation::Double();
}
- if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ if (expected_input_types_.Contains(ToBooleanICStub::SMI)) {
return Representation::Smi();
}
return Representation::None();
@@ -1563,7 +1483,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
val, representation(), false, false));
}
}
- if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
+ if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
+ value()->IsDiv() && value()->HasOneUse()) {
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
@@ -1980,452 +1901,6 @@ Range* HMod::InferRange(Zone* zone) {
}
-InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
- if (phi->block()->loop_information() == NULL) return NULL;
- if (phi->OperandCount() != 2) return NULL;
- int32_t candidate_increment;
-
- candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
- if (candidate_increment != 0) {
- return new(phi->block()->graph()->zone())
- InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
- }
-
- candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
- if (candidate_increment != 0) {
- return new(phi->block()->graph()->zone())
- InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
- }
-
- return NULL;
-}
-
-
-/*
- * This function tries to match the following patterns (and all the relevant
- * variants related to |, & and + being commutative):
- * base | constant_or_mask
- * base & constant_and_mask
- * (base + constant_offset) & constant_and_mask
- * (base - constant_offset) & constant_and_mask
- */
-void InductionVariableData::DecomposeBitwise(
- HValue* value,
- BitwiseDecompositionResult* result) {
- HValue* base = IgnoreOsrValue(value);
- result->base = value;
-
- if (!base->representation().IsInteger32()) return;
-
- if (base->IsBitwise()) {
- bool allow_offset = false;
- int32_t mask = 0;
-
- HBitwise* bitwise = HBitwise::cast(base);
- if (bitwise->right()->IsInteger32Constant()) {
- mask = bitwise->right()->GetInteger32Constant();
- base = bitwise->left();
- } else if (bitwise->left()->IsInteger32Constant()) {
- mask = bitwise->left()->GetInteger32Constant();
- base = bitwise->right();
- } else {
- return;
- }
- if (bitwise->op() == Token::BIT_AND) {
- result->and_mask = mask;
- allow_offset = true;
- } else if (bitwise->op() == Token::BIT_OR) {
- result->or_mask = mask;
- } else {
- return;
- }
-
- result->context = bitwise->context();
-
- if (allow_offset) {
- if (base->IsAdd()) {
- HAdd* add = HAdd::cast(base);
- if (add->right()->IsInteger32Constant()) {
- base = add->left();
- } else if (add->left()->IsInteger32Constant()) {
- base = add->right();
- }
- } else if (base->IsSub()) {
- HSub* sub = HSub::cast(base);
- if (sub->right()->IsInteger32Constant()) {
- base = sub->left();
- }
- }
- }
-
- result->base = base;
- }
-}
-
-
-void InductionVariableData::AddCheck(HBoundsCheck* check,
- int32_t upper_limit) {
- DCHECK(limit_validity() != NULL);
- if (limit_validity() != check->block() &&
- !limit_validity()->Dominates(check->block())) return;
- if (!phi()->block()->current_loop()->IsNestedInThisLoop(
- check->block()->current_loop())) return;
-
- ChecksRelatedToLength* length_checks = checks();
- while (length_checks != NULL) {
- if (length_checks->length() == check->length()) break;
- length_checks = length_checks->next();
- }
- if (length_checks == NULL) {
- length_checks = new(check->block()->zone())
- ChecksRelatedToLength(check->length(), checks());
- checks_ = length_checks;
- }
-
- length_checks->AddCheck(check, upper_limit);
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
- if (checks() != NULL) {
- InductionVariableCheck* c = checks();
- HBasicBlock* current_block = c->check()->block();
- while (c != NULL && c->check()->block() == current_block) {
- c->set_upper_limit(current_upper_limit_);
- c = c->next();
- }
- }
-}
-
-
-void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
- Token::Value token,
- int32_t mask,
- HValue* index_base,
- HValue* context) {
- DCHECK(first_check_in_block() != NULL);
- HValue* previous_index = first_check_in_block()->index();
- DCHECK(context != NULL);
-
- Zone* zone = index_base->block()->graph()->zone();
- Isolate* isolate = index_base->block()->graph()->isolate();
- set_added_constant(HConstant::New(isolate, zone, context, mask));
- if (added_index() != NULL) {
- added_constant()->InsertBefore(added_index());
- } else {
- added_constant()->InsertBefore(first_check_in_block());
- }
-
- if (added_index() == NULL) {
- first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
- HInstruction* new_index = HBitwise::New(isolate, zone, context, token,
- index_base, added_constant());
- DCHECK(new_index->IsBitwise());
- new_index->ClearAllSideEffects();
- new_index->AssumeRepresentation(Representation::Integer32());
- set_added_index(HBitwise::cast(new_index));
- added_index()->InsertBefore(first_check_in_block());
- }
- DCHECK(added_index()->op() == token);
-
- added_index()->SetOperandAt(1, index_base);
- added_index()->SetOperandAt(2, added_constant());
- first_check_in_block()->SetOperandAt(0, added_index());
- if (previous_index->HasNoUses()) {
- previous_index->DeleteAndReplaceWith(NULL);
- }
-}
-
-void InductionVariableData::ChecksRelatedToLength::AddCheck(
- HBoundsCheck* check,
- int32_t upper_limit) {
- BitwiseDecompositionResult decomposition;
- InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
-
- if (first_check_in_block() == NULL ||
- first_check_in_block()->block() != check->block()) {
- CloseCurrentBlock();
-
- first_check_in_block_ = check;
- set_added_index(NULL);
- set_added_constant(NULL);
- current_and_mask_in_block_ = decomposition.and_mask;
- current_or_mask_in_block_ = decomposition.or_mask;
- current_upper_limit_ = upper_limit;
-
- InductionVariableCheck* new_check = new(check->block()->graph()->zone())
- InductionVariableCheck(check, checks_, upper_limit);
- checks_ = new_check;
- return;
- }
-
- if (upper_limit > current_upper_limit()) {
- current_upper_limit_ = upper_limit;
- }
-
- if (decomposition.and_mask != 0 &&
- current_or_mask_in_block() == 0) {
- if (current_and_mask_in_block() == 0 ||
- decomposition.and_mask > current_and_mask_in_block()) {
- UseNewIndexInCurrentBlock(Token::BIT_AND,
- decomposition.and_mask,
- decomposition.base,
- decomposition.context);
- current_and_mask_in_block_ = decomposition.and_mask;
- }
- check->set_skip_check();
- }
- if (current_and_mask_in_block() == 0) {
- if (decomposition.or_mask > current_or_mask_in_block()) {
- UseNewIndexInCurrentBlock(Token::BIT_OR,
- decomposition.or_mask,
- decomposition.base,
- decomposition.context);
- current_or_mask_in_block_ = decomposition.or_mask;
- }
- check->set_skip_check();
- }
-
- if (!check->skip_check()) {
- InductionVariableCheck* new_check = new(check->block()->graph()->zone())
- InductionVariableCheck(check, checks_, upper_limit);
- checks_ = new_check;
- }
-}
-
-
-/*
- * This method detects if phi is an induction variable, with phi_operand as
- * its "incremented" value (the other operand would be the "base" value).
- *
- * It checks if phi_operand has the form "phi + constant".
- * If yes, the constant is the increment that the induction variable gets at
- * every loop iteration.
- * Otherwise it returns 0.
- */
-int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
- HValue* phi_operand) {
- if (!phi_operand->representation().IsSmiOrInteger32()) return 0;
-
- if (phi_operand->IsAdd()) {
- HAdd* operation = HAdd::cast(phi_operand);
- if (operation->left() == phi &&
- operation->right()->IsInteger32Constant()) {
- return operation->right()->GetInteger32Constant();
- } else if (operation->right() == phi &&
- operation->left()->IsInteger32Constant()) {
- return operation->left()->GetInteger32Constant();
- }
- } else if (phi_operand->IsSub()) {
- HSub* operation = HSub::cast(phi_operand);
- if (operation->left() == phi &&
- operation->right()->IsInteger32Constant()) {
- int constant = operation->right()->GetInteger32Constant();
- if (constant == kMinInt) return 0;
- return -constant;
- }
- }
-
- return 0;
-}
-
-
-/*
- * Swaps the information in "update" with the one contained in "this".
- * The swapping is important because this method is used while doing a
- * dominator tree traversal, and "update" will retain the old data that
- * will be restored while backtracking.
- */
-void InductionVariableData::UpdateAdditionalLimit(
- InductionVariableLimitUpdate* update) {
- DCHECK(update->updated_variable == this);
- if (update->limit_is_upper) {
- swap(&additional_upper_limit_, &update->limit);
- swap(&additional_upper_limit_is_included_, &update->limit_is_included);
- } else {
- swap(&additional_lower_limit_, &update->limit);
- swap(&additional_lower_limit_is_included_, &update->limit_is_included);
- }
-}
-
-
-int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
- int32_t or_mask) {
- // Should be Smi::kMaxValue but it must fit 32 bits; lower is safe anyway.
- const int32_t MAX_LIMIT = 1 << 30;
-
- int32_t result = MAX_LIMIT;
-
- if (limit() != NULL &&
- limit()->IsInteger32Constant()) {
- int32_t limit_value = limit()->GetInteger32Constant();
- if (!limit_included()) {
- limit_value--;
- }
- if (limit_value < result) result = limit_value;
- }
-
- if (additional_upper_limit() != NULL &&
- additional_upper_limit()->IsInteger32Constant()) {
- int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
- if (!additional_upper_limit_is_included()) {
- limit_value--;
- }
- if (limit_value < result) result = limit_value;
- }
-
- if (and_mask > 0 && and_mask < MAX_LIMIT) {
- if (and_mask < result) result = and_mask;
- return result;
- }
-
- // Add the effect of the or_mask.
- result |= or_mask;
-
- return result >= MAX_LIMIT ? kNoLimit : result;
-}
-
-
-HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
- if (!v->IsPhi()) return v;
- HPhi* phi = HPhi::cast(v);
- if (phi->OperandCount() != 2) return v;
- if (phi->OperandAt(0)->block()->is_osr_entry()) {
- return phi->OperandAt(1);
- } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
- return phi->OperandAt(0);
- } else {
- return v;
- }
-}
-
-
-InductionVariableData* InductionVariableData::GetInductionVariableData(
- HValue* v) {
- v = IgnoreOsrValue(v);
- if (v->IsPhi()) {
- return HPhi::cast(v)->induction_variable_data();
- }
- return NULL;
-}
-
-
-/*
- * Check if a conditional branch to "current_branch" with token "token" is
- * the branch that keeps the induction loop running (and, conversely, will
- * terminate it if the "other_branch" is taken).
- *
- * Three conditions must be met:
- * - "current_branch" must be in the induction loop.
- * - "other_branch" must be out of the induction loop.
- * - "token" and the induction increment must be "compatible": the token should
- * be a condition that keeps the execution inside the loop until the limit is
- * reached.
- */
-bool InductionVariableData::CheckIfBranchIsLoopGuard(
- Token::Value token,
- HBasicBlock* current_branch,
- HBasicBlock* other_branch) {
- if (!phi()->block()->current_loop()->IsNestedInThisLoop(
- current_branch->current_loop())) {
- return false;
- }
-
- if (phi()->block()->current_loop()->IsNestedInThisLoop(
- other_branch->current_loop())) {
- return false;
- }
-
- if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
- return true;
- }
- if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
- return true;
- }
- if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
- return true;
- }
-
- return false;
-}
-
-
-void InductionVariableData::ComputeLimitFromPredecessorBlock(
- HBasicBlock* block,
- LimitFromPredecessorBlock* result) {
- if (block->predecessors()->length() != 1) return;
- HBasicBlock* predecessor = block->predecessors()->at(0);
- HInstruction* end = predecessor->last();
-
- if (!end->IsCompareNumericAndBranch()) return;
- HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
-
- Token::Value token = branch->token();
- if (!Token::IsArithmeticCompareOp(token)) return;
-
- HBasicBlock* other_target;
- if (block == branch->SuccessorAt(0)) {
- other_target = branch->SuccessorAt(1);
- } else {
- other_target = branch->SuccessorAt(0);
- token = Token::NegateCompareOp(token);
- DCHECK(block == branch->SuccessorAt(1));
- }
-
- InductionVariableData* data;
-
- data = GetInductionVariableData(branch->left());
- HValue* limit = branch->right();
- if (data == NULL) {
- data = GetInductionVariableData(branch->right());
- token = Token::ReverseCompareOp(token);
- limit = branch->left();
- }
-
- if (data != NULL) {
- result->variable = data;
- result->token = token;
- result->limit = limit;
- result->other_target = other_target;
- }
-}
-
-
-/*
- * Compute the limit that is imposed on an induction variable when entering
- * "block" (if any).
- * If the limit is the "proper" induction limit (the one that makes the loop
- * terminate when the induction variable reaches it) it is stored directly in
- * the induction variable data.
- * Otherwise the limit is written in "additional_limit" and the method
- * returns true.
- */
-bool InductionVariableData::ComputeInductionVariableLimit(
- HBasicBlock* block,
- InductionVariableLimitUpdate* additional_limit) {
- LimitFromPredecessorBlock limit;
- ComputeLimitFromPredecessorBlock(block, &limit);
- if (!limit.LimitIsValid()) return false;
-
- if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
- block,
- limit.other_target)) {
- limit.variable->limit_ = limit.limit;
- limit.variable->limit_included_ = limit.LimitIsIncluded();
- limit.variable->limit_validity_ = block;
- limit.variable->induction_exit_block_ = block->predecessors()->at(0);
- limit.variable->induction_exit_target_ = limit.other_target;
- return false;
- } else {
- additional_limit->updated_variable = limit.variable;
- additional_limit->limit = limit.limit;
- additional_limit->limit_is_upper = limit.LimitIsUpper();
- additional_limit->limit_is_included = limit.LimitIsIncluded();
- return true;
- }
-}
-
-
Range* HMathMinMax::InferRange(Zone* zone) {
if (representation().IsSmiOrInteger32()) {
Range* a = left()->range();
@@ -2652,7 +2127,11 @@ void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << function()->debug_name()->ToCString().get();
+ os << function()->debug_name()->ToCString().get();
+ if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ os << ", JSTailCall";
+ }
+ return os;
}
@@ -3272,6 +2751,17 @@ bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
? FirstSuccessor() : SecondSuccessor();
return true;
}
+ if (value()->type().IsNull() || value()->type().IsUndefined()) {
+ *block = FirstSuccessor();
+ return true;
+ }
+ if (value()->type().IsBoolean() ||
+ value()->type().IsSmi() ||
+ value()->type().IsString() ||
+ value()->type().IsJSReceiver()) {
+ *block = SecondSuccessor();
+ return true;
+ }
*block = NULL;
return false;
}
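
The added cases let the branch be folded from static type information alone: null and undefined are exactly the undetectable values this instruction tests for, while values statically known to be booleans, Smis, strings, or JS receivers are treated here as never undetectable. A small sketch of the decision table (the enum models HType facts and is illustrative, not V8's HType):

    #include <iostream>

    enum class StaticType { kNull, kUndefined, kBoolean, kSmi, kString,
                            kJSReceiver, kUnknown };

    // Returns 1 (first successor: undetectable), 0 (second successor),
    // or -1 when the branch cannot be decided from the type alone.
    int KnownSuccessor(StaticType t) {
      switch (t) {
        case StaticType::kNull:
        case StaticType::kUndefined:
          return 1;  // the undetectable values
        case StaticType::kBoolean:
        case StaticType::kSmi:
        case StaticType::kString:
        case StaticType::kJSReceiver:
          return 0;  // treated as never undetectable above
        default:
          return -1;  // needs a runtime check
      }
    }

    int main() {
      std::cout << KnownSuccessor(StaticType::kNull) << "\n";     // 1
      std::cout << KnownSuccessor(StaticType::kString) << "\n";   // 0
      std::cout << KnownSuccessor(StaticType::kUnknown) << "\n";  // -1
    }
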
@@ -3767,12 +3257,12 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats;
+ bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
#ifdef VERIFY_HEAP
- keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap;
+ keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
#endif
- if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) {
+ if (keep_heap_iterable) {
dominator_allocate->MakePrefillWithFiller();
} else {
// TODO(hpayer): This is a short-term hack to make allocation mementos
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 22ed052ba3..196a14fc70 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -56,11 +56,8 @@ class LChunkBuilder;
V(Bitwise) \
V(BlockEntry) \
V(BoundsCheck) \
- V(BoundsCheckBaseIndexInformation) \
V(Branch) \
V(CallWithDescriptor) \
- V(CallJSFunction) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CapturedObject) \
@@ -135,7 +132,6 @@ class LChunkBuilder;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +142,6 @@ class LChunkBuilder;
V(StringCompareAndBranch) \
V(Sub) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -735,14 +730,6 @@ class HValue : public ZoneObject {
virtual void Verify() = 0;
#endif
- virtual bool TryDecompose(DecompositionResult* decomposition) {
- if (RedefinedOperand() != NULL) {
- return RedefinedOperand()->TryDecompose(decomposition);
- } else {
- return false;
- }
- }
-
// Returns true conservatively if the program might be able to observe a
// ToString() operation on this value.
bool ToStringCanBeObserved() const {
@@ -1368,10 +1355,8 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
class HBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
- ToBooleanStub::Types);
- DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
- ToBooleanStub::Types,
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanICStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanICStub::Types,
HBasicBlock*, HBasicBlock*);
Representation RequiredInputRepresentation(int index) override {
@@ -1383,23 +1368,22 @@ class HBranch final : public HUnaryControlInstruction {
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- ToBooleanStub::Types expected_input_types() const {
+ ToBooleanICStub::Types expected_input_types() const {
return expected_input_types_;
}
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
- HBranch(HValue* value,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
+ HBranch(HValue* value, ToBooleanICStub::Types expected_input_types =
+ ToBooleanICStub::Types(),
+ HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
expected_input_types_(expected_input_types) {
SetFlag(kAllowUndefinedAsNaN);
}
- ToBooleanStub::Types expected_input_types_;
+ ToBooleanICStub::Types expected_input_types_;
};
@@ -1954,10 +1938,12 @@ class HEnterInlined final : public HTemplateInstruction<0> {
HConstant* closure_context, int arguments_count,
FunctionLiteral* function,
InliningKind inlining_kind, Variable* arguments_var,
- HArgumentsObject* arguments_object) {
- return new (zone) HEnterInlined(return_id, closure, closure_context,
- arguments_count, function, inlining_kind,
- arguments_var, arguments_object, zone);
+ HArgumentsObject* arguments_object,
+ TailCallMode syntactic_tail_call_mode) {
+ return new (zone)
+ HEnterInlined(return_id, closure, closure_context, arguments_count,
+ function, inlining_kind, arguments_var, arguments_object,
+ syntactic_tail_call_mode, zone);
}
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
@@ -1973,6 +1959,9 @@ class HEnterInlined final : public HTemplateInstruction<0> {
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
+ TailCallMode syntactic_tail_call_mode() const {
+ return syntactic_tail_call_mode_;
+ }
BailoutId ReturnId() const { return return_id_; }
int inlining_id() const { return inlining_id_; }
void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
@@ -1991,7 +1980,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
HConstant* closure_context, int arguments_count,
FunctionLiteral* function, InliningKind inlining_kind,
Variable* arguments_var, HArgumentsObject* arguments_object,
- Zone* zone)
+ TailCallMode syntactic_tail_call_mode, Zone* zone)
: return_id_(return_id),
shared_(handle(closure->shared())),
closure_(closure),
@@ -2000,6 +1989,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
arguments_pushed_(false),
function_(function),
inlining_kind_(inlining_kind),
+ syntactic_tail_call_mode_(syntactic_tail_call_mode),
inlining_id_(0),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
@@ -2013,6 +2003,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
bool arguments_pushed_;
FunctionLiteral* function_;
InliningKind inlining_kind_;
+ TailCallMode syntactic_tail_call_mode_;
int inlining_id_;
Variable* arguments_var_;
HArgumentsObject* arguments_object_;
@@ -2220,51 +2211,17 @@ class HBinaryCall : public HCall<2> {
};
-class HCallJSFunction final : public HCall<1> {
- public:
- static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function, int argument_count);
-
- HValue* function() const { return OperandAt(0); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) final {
- DCHECK(index == 0);
- return Representation::Tagged();
- }
-
- bool HasStackCheck() final { return has_stack_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
-
- private:
- // The argument count includes the receiver.
- HCallJSFunction(HValue* function,
- int argument_count,
- bool has_stack_check)
- : HCall<1>(argument_count),
- has_stack_check_(has_stack_check) {
- SetOperandAt(0, function);
- }
-
- bool has_stack_check_;
-};
-
-
-enum CallMode { NORMAL_CALL, TAIL_CALL };
-
-
class HCallWithDescriptor final : public HInstruction {
public:
- static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* target, int argument_count,
- CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands,
- CallMode call_mode = NORMAL_CALL) {
- HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
- target, argument_count, descriptor, operands, call_mode, zone);
- DCHECK(operands.length() == res->GetParameterCount());
+ static HCallWithDescriptor* New(
+ Isolate* isolate, Zone* zone, HValue* context, HValue* target,
+ int argument_count, CallInterfaceDescriptor descriptor,
+ const Vector<HValue*>& operands,
+ TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow) {
+ HCallWithDescriptor* res = new (zone)
+ HCallWithDescriptor(target, argument_count, descriptor, operands,
+ syntactic_tail_call_mode, tail_call_mode, zone);
return res;
}
@@ -2286,7 +2243,16 @@ class HCallWithDescriptor final : public HInstruction {
HType CalculateInferredType() final { return HType::Tagged(); }
- bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+ // Defines whether this instruction corresponds to a JS call at tail position.
+ TailCallMode syntactic_tail_call_mode() const {
+ return SyntacticTailCallModeField::decode(bit_field_);
+ }
+
+ // Defines whether this call should be generated as a tail call.
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+ bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
virtual int argument_count() const {
return argument_count_;
@@ -2306,14 +2272,18 @@ class HCallWithDescriptor final : public HInstruction {
// The argument count includes the receiver.
HCallWithDescriptor(HValue* target, int argument_count,
CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands, CallMode call_mode,
- Zone* zone)
+ const Vector<HValue*>& operands,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode, Zone* zone)
: descriptor_(descriptor),
values_(GetParameterCount() + 1, zone),
argument_count_(argument_count),
- call_mode_(call_mode) {
+ bit_field_(
+ TailCallModeField::encode(tail_call_mode) |
+ SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+ DCHECK_EQ(operands.length(), GetParameterCount());
// We can only tail call without any stack arguments.
- DCHECK(call_mode != TAIL_CALL || argument_count == 0);
+ DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
AddOperand(target, zone);
for (int i = 0; i < operands.length(); i++) {
AddOperand(operands[i], zone);
@@ -2338,97 +2308,75 @@ class HCallWithDescriptor final : public HInstruction {
CallInterfaceDescriptor descriptor_;
ZoneList<HValue*> values_;
int argument_count_;
- CallMode call_mode_;
+ class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+ class SyntacticTailCallModeField
+ : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
+ uint32_t bit_field_;
};
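
Replacing the CallMode member with a packed bit_field_ follows the BitField idiom used throughout this header: each field class owns a fixed bit range, provides encode/decode, and chains its position off the previous field's kNext. A self-contained sketch of the idiom (a minimal BitField reimplementation for illustration, not V8's template):

    #include <cstdint>
    #include <iostream>

    template <class T, int kShift, int kSize>
    struct BitField {
      static constexpr int kNext = kShift + kSize;
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
    };

    enum class TailCallMode : uint32_t { kDisallow = 0, kAllow = 1 };

    // Two one-bit fields packed into a single word, chained via kNext.
    using TailCallModeField = BitField<TailCallMode, 0, 1>;
    using SyntacticTailCallModeField =
        BitField<TailCallMode, TailCallModeField::kNext, 1>;

    int main() {
      uint32_t bits = TailCallModeField::encode(TailCallMode::kAllow) |
                      SyntacticTailCallModeField::encode(TailCallMode::kAllow);
      std::cout << (TailCallModeField::decode(bits) ==
                    TailCallMode::kAllow) << "\n";  // 1
      std::cout << (SyntacticTailCallModeField::decode(bits) ==
                    TailCallMode::kAllow) << "\n";  // 1
    }
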
class HInvokeFunction final : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
-
- HInvokeFunction(HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function) {
- formal_parameter_count_ =
- known_function.is_null()
- ? 0
- : known_function->shared()->internal_formal_parameter_count();
- has_stack_check_ = !known_function.is_null() &&
- (known_function->code()->kind() == Code::FUNCTION ||
- known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- }
-
- static HInvokeFunction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function,
- Handle<JSFunction> known_function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function,
- known_function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HInvokeFunction, HValue*,
+ Handle<JSFunction>, int,
+ TailCallMode, TailCallMode);
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
int formal_parameter_count() const { return formal_parameter_count_; }
- bool HasStackCheck() final { return has_stack_check_; }
+ bool HasStackCheck() final { return HasStackCheckField::decode(bit_field_); }
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
-
- private:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count),
- has_stack_check_(false) {
+ // Defines whether this instruction corresponds to a JS call at tail position.
+ TailCallMode syntactic_tail_call_mode() const {
+ return SyntacticTailCallModeField::decode(bit_field_);
}
- Handle<JSFunction> known_function_;
- int formal_parameter_count_;
- bool has_stack_check_;
-};
-
-
-class HCallFunction final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
- ConvertReceiverMode);
-
- HValue* context() const { return first(); }
- HValue* function() const { return second(); }
-
- ConvertReceiverMode convert_mode() const {
- return ConvertReceiverModeField::decode(bit_field_);
- }
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
- bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
+ // Defines whether this call should be generated as a tail call.
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
}
- DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+ std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- int argument_delta() const override { return -argument_count(); }
-
private:
- HCallFunction(HValue* context, HValue* function, int argument_count,
- ConvertReceiverMode convert_mode)
+ void set_has_stack_check(bool has_stack_check) {
+ bit_field_ = HasStackCheckField::update(bit_field_, has_stack_check);
+ }
+
+ HInvokeFunction(HValue* context, HValue* function,
+ Handle<JSFunction> known_function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode)
: HBinaryCall(context, function, argument_count),
- bit_field_(ConvertReceiverModeField::encode(convert_mode)) {}
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
+ known_function_(known_function),
+ bit_field_(
+ TailCallModeField::encode(tail_call_mode) |
+ SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+ DCHECK(tail_call_mode != TailCallMode::kAllow ||
+ syntactic_tail_call_mode == TailCallMode::kAllow);
+ formal_parameter_count_ =
+ known_function.is_null()
+ ? 0
+ : known_function->shared()->internal_formal_parameter_count();
+ set_has_stack_check(
+ !known_function.is_null() &&
+ (known_function->code()->kind() == Code::FUNCTION ||
+ known_function->code()->kind() == Code::OPTIMIZED_FUNCTION));
+ }
- class ConvertReceiverModeField : public BitField<ConvertReceiverMode, 0, 2> {
- };
+ Handle<JSFunction> known_function_;
+ int formal_parameter_count_;
+ class HasStackCheckField : public BitField<bool, 0, 1> {};
+ class TailCallModeField
+ : public BitField<TailCallMode, HasStackCheckField::kNext, 1> {};
+ class SyntacticTailCallModeField
+ : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
uint32_t bit_field_;
};
@@ -2550,10 +2498,10 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
// Indicates if we support a double (and int32) output for Math.floor and
// Math.round.
bool SupportsFlexibleFloorAndRound() const {
-#ifdef V8_TARGET_ARCH_ARM64
- // TODO(rmcilroy): Re-enable this for Arm64 once http://crbug.com/476477 is
- // fixed.
- return false;
+#if V8_TARGET_ARCH_ARM64
+ return true;
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ return CpuFeatures::IsSupported(SSE4_1);
#else
return false;
#endif
@@ -2997,226 +2945,6 @@ class HCheckHeapObject final : public HUnaryOperation {
};
-class InductionVariableData;
-
-
-struct InductionVariableLimitUpdate {
- InductionVariableData* updated_variable;
- HValue* limit;
- bool limit_is_upper;
- bool limit_is_included;
-
- InductionVariableLimitUpdate()
- : updated_variable(NULL), limit(NULL),
- limit_is_upper(false), limit_is_included(false) {}
-};
-
-
-class HBoundsCheck;
-class HPhi;
-class HBitwise;
-
-
-class InductionVariableData final : public ZoneObject {
- public:
- class InductionVariableCheck : public ZoneObject {
- public:
- HBoundsCheck* check() { return check_; }
- InductionVariableCheck* next() { return next_; }
- bool HasUpperLimit() { return upper_limit_ >= 0; }
- int32_t upper_limit() {
- DCHECK(HasUpperLimit());
- return upper_limit_;
- }
- void set_upper_limit(int32_t upper_limit) {
- upper_limit_ = upper_limit;
- }
-
- bool processed() { return processed_; }
- void set_processed() { processed_ = true; }
-
- InductionVariableCheck(HBoundsCheck* check,
- InductionVariableCheck* next,
- int32_t upper_limit = kNoLimit)
- : check_(check), next_(next), upper_limit_(upper_limit),
- processed_(false) {}
-
- private:
- HBoundsCheck* check_;
- InductionVariableCheck* next_;
- int32_t upper_limit_;
- bool processed_;
- };
-
- class ChecksRelatedToLength : public ZoneObject {
- public:
- HValue* length() { return length_; }
- ChecksRelatedToLength* next() { return next_; }
- InductionVariableCheck* checks() { return checks_; }
-
- void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
- void CloseCurrentBlock();
-
- ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
- : length_(length), next_(next), checks_(NULL),
- first_check_in_block_(NULL),
- added_index_(NULL),
- added_constant_(NULL),
- current_and_mask_in_block_(0),
- current_or_mask_in_block_(0) {}
-
- private:
- void UseNewIndexInCurrentBlock(Token::Value token,
- int32_t mask,
- HValue* index_base,
- HValue* context);
-
- HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
- HBitwise* added_index() { return added_index_; }
- void set_added_index(HBitwise* index) { added_index_ = index; }
- HConstant* added_constant() { return added_constant_; }
- void set_added_constant(HConstant* constant) { added_constant_ = constant; }
- int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
- int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
- int32_t current_upper_limit() { return current_upper_limit_; }
-
- HValue* length_;
- ChecksRelatedToLength* next_;
- InductionVariableCheck* checks_;
-
- HBoundsCheck* first_check_in_block_;
- HBitwise* added_index_;
- HConstant* added_constant_;
- int32_t current_and_mask_in_block_;
- int32_t current_or_mask_in_block_;
- int32_t current_upper_limit_;
- };
-
- struct LimitFromPredecessorBlock {
- InductionVariableData* variable;
- Token::Value token;
- HValue* limit;
- HBasicBlock* other_target;
-
- bool LimitIsValid() { return token != Token::ILLEGAL; }
-
- bool LimitIsIncluded() {
- return Token::IsEqualityOp(token) ||
- token == Token::GTE || token == Token::LTE;
- }
- bool LimitIsUpper() {
- return token == Token::LTE || token == Token::LT || token == Token::NE;
- }
-
- LimitFromPredecessorBlock()
- : variable(NULL),
- token(Token::ILLEGAL),
- limit(NULL),
- other_target(NULL) {}
- };
-
- static const int32_t kNoLimit = -1;
-
- static InductionVariableData* ExaminePhi(HPhi* phi);
- static void ComputeLimitFromPredecessorBlock(
- HBasicBlock* block,
- LimitFromPredecessorBlock* result);
- static bool ComputeInductionVariableLimit(
- HBasicBlock* block,
- InductionVariableLimitUpdate* additional_limit);
-
- struct BitwiseDecompositionResult {
- HValue* base;
- int32_t and_mask;
- int32_t or_mask;
- HValue* context;
-
- BitwiseDecompositionResult()
- : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
- };
- static void DecomposeBitwise(HValue* value,
- BitwiseDecompositionResult* result);
-
- void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
-
- bool CheckIfBranchIsLoopGuard(Token::Value token,
- HBasicBlock* current_branch,
- HBasicBlock* other_branch);
-
- void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
-
- HPhi* phi() { return phi_; }
- HValue* base() { return base_; }
- int32_t increment() { return increment_; }
- HValue* limit() { return limit_; }
- bool limit_included() { return limit_included_; }
- HBasicBlock* limit_validity() { return limit_validity_; }
- HBasicBlock* induction_exit_block() { return induction_exit_block_; }
- HBasicBlock* induction_exit_target() { return induction_exit_target_; }
- ChecksRelatedToLength* checks() { return checks_; }
- HValue* additional_upper_limit() { return additional_upper_limit_; }
- bool additional_upper_limit_is_included() {
- return additional_upper_limit_is_included_;
- }
- HValue* additional_lower_limit() { return additional_lower_limit_; }
- bool additional_lower_limit_is_included() {
- return additional_lower_limit_is_included_;
- }
-
- bool LowerLimitIsNonNegativeConstant() {
- if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
- return true;
- }
- if (additional_lower_limit() != NULL &&
- additional_lower_limit()->IsInteger32Constant() &&
- additional_lower_limit()->GetInteger32Constant() >= 0) {
- // Ignoring the corner case of !additional_lower_limit_is_included()
- // is safe, handling it adds unneeded complexity.
- return true;
- }
- return false;
- }
-
- int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
-
- private:
- template <class T> void swap(T* a, T* b) {
- T c(*a);
- *a = *b;
- *b = c;
- }
-
- InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
- : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
- limit_(NULL), limit_included_(false), limit_validity_(NULL),
- induction_exit_block_(NULL), induction_exit_target_(NULL),
- checks_(NULL),
- additional_upper_limit_(NULL),
- additional_upper_limit_is_included_(false),
- additional_lower_limit_(NULL),
- additional_lower_limit_is_included_(false) {}
-
- static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
-
- static HValue* IgnoreOsrValue(HValue* v);
- static InductionVariableData* GetInductionVariableData(HValue* v);
-
- HPhi* phi_;
- HValue* base_;
- int32_t increment_;
- HValue* limit_;
- bool limit_included_;
- HBasicBlock* limit_validity_;
- HBasicBlock* induction_exit_block_;
- HBasicBlock* induction_exit_target_;
- ChecksRelatedToLength* checks_;
- HValue* additional_upper_limit_;
- bool additional_upper_limit_is_included_;
- HValue* additional_lower_limit_;
- bool additional_lower_limit_is_included_;
-};
-
-
class HPhi final : public HValue {
public:
HPhi(int merged_index, Zone* zone)
@@ -3250,21 +2978,6 @@ class HPhi final : public HValue {
int merged_index() const { return merged_index_; }
- InductionVariableData* induction_variable_data() {
- return induction_variable_data_;
- }
- bool IsInductionVariable() {
- return induction_variable_data_ != NULL;
- }
- bool IsLimitedInductionVariable() {
- return IsInductionVariable() &&
- induction_variable_data_->limit() != NULL;
- }
- void DetectInductionVariable() {
- DCHECK(induction_variable_data_ == NULL);
- induction_variable_data_ = InductionVariableData::ExaminePhi(this);
- }
-
std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
#ifdef DEBUG
@@ -3310,7 +3023,6 @@ class HPhi final : public HValue {
int merged_index_ = 0;
int phi_id_ = -1;
- InductionVariableData* induction_variable_data_ = nullptr;
Representation representation_from_indirect_uses_ = Representation::None();
Representation representation_from_non_phi_uses_ = Representation::None();
@@ -3865,8 +3577,8 @@ class HWrapReceiver final : public HTemplateInstruction<2> {
class HApplyArguments final : public HTemplateInstruction<4> {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
- HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P5(HApplyArguments, HValue*, HValue*, HValue*,
+ HValue*, TailCallMode);
Representation RequiredInputRepresentation(int index) override {
// The length is untagged, all other inputs are tagged.
@@ -3880,13 +3592,16 @@ class HApplyArguments final : public HTemplateInstruction<4> {
HValue* length() { return OperandAt(2); }
HValue* elements() { return OperandAt(3); }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
private:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
+ HApplyArguments(HValue* function, HValue* receiver, HValue* length,
+ HValue* elements, TailCallMode tail_call_mode)
+ : bit_field_(TailCallModeField::encode(tail_call_mode)) {
set_representation(Representation::Tagged());
SetOperandAt(0, function);
SetOperandAt(1, receiver);
@@ -3894,12 +3609,16 @@ class HApplyArguments final : public HTemplateInstruction<4> {
SetOperandAt(3, elements);
SetAllSideEffects();
}
+
+ class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
+ uint32_t bit_field_;
};
class HArgumentsElements final : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
+ DECLARE_INSTRUCTION_FACTORY_P2(HArgumentsElements, bool, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
@@ -3908,12 +3627,14 @@ class HArgumentsElements final : public HTemplateInstruction<0> {
}
bool from_inlined() const { return from_inlined_; }
+ bool arguments_adaptor() const { return arguments_adaptor_; }
protected:
bool DataEquals(HValue* other) override { return true; }
private:
- explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
+ explicit HArgumentsElements(bool from_inlined, bool arguments_adaptor = true)
+ : from_inlined_(from_inlined), arguments_adaptor_(arguments_adaptor) {
// The value produced by this instruction is a pointer into the stack
// that looks as if it was a smi because of alignment.
set_representation(Representation::Tagged());
@@ -3923,6 +3644,7 @@ class HArgumentsElements final : public HTemplateInstruction<0> {
bool IsDeletable() const override { return true; }
bool from_inlined_;
+ bool arguments_adaptor_;
};
@@ -3981,9 +3703,6 @@ class HAccessArgumentsAt final : public HTemplateInstruction<3> {
};
-class HBoundsCheckBaseIndexInformation;
-
-
class HBoundsCheck final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -3995,24 +3714,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
int offset() const { return offset_; }
int scale() const { return scale_; }
- void ApplyIndexChange();
- bool DetectCompoundIndex() {
- DCHECK(base() == NULL);
-
- DecompositionResult decomposition;
- if (index()->TryDecompose(&decomposition)) {
- base_ = decomposition.base();
- offset_ = decomposition.offset();
- scale_ = decomposition.scale();
- return true;
- } else {
- base_ = index();
- offset_ = 0;
- scale_ = 0;
- return false;
- }
- }
-
Representation RequiredInputRepresentation(int index) override {
return representation();
}
@@ -4031,8 +3732,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
- friend class HBoundsCheckBaseIndexInformation;
-
Range* InferRange(Zone* zone) override;
bool DataEquals(HValue* other) override { return true; }
@@ -4061,34 +3760,6 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
};
-class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
- public:
- explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
- DecompositionResult decomposition;
- if (check->index()->TryDecompose(&decomposition)) {
- SetOperandAt(0, decomposition.base());
- SetOperandAt(1, check);
- } else {
- UNREACHABLE();
- }
- }
-
- HValue* base_index() const { return OperandAt(0); }
- HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- int RedefinedOperandIndex() override { return 0; }
- bool IsPurelyInformativeDefinition() override { return true; }
-};
-
-
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
@@ -4711,18 +4382,6 @@ class HAdd final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (left()->IsInteger32Constant()) {
- decomposition->Apply(right(), left()->GetInteger32Constant());
- return true;
- } else if (right()->IsInteger32Constant()) {
- decomposition->Apply(left(), right()->GetInteger32Constant());
- return true;
- } else {
- return false;
- }
- }
-
void RepresentationChanged(Representation to) override {
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
@@ -4802,15 +4461,6 @@ class HSub final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- decomposition->Apply(left(), -right()->GetInteger32Constant());
- return true;
- } else {
- return false;
- }
- }
-
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
@@ -5065,18 +4715,6 @@ class HShr final : public HBitwiseBinaryOperation {
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right);
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
- // This is intended to look for HAdd and HSub, to handle compounds
- // like ((base + offset) >> scale) with one single decomposition.
- left()->TryDecompose(decomposition);
- return true;
- }
- }
- return false;
- }
-
Range* InferRange(Zone* zone) override;
void UpdateRepresentation(Representation new_rep,
@@ -5102,18 +4740,6 @@ class HSar final : public HBitwiseBinaryOperation {
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right);
- bool TryDecompose(DecompositionResult* decomposition) override {
- if (right()->IsInteger32Constant()) {
- if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
- // This is intended to look for HAdd and HSub, to handle compounds
- // like ((base + offset) >> scale) with one single decomposition.
- left()->TryDecompose(decomposition);
- return true;
- }
- }
- return false;
- }
-
Range* InferRange(Zone* zone) override;
void UpdateRepresentation(Representation new_rep,
@@ -5572,11 +5198,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
return false;
}
- // Stores to old space allocations require no write barriers if the value is
- // a constant provably not in new space.
- if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
- return false;
- }
}
return true;
}
@@ -5928,6 +5549,10 @@ class HObjectAccess final {
Representation::Integer32());
}
+ static HObjectAccess ForMapDescriptors() {
+ return HObjectAccess(kInobject, Map::kDescriptorsOffset);
+ }
+
static HObjectAccess ForNameHashField() {
return HObjectAccess(kInobject,
Name::kHashFieldOffset,
@@ -7391,35 +7016,6 @@ class HMaybeGrowElements final : public HTemplateInstruction<5> {
};
-class HToFastProperties final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
-
- private:
- explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
-
- // This instruction is not marked as kChangesMaps, but does
- // change the map of the input operand. Use it only when creating
- // object literals via a runtime call.
- DCHECK(value->IsCallRuntime());
-#ifdef DEBUG
- const Runtime::Function* function = HCallRuntime::cast(value)->function();
- DCHECK(function->function_id == Runtime::kCreateObjectLiteral);
-#endif
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
class HSeqStringGetChar final : public HTemplateInstruction<2> {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -7646,28 +7242,6 @@ class HLoadFieldByIndex final : public HTemplateInstruction<2> {
bool IsDeletable() const override { return true; }
};
-
-class HStoreFrameContext: public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
-
- HValue* context() { return OperandAt(0); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
- private:
- explicit HStoreFrameContext(HValue* context)
- : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetChangesFlag(kContextSlots);
- }
-};
-
-
-
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
index c98bbf627f..8de3ac0705 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.cc
+++ b/deps/v8/src/crankshaft/hydrogen-osr.cc
@@ -30,7 +30,7 @@ HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanICStub::Types(),
non_osr_entry, osr_entry_);
builder_->FinishCurrentBlock(test);
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 8c8562581a..4266e28da0 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -22,7 +22,7 @@ HType HType::FromType(Type* type) {
if (type->Is(Type::Boolean())) return HType::Boolean();
if (type->Is(Type::Undefined())) return HType::Undefined();
if (type->Is(Type::Object())) return HType::JSObject();
- if (type->Is(Type::Receiver())) return HType::JSReceiver();
+ if (type->Is(Type::DetectableReceiver())) return HType::JSReceiver();
return HType::Tagged();
}
@@ -43,8 +43,13 @@ HType HType::FromValue(Handle<Object> value) {
if (value->IsString()) return HType::String();
if (value->IsBoolean()) return HType::Boolean();
if (value->IsUndefined()) return HType::Undefined();
- if (value->IsJSArray()) return HType::JSArray();
- if (value->IsJSObject()) return HType::JSObject();
+ if (value->IsJSArray()) {
+ DCHECK(!value->IsUndetectable());
+ return HType::JSArray();
+ }
+ if (value->IsJSObject() && !value->IsUndetectable()) {
+ return HType::JSObject();
+ }
DCHECK(value->IsHeapObject());
return HType::HeapObject();
}
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index b6fdd3a315..fd232701f2 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -11,7 +11,6 @@
#include "src/ast/scopeinfo.h"
#include "src/code-factory.h"
#include "src/crankshaft/hydrogen-bce.h"
-#include "src/crankshaft/hydrogen-bch.h"
#include "src/crankshaft/hydrogen-canonicalize.h"
#include "src/crankshaft/hydrogen-check-elimination.h"
#include "src/crankshaft/hydrogen-dce.h"
@@ -58,6 +57,8 @@
#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
@@ -688,28 +689,32 @@ HConstant* HGraph::GetConstantBool(bool value) {
return value ? GetConstantTrue() : GetConstantFalse();
}
-#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value, \
- undetectable) \
- HConstant* HGraph::GetConstant##Name() { \
- if (!constant_##name##_.is_set()) { \
- HConstant* constant = new (zone()) HConstant( \
- Unique<Object>::CreateImmovable( \
- isolate()->factory()->name##_value()), \
- Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
- false, Representation::Tagged(), htype, true, boolean_value, \
- undetectable, ODDBALL_TYPE); \
- constant->InsertAfter(entry_block()->first()); \
- constant_##name##_.set(constant); \
- } \
- return ReinsertConstantIfNecessary(constant_##name##_.get()); \
- }
-
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false,
- true)
-DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true, false)
-DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false, false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false, false)
-DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false, true)
+#define DEFINE_GET_CONSTANT(Name, name, constant, type, htype, boolean_value, \
+ undetectable) \
+ HConstant* HGraph::GetConstant##Name() { \
+ if (!constant_##name##_.is_set()) { \
+ HConstant* constant = new (zone()) HConstant( \
+ Unique<Object>::CreateImmovable(isolate()->factory()->constant()), \
+ Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
+ false, Representation::Tagged(), htype, true, boolean_value, \
+ undetectable, ODDBALL_TYPE); \
+ constant->InsertAfter(entry_block()->first()); \
+ constant_##name##_.set(constant); \
+ } \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
+ }
+
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined_value, undefined,
+ HType::Undefined(), false, true)
+DEFINE_GET_CONSTANT(True, true, true_value, boolean, HType::Boolean(), true,
+ false)
+DEFINE_GET_CONSTANT(False, false, false_value, boolean, HType::Boolean(), false,
+ false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole_value, the_hole, HType::None(),
+ false, false)
+DEFINE_GET_CONSTANT(Null, null, null_value, null, HType::Null(), false, true)
+DEFINE_GET_CONSTANT(OptimizedOut, optimized_out, optimized_out, optimized_out,
+ HType::None(), false, false)
#undef DEFINE_GET_CONSTANT
@@ -906,8 +911,8 @@ void HGraphBuilder::IfBuilder::Then() {
// so that the graph builder visits it and sees any live range extending
// constructs within it.
HConstant* constant_false = builder()->graph()->GetConstantFalse();
- ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
- boolean_type.Add(ToBooleanStub::BOOLEAN);
+ ToBooleanICStub::Types boolean_type = ToBooleanICStub::Types();
+ boolean_type.Add(ToBooleanICStub::BOOLEAN);
HBranch* branch = builder()->New<HBranch>(
constant_false, boolean_type, first_true_block_, first_false_block_);
builder()->FinishCurrentBlock(branch);
@@ -1302,9 +1307,9 @@ HValue* HGraphBuilder::BuildCheckString(HValue* string) {
return string;
}
-
-HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* checked) {
if (object->type().IsJSObject()) return object;
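+  // |checked| may be a check instruction wrapping the function; look
+  // through it when inspecting the constant, but keep passing |checked|
+  // to HWrapReceiver below.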
+ HValue* function = checked->ActualValue();
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
@@ -1312,7 +1317,7 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
SharedFunctionInfo* shared = f->shared();
if (is_strict(shared->language_mode()) || shared->native()) return object;
}
- return Add<HWrapReceiver>(object, function);
+ return Add<HWrapReceiver>(object, checked);
}
@@ -3179,58 +3184,6 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
}
-void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
- HIfContinuation* continuation,
- MapEmbedding map_embedding) {
- IfBuilder if_nil(this);
-
- if (type->Maybe(Type::Undetectable())) {
- if_nil.If<HIsUndetectableAndBranch>(value);
- } else {
- bool maybe_null = type->Maybe(Type::Null());
- if (maybe_null) {
- if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
- }
-
- if (type->Maybe(Type::Undefined())) {
- if (maybe_null) if_nil.Or();
- if_nil.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantUndefined());
- }
-
- if_nil.Then();
- if_nil.Else();
-
- if (type->NumClasses() == 1) {
- BuildCheckHeapObject(value);
- // For ICs, the map checked below is a sentinel map that gets replaced by
- // the monomorphic map when the code is used as a template to generate a
- // new IC. For optimized functions, there is no sentinel map, the map
- // emitted below is the actual monomorphic map.
- if (map_embedding == kEmbedMapsViaWeakCells) {
- HValue* cell =
- Add<HConstant>(Map::WeakCellForMap(type->Classes().Current()));
- HValue* expected_map = Add<HLoadNamedField>(
- cell, nullptr, HObjectAccess::ForWeakCellValue());
- HValue* map =
- Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
- IfBuilder map_check(this);
- map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
- map_check.End();
- } else {
- DCHECK(map_embedding == kEmbedMapsDirectly);
- Add<HCheckMaps>(value, type->Classes().Current());
- }
- } else {
- if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
- }
- }
-
- if_nil.CaptureContinuation(continuation);
-}
-
-
void HGraphBuilder::BuildCreateAllocationMemento(
HValue* previous_object,
HValue* previous_object_size,
@@ -3544,11 +3497,11 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
-
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info, CallInterfaceDescriptor()),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN, 0),
+ initial_function_state_(this, info, NORMAL_RETURN, 0,
+ TailCallMode::kAllow),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -3621,9 +3574,16 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
IterationStatement* statement) {
- HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
- ? osr()->BuildOsrLoopEntry(statement)
- : BuildLoopEntry();
+ HBasicBlock* loop_entry;
+
+ if (osr()->HasOsrEntryAt(statement)) {
+ loop_entry = osr()->BuildOsrLoopEntry(statement);
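+    // OSR entries inside a do-expression cannot be modeled correctly.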
+ if (function_state()->IsInsideDoExpressionScope()) {
+ Bailout(kDoExpressionUnmodelable);
+ }
+ } else {
+ loop_entry = BuildLoopEntry();
+ }
return loop_entry;
}
@@ -3652,7 +3612,6 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
info_(info),
descriptor_(descriptor),
zone_(info->zone()),
- is_recursive_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
@@ -4085,11 +4044,12 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info, InliningKind inlining_kind,
- int inlining_id)
+ int inlining_id, TailCallMode tail_call_mode)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
inlining_kind_(inlining_kind),
+ tail_call_mode_(tail_call_mode),
function_return_(NULL),
test_context_(NULL),
entry_(NULL),
@@ -4097,6 +4057,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
arguments_elements_(NULL),
inlining_id_(inlining_id),
outer_source_position_(SourcePosition::Unknown()),
+ do_expression_scope_count_(0),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -4153,7 +4114,7 @@ AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
typeof_mode_(NOT_INSIDE_TYPEOF) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
- DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
+ DCHECK_EQ(JS_FUNCTION, owner->environment()->frame_type());
original_length_ = owner->environment()->length();
#endif
}
@@ -4165,18 +4126,18 @@ AstContext::~AstContext() {
EffectContext::~EffectContext() {
- DCHECK(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
+ DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ &&
- owner()->environment()->frame_type() == JS_FUNCTION));
+ (owner()->environment()->frame_type() == JS_FUNCTION ||
+ owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
}
ValueContext::~ValueContext() {
- DCHECK(owner()->HasStackOverflow() ||
- owner()->current_block() == NULL ||
+ DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ + 1 &&
- owner()->environment()->frame_type() == JS_FUNCTION));
+ (owner()->environment()->frame_type() == JS_FUNCTION ||
+ owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
}
@@ -4350,7 +4311,7 @@ void TestContext::BuildBranch(HValue* value) {
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- ToBooleanStub::Types expected(condition()->to_boolean_types());
+ ToBooleanICStub::Types expected(condition()->to_boolean_types());
ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
@@ -4566,7 +4527,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
Run<HStackCheckEliminationPhase>();
if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
- if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
@@ -4739,12 +4699,8 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
HInstruction* inner_context = Add<HCallRuntime>(
Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
- HInstruction* instr = Add<HStoreFrameContext>(inner_context);
set_scope(scope);
environment()->BindContext(inner_context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
- }
}
VisitDeclarations(scope->declarations());
AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
@@ -4759,11 +4715,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
inner_context, nullptr,
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- HInstruction* instr = Add<HStoreFrameContext>(outer_context);
environment()->BindContext(outer_context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
- }
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
@@ -4811,23 +4763,24 @@ void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
HBasicBlock* cond_false = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
- if (cond_true->HasPredecessor()) {
- cond_true->SetJoinId(stmt->ThenId());
- set_current_block(cond_true);
- CHECK_BAILOUT(Visit(stmt->then_statement()));
- cond_true = current_block();
- } else {
- cond_true = NULL;
- }
+ // Technically, we should be able to handle the case when one side of
+ // the test is not connected, but this can trip up liveness analysis
+ // if we did not fully connect the test context based on some optimistic
+ // assumption. If such an assumption was violated, we would end up with
+ // an environment with optimized-out values. So we should always
+ // conservatively connect the test context.
+ CHECK(cond_true->HasPredecessor());
+ CHECK(cond_false->HasPredecessor());
- if (cond_false->HasPredecessor()) {
- cond_false->SetJoinId(stmt->ElseId());
- set_current_block(cond_false);
- CHECK_BAILOUT(Visit(stmt->else_statement()));
- cond_false = current_block();
- } else {
- cond_false = NULL;
- }
+ cond_true->SetJoinId(stmt->ThenId());
+ set_current_block(cond_true);
+ CHECK_BAILOUT(Visit(stmt->then_statement()));
+ cond_true = current_block();
+
+ cond_false->SetJoinId(stmt->ElseId());
+ set_current_block(cond_false);
+ CHECK_BAILOUT(Visit(stmt->else_statement()));
+ cond_false = current_block();
HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
set_current_block(join);
@@ -4881,6 +4834,11 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
+ if (function_state()->IsInsideDoExpressionScope()) {
+ return Bailout(kDoExpressionUnmodelable);
+ }
+
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
@@ -4897,10 +4855,6 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
- HInstruction* instr = Add<HStoreFrameContext>(context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
- }
environment()->BindContext(context);
}
@@ -4913,6 +4867,11 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
+ if (function_state()->IsInsideDoExpressionScope()) {
+ return Bailout(kDoExpressionUnmodelable);
+ }
+
Scope* outer_scope = NULL;
Scope* inner_scope = scope();
int drop_extra = 0;
@@ -4929,10 +4888,6 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
context = context_instruction;
}
- HInstruction* instr = Add<HStoreFrameContext>(context);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
- }
environment()->BindContext(context);
}
Goto(break_block);
@@ -5156,7 +5111,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
- if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
+ if (body_exit != NULL) {
set_current_block(body_exit);
loop_successor = graph()->CreateBasicBlock();
if (stmt->cond()->ToBooleanIsFalse()) {
@@ -5198,19 +5153,17 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
-  // If the condition is constant true, do not generate a branch.
+  // Always generate the condition branch; an unreachable successor is
+  // pruned below.
HBasicBlock* loop_successor = NULL;
- if (!stmt->cond()->ToBooleanIsTrue()) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
+ loop_successor = graph()->CreateBasicBlock();
+ CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+ if (body_entry->HasPredecessor()) {
+ body_entry->SetJoinId(stmt->BodyId());
+ set_current_block(body_entry);
+ }
+ if (loop_successor->HasPredecessor()) {
+ loop_successor->SetJoinId(stmt->ExitId());
+ } else {
+ loop_successor = NULL;
}
BreakAndContinueInfo break_info(stmt, scope());
@@ -5239,10 +5192,9 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
DCHECK(current_block() != NULL);
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
- HBasicBlock* loop_successor = NULL;
+ HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+ HBasicBlock* body_entry = graph()->CreateBasicBlock();
if (stmt->cond() != NULL) {
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
if (body_entry->HasPredecessor()) {
body_entry->SetJoinId(stmt->BodyId());
@@ -5253,6 +5205,14 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
loop_successor = NULL;
}
+ } else {
+ // Create dummy control flow so that variable liveness analysis
+    // produces the correct result.
+ HControlInstruction* branch = New<HBranch>(graph()->GetConstantTrue());
+ branch->SetSuccessorAt(0, body_entry);
+ branch->SetSuccessorAt(1, loop_successor);
+ FinishCurrentBlock(branch);
+ set_current_block(body_entry);
}
BreakAndContinueInfo break_info(stmt, scope());
@@ -5540,9 +5500,8 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
FastNewClosureDescriptor descriptor(isolate());
HValue* values[] = {context(), shared_info_value};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
- instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
- Vector<HValue*>(values, arraysize(values)),
- NORMAL_CALL);
+ instr = New<HCallWithDescriptor>(
+ stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
} else {
Add<HPushArguments>(shared_info_value);
Runtime::FunctionId function_id =
@@ -5571,10 +5530,12 @@ void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ DoExpressionScope scope(this);
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- return Bailout(kDoExpression);
+ CHECK_ALIVE(VisitBlock(expr->block()));
+ Visit(expr->result());
}
@@ -5821,9 +5782,9 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
HConstant* stub_value = Add<HConstant>(callable.code());
- HInstruction* instr = New<HCallWithDescriptor>(
- stub_value, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
+ HInstruction* instr =
+ New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -6019,17 +5980,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- if (expr->has_function()) {
- // Return the result of the transformation to fast properties
- // instead of the original since this operation changes the map
- // of the object. This makes sure that the original object won't
- // be used by other optimized code before it is transformed
- // (e.g. because of code motion).
- HToFastProperties* result = Add<HToFastProperties>(Pop());
- return ast_context()->ReturnValue(result);
- } else {
- return ast_context()->ReturnValue(Pop());
- }
+ return ast_context()->ReturnValue(Pop());
}
@@ -6053,9 +6004,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate(), raw_boilerplate,
- Runtime::CreateArrayLiteralBoilerplate(
- isolate(), literals, expr->constant_elements(),
- is_strong(function_language_mode())),
+ Runtime::CreateArrayLiteralBoilerplate(isolate(), literals,
+ expr->constant_elements()),
Bailout(kArrayBoilerplateCreationFailed));
boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
@@ -6591,13 +6541,7 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (!info->IsFound()) {
DCHECK(info->IsLoad());
- if (is_strong(function_language_mode())) {
- return New<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
- 0);
- } else {
- return graph()->GetConstantUndefined();
- }
+ return graph()->GetConstantUndefined();
}
if (info->IsData()) {
@@ -6625,8 +6569,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
- return New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ return NewCallFunction(function, argument_count, TailCallMode::kDisallow,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
} else if (FLAG_inline_accessors && can_inline_accessor) {
bool success = info->IsLoad()
? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
@@ -6640,8 +6585,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
Bailout(kInliningBailedOut);
return nullptr;
}
- return BuildCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
- argument_count);
+ return NewCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
+ argument_count, TailCallMode::kDisallow,
+ TailCallMode::kDisallow);
}
DCHECK(info->IsDataConstant());
@@ -7600,9 +7546,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
- Handle<Map> transitioned_map =
- Map::FindTransitionedMap(map, &possible_transitioned_maps);
- transition_target.Add(transitioned_map);
+ Map* transitioned_map =
+ map->FindElementsKindTransitionedMap(&possible_transitioned_maps);
+ if (transitioned_map != nullptr) {
+ transition_target.Add(handle(transitioned_map));
+ } else {
+ transition_target.Add(Handle<Map>());
+ }
}
MapHandleList untransitionable_maps(maps->length());
@@ -8039,56 +7989,81 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
}
}
-
-HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
- int argument_count) {
- return New<HCallJSFunction>(fun, argument_count);
+void HOptimizedGraphBuilder::BuildEnsureCallable(HValue* object) {
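+  // Emit a check that throws via Runtime::kThrowCalledNonCallable when
+  // |object| is a Smi or its map does not have the callable bit set.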
+ NoObservableSideEffectsScope scope(this);
+ const Runtime::Function* throw_called_non_callable =
+ Runtime::FunctionForId(Runtime::kThrowCalledNonCallable);
+
+ IfBuilder is_not_function(this);
+ HValue* smi_check = is_not_function.If<HIsSmiAndBranch>(object);
+ is_not_function.Or();
+ HValue* map = AddLoadMap(object, smi_check);
+ HValue* bit_field =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+ HValue* bit_field_masked = AddUncasted<HBitwise>(
+ Token::BIT_AND, bit_field, Add<HConstant>(1 << Map::kIsCallable));
+ is_not_function.IfNot<HCompareNumericAndBranch>(
+ bit_field_masked, Add<HConstant>(1 << Map::kIsCallable), Token::EQ);
+ is_not_function.Then();
+ {
+ Add<HPushArguments>(object);
+ Add<HCallRuntime>(throw_called_non_callable, 1);
+ }
+ is_not_function.End();
}
-
-HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
- HValue* fun, HValue* context,
- int argument_count, HValue* expected_param_count) {
- HValue* new_target = graph()->GetConstantUndefined();
+HInstruction* HOptimizedGraphBuilder::NewCallFunction(
+ HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
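+  // When the call is in a syntactic tail position, ensure up front that
+  // the target is actually callable.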
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ BuildEnsureCallable(function);
+ } else {
+ DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
+ }
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
+ HValue* op_vals[] = {context(), function, arity};
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ Callable callable =
+ CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)));
+ Vector<HValue*>(op_vals, arraysize(op_vals)),
+ syntactic_tail_call_mode);
}
-
-HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
- Handle<JSFunction> jsfun, int argument_count) {
- HValue* target = Add<HConstant>(jsfun);
- // For constant functions, we try to avoid calling the
- // argument adaptor and instead call the function directly
- int formal_parameter_count =
- jsfun->shared()->internal_formal_parameter_count();
- bool dont_adapt_arguments =
- (formal_parameter_count ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- int arity = argument_count - 1;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
- if (can_invoke_directly) {
- if (jsfun.is_identical_to(current_info()->closure())) {
- graph()->MarkRecursive();
- }
- return NewPlainFunctionCall(target, argument_count);
+HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
+ HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
+ FeedbackVectorSlot slot) {
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ BuildEnsureCallable(function);
} else {
- HValue* param_count_value = Add<HConstant>(formal_parameter_count);
- HValue* context = Add<HLoadNamedField>(
- target, nullptr, HObjectAccess::ForFunctionContextPointer());
- return NewArgumentAdaptorCall(target, context,
- argument_count, param_count_value);
+ DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
}
- UNREACHABLE();
- return NULL;
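+  // Pass the feedback vector and slot index so the CallIC can record
+  // type feedback and call counts for this call site.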
+ int arity = argument_count - 1;
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
+ HValue* vector_val = Add<HConstant>(vector);
+
+ HValue* op_vals[] = {context(), function, index_val, vector_val};
+
+ Callable callable = CodeFactory::CallICInOptimizedCode(
+ isolate(), arity, convert_mode, tail_call_mode);
+ HConstant* stub = Add<HConstant>(callable.code());
+
+ return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals)),
+ syntactic_tail_call_mode);
+}
+
+HInstruction* HOptimizedGraphBuilder::NewCallConstantFunction(
+ Handle<JSFunction> function, int argument_count,
+ TailCallMode syntactic_tail_call_mode, TailCallMode tail_call_mode) {
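+  // The target function is statically known, so call it directly via
+  // HInvokeFunction instead of going through the generic Call builtin.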
+ HValue* target = Add<HConstant>(function);
+ return New<HInvokeFunction>(target, function, argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
}
@@ -8126,6 +8101,10 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
bool handled_string = false;
int ordered_functions = 0;
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
int i;
for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
@@ -8230,14 +8209,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (HasStackOverflow()) return;
} else {
// Since HWrapReceiver currently cannot actually wrap numbers and strings,
- // use the regular CallFunctionStub for method calls to wrap the receiver.
+ // use the regular call builtin for method calls to wrap the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
HInstruction* call =
- needs_wrapping ? NewUncasted<HCallFunction>(
- function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined)
- : BuildCallConstantFunction(target, argument_count);
+ needs_wrapping
+ ? NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode)
+ : NewCallConstantFunction(target, argument_count,
+ syntactic_tail_call_mode,
+ tail_call_mode);
PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
Drop(1); // Drop the function.
@@ -8266,8 +8248,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
environment()->SetExpressionStackAt(0, receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- HInstruction* call = New<HCallFunction>(
- function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
+ HInstruction* call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
PushArgumentsFromEnvironment(argument_count);
@@ -8295,17 +8278,19 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
}
}
-
void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
Handle<JSFunction> caller,
- const char* reason) {
+ const char* reason,
+ TailCallMode tail_call_mode) {
if (FLAG_trace_inlining) {
base::SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
base::SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
- PrintF("Inlined %s called from %s.\n", target_name.get(),
+ const char* call_mode =
+ tail_call_mode == TailCallMode::kAllow ? "tail called" : "called";
+ PrintF("Inlined %s %s from %s.\n", target_name.get(), call_mode,
caller_name.get());
} else {
PrintF("Did not inline %s called from %s (%s).\n",
@@ -8362,12 +8347,12 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
return nodes_added;
}
-
bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id, BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) {
if (target->context()->native_context() !=
top_info()->closure()->context()->native_context()) {
return false;
@@ -8376,7 +8361,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
if (nodes_added == kNotInlinable) return false;
Handle<JSFunction> caller = current_info()->closure();
-
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
@@ -8498,15 +8482,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
- // In strong mode it is an error to call a function with too few arguments.
- // In that case do not inline because then the arity check would be skipped.
- if (is_strong(function->language_mode()) &&
- arguments_count < function->parameter_count()) {
- TraceInline(target, caller,
- "too few arguments passed to a strong function");
- return false;
- }
-
// Generate the deoptimization data for the unoptimized version of
// the target function if we don't already have it.
if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
@@ -8537,17 +8512,15 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
- FunctionState* target_state =
- new FunctionState(this, &target_info, inlining_kind, inlining_id);
+ FunctionState* target_state = new FunctionState(
+ this, &target_info, inlining_kind, inlining_id,
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode));
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner_env =
- environment()->CopyForInlining(target,
- arguments_count,
- function,
- undefined,
- function_state()->inlining_kind());
+ HEnvironment* inner_env = environment()->CopyForInlining(
+ target, arguments_count, function, undefined,
+ function_state()->inlining_kind(), syntactic_tail_call_mode);
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
@@ -8577,10 +8550,10 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
current_block()->UpdateEnvironment(inner_env);
Scope* saved_scope = scope();
set_scope(target_info.scope());
- HEnterInlined* enter_inlined =
- Add<HEnterInlined>(return_id, target, context, arguments_count, function,
- function_state()->inlining_kind(),
- function->scope()->arguments(), arguments_object);
+ HEnterInlined* enter_inlined = Add<HEnterInlined>(
+ return_id, target, context, arguments_count, function,
+ function_state()->inlining_kind(), function->scope()->arguments(),
+ arguments_object, syntactic_tail_call_mode);
if (top_info()->is_tracking_positions()) {
enter_inlined->set_inlining_id(inlining_id);
}
@@ -8608,7 +8581,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
- TraceInline(target, caller, NULL);
+ TraceInline(target, caller, NULL, syntactic_tail_call_mode);
if (current_block() != NULL) {
FunctionState* state = function_state();
@@ -8692,7 +8665,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
return TryInline(expr->target(), expr->arguments()->length(), NULL,
- expr->id(), expr->ReturnId(), NORMAL_RETURN);
+ expr->id(), expr->ReturnId(), NORMAL_RETURN,
+ expr->tail_call_mode());
}
@@ -8700,7 +8674,7 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
return TryInline(expr->target(), expr->arguments()->length(),
implicit_return_value, expr->id(), expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN, TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
@@ -8710,7 +8684,7 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
return getter->IsJSFunction() &&
TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN, TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
@@ -8721,7 +8695,8 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
if (TryInlineApiSetter(setter, receiver_map, id)) return true;
return setter->IsJSFunction() &&
TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
- id, assignment_id, SETTER_CALL_RETURN);
+ id, assignment_id, SETTER_CALL_RETURN,
+ TailCallMode::kDisallow);
}
@@ -8729,13 +8704,15 @@ bool HOptimizedGraphBuilder::TryInlineIndirectCall(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
return TryInline(function, arguments_count, NULL, expr->id(),
- expr->ReturnId(), NORMAL_RETURN);
+ expr->ReturnId(), NORMAL_RETURN, expr->tail_call_mode());
}
bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+  // We intentionally ignore expr->tail_call_mode() here because the
+  // builtins we inline here cannot observe whether they were tail called.
switch (id) {
case kMathExp:
if (!FLAG_fast_math) break;
@@ -8819,6 +8796,25 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
// Try to inline calls like Math.* as operations in the calling function.
switch (id) {
+ case kObjectHasOwnProperty: {
+ // It's not safe to look through the phi for elements if we're compiling
+ // for osr.
+ if (top_info()->is_osr()) return false;
+ if (argument_count != 2) return false;
+ HValue* key = Top();
+ if (!key->IsLoadKeyed()) return false;
+ HValue* elements = HLoadKeyed::cast(key)->elements();
+ if (!elements->IsPhi() || elements->OperandCount() != 1) return false;
+ if (!elements->OperandAt(0)->IsForInCacheArray()) return false;
+ HForInCacheArray* cache = HForInCacheArray::cast(elements->OperandAt(0));
+ HValue* receiver = environment()->ExpressionStackAt(1);
+ if (!receiver->IsPhi() || receiver->OperandCount() != 1) return false;
+ if (cache->enumerable() != receiver->OperandAt(0)) return false;
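+      // The key was produced by enumerating this very receiver, so the
+      // answer is true as long as the receiver's map is unchanged.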
+ Drop(3); // key, receiver, function
+ Add<HCheckMapValue>(receiver, cache->map());
+ ast_context()->ReturnValue(graph()->GetConstantTrue());
+ return true;
+ }
case kStringCharCodeAt:
case kStringCharAt:
if (argument_count == 2) {
@@ -8841,6 +8837,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
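+        // Eagerly convert the char code to an untagged, truncating int32.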
+ argument = AddUncasted<HForceRepresentation>(
+ argument, Representation::Integer32());
+ argument->SetFlag(HValue::kTruncatingToInt32);
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
@@ -9053,7 +9052,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Drop(args_count_no_receiver);
HValue* receiver = Pop();
- HValue* function = Pop();
+ Drop(1); // Function.
HValue* result;
{
@@ -9129,7 +9128,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if_inline.Else();
{
Add<HPushArguments>(receiver);
- result = Add<HCallJSFunction>(function, 1);
+ result = AddInstruction(NewCallConstantFunction(
+ function, 1, TailCallMode::kDisallow, TailCallMode::kDisallow));
if (!ast_context()->IsEffect()) Push(result);
}
if_inline.End();
@@ -9193,12 +9193,8 @@ bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
SmallMapList receiver_maps;
- return TryInlineApiCall(function,
- receiver,
- &receiver_maps,
- argc,
- expr->id(),
- kCallApiFunction);
+ return TryInlineApiCall(function, receiver, &receiver_maps, argc, expr->id(),
+ kCallApiFunction, expr->tail_call_mode());
}
@@ -9208,12 +9204,8 @@ bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
SmallMapList* receiver_maps) {
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
- return TryInlineApiCall(function,
- receiver,
- receiver_maps,
- argc,
- expr->id(),
- kCallApiMethod);
+ return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
+ kCallApiMethod, expr->tail_call_mode());
}
bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
@@ -9223,10 +9215,8 @@ bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
- &receiver_maps,
- 0,
- ast_id,
- kCallApiGetter);
+ &receiver_maps, 0, ast_id, kCallApiGetter,
+ TailCallMode::kDisallow);
}
bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
@@ -9236,22 +9226,23 @@ bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
NULL, // Receiver is on expression stack.
- &receiver_maps,
- 1,
- ast_id,
- kCallApiSetter);
+ &receiver_maps, 1, ast_id, kCallApiSetter,
+ TailCallMode::kDisallow);
}
-bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
- HValue* receiver,
- SmallMapList* receiver_maps,
- int argc, BailoutId ast_id,
- ApiCallType call_type) {
+bool HOptimizedGraphBuilder::TryInlineApiCall(
+ Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
+ int argc, BailoutId ast_id, ApiCallType call_type,
+ TailCallMode syntactic_tail_call_mode) {
if (function->IsJSFunction() &&
Handle<JSFunction>::cast(function)->context()->native_context() !=
top_info()->closure()->context()->native_context()) {
return false;
}
+ if (argc > CallApiCallbackStub::kArgMax) {
+ return false;
+ }
+
CallOptimization optimization(function);
if (!optimization.is_simple_api_call()) return false;
Handle<Map> holder_map;
@@ -9347,33 +9338,24 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
api_function_address, nullptr};
HInstruction* call = nullptr;
+ CHECK(argc <= CallApiCallbackStub::kArgMax);
if (!is_function) {
- CallApiAccessorStub stub(isolate(), is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate(), is_store, call_data_undefined,
!optimization.is_constant_call());
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
- ApiAccessorDescriptor descriptor(isolate());
- call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
- } else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
- CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
- Handle<Code> code = stub.GetCode();
- HConstant* code_value = Add<HConstant>(code);
- ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
call = New<HCallWithDescriptor>(
- code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
- Drop(1); // Drop function.
+ code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+ syntactic_tail_call_mode);
} else {
- op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
- CallApiFunctionStub stub(isolate(), call_data_undefined);
+ CallApiCallbackStub stub(isolate(), argc, call_data_undefined);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
- ApiFunctionDescriptor descriptor(isolate());
- call =
- New<HCallWithDescriptor>(code_value, argc + 1, descriptor,
- Vector<HValue*>(op_vals, arraysize(op_vals)));
+ call = New<HCallWithDescriptor>(
+ code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
+ syntactic_tail_call_mode);
Drop(1); // Drop function.
}
@@ -9405,9 +9387,14 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
}
}
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
PushArgumentsFromEnvironment(arguments_count);
HInvokeFunction* call =
- New<HInvokeFunction>(function, known_function, arguments_count);
+ New<HInvokeFunction>(function, known_function, arguments_count,
+ syntactic_tail_call_mode, tail_call_mode);
Drop(1); // Function
ast_context()->ReturnInstruction(call, expr->id());
}
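
HandleIndirectCall now tracks two modes: the syntactic one from the call site and the effective one computed by FunctionState::ComputeTailCallMode (declared further down in hydrogen.h), which lets an enclosing function state veto tail calls. A standalone sketch of that combination rule, mirroring the two-valued TailCallMode used throughout this patch:

    #include <cassert>

    enum class TailCallMode { kAllow, kDisallow };

    // A call is emitted as a tail call only if the call site is
    // syntactically a tail call and the enclosing function state permits
    // tail calls; an inlined function whose own invocation was not a tail
    // call must not drop its caller's frame.
    TailCallMode ComputeTailCallMode(TailCallMode state_mode,
                                     TailCallMode syntactic_mode) {
      if (state_mode == TailCallMode::kDisallow) return TailCallMode::kDisallow;
      return syntactic_mode;
    }

    int main() {
      assert(ComputeTailCallMode(TailCallMode::kAllow, TailCallMode::kAllow) ==
             TailCallMode::kAllow);
      assert(ComputeTailCallMode(TailCallMode::kDisallow,
                                 TailCallMode::kAllow) ==
             TailCallMode::kDisallow);
      assert(ComputeTailCallMode(TailCallMode::kAllow,
                                 TailCallMode::kDisallow) ==
             TailCallMode::kDisallow);
    }
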
@@ -9459,13 +9446,15 @@ void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
HValue* checked_function = AddCheckMap(function, function_map);
if (function_state()->outer() == NULL) {
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
- HInstruction* result = New<HApplyArguments>(function,
- wrapped_receiver,
- length,
- elements);
+ HInstruction* result = New<HApplyArguments>(
+ function, wrapped_receiver, length, elements, tail_call_mode);
ast_context()->ReturnInstruction(result, expr->id());
} else {
// We are inside inlined function and we know exactly what is inside
@@ -9733,9 +9722,6 @@ bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- return Bailout(kTailCall);
- }
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
@@ -9744,6 +9730,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
HInstruction* call = NULL;
+ TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
+ TailCallMode tail_call_mode =
+ function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
+
Property* prop = callee->AsProperty();
if (prop != NULL) {
CHECK_ALIVE(VisitForValue(prop->obj()));
@@ -9797,16 +9787,19 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// Wrap the receiver if necessary.
if (NeedsWrapping(maps->first(), known_function)) {
// Since HWrapReceiver currently cannot actually wrap numbers and
- // strings, use the regular CallFunctionStub for method calls to wrap
+ // strings, use the regular call builtin for method calls to wrap
// the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- call = New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
} else if (TryInlineCall(expr)) {
return;
} else {
- call = BuildCallConstantFunction(known_function, argument_count);
+ call =
+ NewCallConstantFunction(known_function, argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
}
} else {
@@ -9825,8 +9818,9 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
- call = New<HCallFunction>(function, argument_count,
- ConvertReceiverMode::kNotNullOrUndefined);
+ call = NewCallFunction(function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNotNullOrUndefined,
+ tail_call_mode);
}
PushArgumentsFromEnvironment(argument_count);
@@ -9873,20 +9867,22 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (TryInlineCall(expr)) return;
PushArgumentsFromEnvironment(argument_count);
- call = BuildCallConstantFunction(expr->target(), argument_count);
+ call = NewCallConstantFunction(expr->target(), argument_count,
+ syntactic_tail_call_mode, tail_call_mode);
} else {
PushArgumentsFromEnvironment(argument_count);
- HCallFunction* call_function = New<HCallFunction>(
- function, argument_count, ConvertReceiverMode::kNullOrUndefined);
- call = call_function;
if (expr->is_uninitialized() &&
expr->IsUsingCallFeedbackICSlot(isolate())) {
// We've never seen this call before, so let's have Crankshaft learn
// through the type vector.
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
- call_function->SetVectorAndSlot(vector, slot);
+ call = NewCallFunctionViaIC(function, argument_count,
+ syntactic_tail_call_mode,
+ ConvertReceiverMode::kNullOrUndefined,
+ tail_call_mode, expr->CallFeedbackICSlot());
+ } else {
+ call = NewCallFunction(
+ function, argument_count, syntactic_tail_call_mode,
+ ConvertReceiverMode::kNullOrUndefined, tail_call_mode);
}
}
}
@@ -10509,7 +10505,29 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
- return Bailout(kCallToAJavaScriptRuntimeFunction);
+ // Crankshaft always specializes to the native context, so we can just grab
+ // the constant function from the current native context and embed that into
+ // the code object.
+ Handle<JSFunction> known_function(
+ JSFunction::cast(
+ current_info()->native_context()->get(expr->context_index())),
+ isolate());
+
+ // The callee and the receiver both have to be pushed onto the operand
+ // stack before the arguments are evaluated.
+ HConstant* function = Add<HConstant>(known_function);
+ HValue* receiver = ImplicitReceiverFor(function, known_function);
+ Push(function);
+ Push(receiver);
+
+ int argument_count = expr->arguments()->length() + 1; // Plus receiver.
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
+ HInstruction* call = NewCallConstantFunction(known_function, argument_count,
+ TailCallMode::kDisallow,
+ TailCallMode::kDisallow);
+ Drop(1); // Function
+ return ast_context()->ReturnInstruction(call, expr->id());
}
const Runtime::Function* function = expr->function();
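
The new runtime-call path follows the builder's usual operand-stack discipline: callee and receiver go on first, the argument expressions are evaluated on top of them, and the callee slot is dropped once the call has consumed receiver and arguments. A toy model of that bookkeeping (illustrative only, not V8 code):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> stack;
      stack.push_back("function");  // Push(function)
      stack.push_back("receiver");  // Push(receiver)
      stack.push_back("arg0");      // arguments are evaluated above both
      // PushArgumentsFromEnvironment(argument_count) hands receiver + args
      // to the call; here we simply pop them (argument_count == 2).
      stack.resize(stack.size() - 2);
      assert(stack.back() == "function");
      stack.pop_back();             // Drop(1);  // Function
      assert(stack.empty());
    }
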
@@ -10661,7 +10679,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
rep = Representation::Smi();
}
- if (returns_original_input && !is_strong(function_language_mode())) {
+ if (returns_original_input) {
// We need an explicit HValue representing ToNumber(input). The
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
@@ -10686,11 +10704,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
add->set_observed_input_representation(1, rep);
add->set_observed_input_representation(2, Representation::Smi());
}
- if (!is_strong(function_language_mode())) {
- instr->ClearAllSideEffects();
- } else {
- Add<HSimulate>(expr->ToNumberId(), REMOVABLE_SIMULATE);
- }
+ instr->ClearAllSideEffects();
instr->SetFlag(HInstruction::kCannotBeTagged);
return instr;
}
@@ -11331,12 +11345,10 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// Translate right subexpression by visiting it in the same AST
// context as the entire expression.
- if (eval_right->HasPredecessor()) {
- eval_right->SetJoinId(expr->RightId());
- set_current_block(eval_right);
- Visit(expr->right());
- }
-
+ CHECK(eval_right->HasPredecessor());
+ eval_right->SetJoinId(expr->RightId());
+ set_current_block(eval_right);
+ Visit(expr->right());
} else if (ast_context()->IsValue()) {
CHECK_ALIVE(VisitForValue(expr->left()));
DCHECK(current_block() != NULL);
@@ -11358,7 +11370,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- ToBooleanStub::Types expected(expr->left()->to_boolean_types());
+ ToBooleanICStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
? New<HBranch>(left_value, expected, eval_right, empty_block)
: New<HBranch>(left_value, expected, empty_block, eval_right);
@@ -11392,20 +11404,22 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// second one is not a merge node, and that we really have no good AST ID to
// put on that first HSimulate.
- if (empty_block->HasPredecessor()) {
- empty_block->SetJoinId(expr->id());
- } else {
- empty_block = NULL;
- }
+ // Technically, we should be able to handle the case in which one side of
+ // the test is not connected, but that can trip up liveness analysis if
+ // the test context was not fully connected because of an optimistic
+ // assumption. If such an assumption is violated, we end up with an
+ // environment containing optimized-out values. So we always connect the
+ // test context conservatively.
- if (right_block->HasPredecessor()) {
- right_block->SetJoinId(expr->RightId());
- set_current_block(right_block);
- CHECK_BAILOUT(VisitForEffect(expr->right()));
- right_block = current_block();
- } else {
- right_block = NULL;
- }
+ CHECK(right_block->HasPredecessor());
+ CHECK(empty_block->HasPredecessor());
+
+ empty_block->SetJoinId(expr->id());
+
+ right_block->SetJoinId(expr->RightId());
+ set_current_block(right_block);
+ CHECK_BAILOUT(VisitForEffect(expr->right()));
+ right_block = current_block();
HBasicBlock* join_block =
CreateJoin(empty_block, right_block, expr->id());
@@ -11474,7 +11488,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
return HandleLiteralCompareTypeof(expr, sub_expr, check);
}
- if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
@@ -11510,6 +11524,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
if (op == Token::INSTANCEOF) {
+ DCHECK(!FLAG_harmony_instanceof);
// Check to see if the rhs of the instanceof is a known function.
if (right->IsConstant() &&
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
@@ -11699,6 +11714,20 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
New<HCompareNumericAndBranch>(left, right, op);
return result;
} else {
+ if (op == Token::EQ) {
+ if (left->IsConstant() &&
+ HConstant::cast(left)->GetInstanceType() == ODDBALL_TYPE &&
+ HConstant::cast(left)->IsUndetectable()) {
+ return New<HIsUndetectableAndBranch>(right);
+ }
+
+ if (right->IsConstant() &&
+ HConstant::cast(right)->GetInstanceType() == ODDBALL_TYPE &&
+ HConstant::cast(right)->IsUndetectable()) {
+ return New<HIsUndetectableAndBranch>(left);
+ }
+ }
+
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
@@ -11738,22 +11767,17 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
+ HControlInstruction* instr;
if (expr->op() == Token::EQ_STRICT) {
HConstant* nil_constant = nil == kNullValue
? graph()->GetConstantNull()
: graph()->GetConstantUndefined();
- HCompareObjectEqAndBranch* instr =
- New<HCompareObjectEqAndBranch>(value, nil_constant);
- return ast_context()->ReturnControl(instr, expr->id());
+ instr = New<HCompareObjectEqAndBranch>(value, nil_constant);
} else {
DCHECK_EQ(Token::EQ, expr->op());
- Type* type = expr->combined_type()->Is(Type::None())
- ? Type::Any()
- : expr->combined_type();
- HIfContinuation continuation;
- BuildCompareNil(value, type, &continuation);
- return ast_context()->ReturnContinuation(&continuation, expr->id());
+ instr = New<HIsUndetectableAndBranch>(value);
}
+ return ast_context()->ReturnControl(instr, expr->id());
}
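
Both this rewrite and the new Token::EQ shortcut added to BuildCompareInstruction below rely on the same property: in this V8 version the maps of undefined, null, and "undetectable" host objects (document.all-style exotica) all carry the undetectable bit, and loose equality against null or undefined holds for exactly those values (the oddball check below confirms this for constants). A minimal model of the predicate:

    #include <cassert>

    // One bool stands in for Map::kIsUndetectable.
    struct Value {
      bool undetectable;
    };

    // x == null and x == undefined (loose equality) hold exactly for null,
    // undefined, and undetectable host objects, so a single map-bit test
    // (HIsUndetectableAndBranch) replaces the generic compare-nil machinery.
    bool LooseEqualsNullOrUndefined(const Value& v) { return v.undetectable; }

    int main() {
      Value null_value{true}, undefined_value{true};
      Value document_all{true};    // undetectable host object
      Value plain_object{false};
      assert(LooseEqualsNullOrUndefined(null_value));
      assert(LooseEqualsNullOrUndefined(undefined_value));
      assert(LooseEqualsNullOrUndefined(document_all));
      assert(!LooseEqualsNullOrUndefined(plain_object));
    }
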
@@ -12268,22 +12292,13 @@ void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
if (input->type().IsSmi()) {
return ast_context()->ReturnValue(input);
} else {
- IfBuilder if_inputissmi(this);
- if_inputissmi.If<HIsSmiAndBranch>(input);
- if_inputissmi.Then();
- {
- // Return the input value.
- Push(input);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_inputissmi.Else();
- {
- Add<HPushArguments>(input);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToInteger), 1));
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_inputissmi.End();
- return ast_context()->ReturnValue(Pop());
+ Callable callable = CodeFactory::ToInteger(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
}
}
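
GenerateToInteger now defers every non-Smi input to the shared ToInteger stub instead of open-coding an Smi test with two simulates. The conversion the stub implements is the ECMAScript ToInteger operation; for an already-numeric input it reduces to the following (scalar sketch, ignoring the preceding ToNumber step):

    #include <cmath>
    #include <cstdio>

    // ToInteger on a number: NaN maps to 0, everything else truncates
    // toward zero (infinities pass through unchanged).
    double ToInteger(double x) {
      if (std::isnan(x)) return 0;
      return std::trunc(x);
    }

    int main() {
      std::printf("%g %g %g\n", ToInteger(3.7), ToInteger(-3.7),
                  ToInteger(std::nan("")));  // prints: 3 -3 0
    }
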
@@ -12528,6 +12543,18 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
return ast_context()->ReturnInstruction(result, call->id());
}
+// Support for direct creation of new objects.
+void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
+ DCHECK_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ FastNewObjectStub stub(isolate());
+ FastNewObjectDescriptor descriptor(isolate());
+ HValue* values[] = {context(), Pop(), Pop()};
+ HConstant* stub_value = Add<HConstant>(stub.GetCode());
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
@@ -12621,6 +12648,45 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
DCHECK_LE(2, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
+
+ // Try to customize ES6 instanceof here.
+ // We should at least have the constructor on the expression stack.
+ if (FLAG_harmony_instanceof && FLAG_harmony_instanceof_opt &&
+ call->arguments()->length() == 3) {
+ HValue* target = environment()->ExpressionStackAt(2);
+ if (target->IsConstant()) {
+ HConstant* constant_function = HConstant::cast(target);
+ if (constant_function->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> func =
+ Handle<JSFunction>::cast(constant_function->handle(isolate()));
+ if (*func == isolate()->native_context()->ordinary_has_instance()) {
+ // Look at the function, which will be argument 1.
+ HValue* right = environment()->ExpressionStackAt(1);
+ if (right->IsConstant() &&
+ HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
+ Handle<JSFunction> constructor = Handle<JSFunction>::cast(
+ HConstant::cast(right)->handle(isolate()));
+ if (constructor->IsConstructor() &&
+ !constructor->map()->has_non_instance_prototype()) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ top_info()->dependencies()->AssumeInitialMapCantChange(
+ initial_map);
+ HInstruction* prototype =
+ Add<HConstant>(handle(initial_map->prototype(), isolate()));
+ HValue* left = environment()->ExpressionStackAt(0);
+ HHasInPrototypeChainAndBranch* result =
+ New<HHasInPrototypeChainAndBranch>(left, prototype);
+ Drop(3);
+ return ast_context()->ReturnControl(result, call->id());
+ }
+ }
+ }
+ }
+ }
+ }
+
CallTrampolineDescriptor descriptor(isolate());
PushArgumentsFromEnvironment(call->arguments()->length() - 1);
HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
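
The block above recognizes the pattern %_Call(ordinaryHasInstance, C, o), which is the shape o instanceof C takes under --harmony-instanceof. When C is a constant constructor with a stable initial map, the call reduces to an HHasInPrototypeChainAndBranch against C.prototype. The check that instruction performs is a plain prototype-chain walk; a toy model:

    #include <cassert>

    // Toy object: each object points at its prototype (nullptr terminates).
    struct Object {
      const Object* prototype;
    };

    // What HHasInPrototypeChainAndBranch decides: is proto reachable on the
    // prototype chain of value?
    bool HasInPrototypeChain(const Object* value, const Object* proto) {
      for (const Object* p = value->prototype; p != nullptr; p = p->prototype) {
        if (p == proto) return true;
      }
      return false;
    }

    int main() {
      Object c_prototype{nullptr};
      Object instance{&c_prototype};  // o = new C()
      Object unrelated{nullptr};
      assert(HasInPrototypeChain(&instance, &c_prototype));
      assert(!HasInPrototypeChain(&unrelated, &c_prototype));
    }
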
@@ -12646,24 +12712,6 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12673,15 +12721,6 @@ void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12888,6 +12927,12 @@ void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
return ast_context()->ReturnValue(value);
}
+void HOptimizedGraphBuilder::GenerateGetOrdinaryHasInstance(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 0);
+ // ordinary_has_instance is immutable so we can treat it as a constant.
+ HValue* value = Add<HConstant>(isolate()->ordinary_has_instance());
+ return ast_context()->ReturnValue(value);
+}
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -13121,14 +13166,21 @@ HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
return new_env;
}
+void HEnvironment::MarkAsTailCaller() {
+ DCHECK_EQ(JS_FUNCTION, frame_type());
+ frame_type_ = TAIL_CALLER_FUNCTION;
+}
+
+void HEnvironment::ClearTailCallerMark() {
+ DCHECK_EQ(TAIL_CALLER_FUNCTION, frame_type());
+ frame_type_ = JS_FUNCTION;
+}
HEnvironment* HEnvironment::CopyForInlining(
- Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind) const {
- DCHECK(frame_type() == JS_FUNCTION);
+ Handle<JSFunction> target, int arguments, FunctionLiteral* function,
+ HConstant* undefined, InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) const {
+ DCHECK_EQ(JS_FUNCTION, frame_type());
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
@@ -13137,6 +13189,11 @@ HEnvironment* HEnvironment::CopyForInlining(
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
+ if (syntactic_tail_call_mode == TailCallMode::kAllow) {
+ DCHECK_EQ(NORMAL_RETURN, inlining_kind);
+ outer->MarkAsTailCaller();
+ }
+
if (inlining_kind == CONSTRUCT_CALL_RETURN) {
// Create artificial constructor stub environment. The receiver should
// actually be the constructor function, but we pass the newly allocated
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index ce0d0df6aa..10c0baa29d 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -11,6 +11,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
@@ -293,8 +294,6 @@ class HLoopInformation final : public ZoneObject {
};
-class BoundsCheckTable;
-class InductionVariableBlocksTable;
class HGraph final : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
@@ -333,6 +332,7 @@ class HGraph final : public ZoneObject {
HConstant* GetConstantBool(bool value);
HConstant* GetConstantHole();
HConstant* GetConstantNull();
+ HConstant* GetConstantOptimizedOut();
HConstant* GetInvalidContext();
bool IsConstantUndefined(HConstant* constant);
@@ -400,9 +400,6 @@ class HGraph final : public ZoneObject {
use_optimistic_licm_ = value;
}
- void MarkRecursive() { is_recursive_ = true; }
- bool is_recursive() const { return is_recursive_; }
-
void MarkDependsOnEmptyArrayProtoElements() {
// Add map dependency if not already added.
if (depends_on_empty_array_proto_elements_) return;
@@ -474,6 +471,7 @@ class HGraph final : public ZoneObject {
SetOncePointer<HConstant> constant_false_;
SetOncePointer<HConstant> constant_the_hole_;
SetOncePointer<HConstant> constant_null_;
+ SetOncePointer<HConstant> constant_optimized_out_;
SetOncePointer<HConstant> constant_invalid_context_;
HOsrBuilder* osr_;
@@ -482,7 +480,6 @@ class HGraph final : public ZoneObject {
CallInterfaceDescriptor descriptor_;
Zone* zone_;
- bool is_recursive_;
bool use_optimistic_licm_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
@@ -504,10 +501,10 @@ enum FrameType {
JS_GETTER,
JS_SETTER,
ARGUMENTS_ADAPTOR,
+ TAIL_CALLER_FUNCTION,
STUB
};
-
class HEnvironment final : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
@@ -616,16 +613,21 @@ class HEnvironment final : public ZoneObject {
// Create an "inlined version" of this environment, where the original
// environment is the outer environment but the top expression stack
// elements are moved to an inner environment as parameters.
- HEnvironment* CopyForInlining(Handle<JSFunction> target,
- int arguments,
- FunctionLiteral* function,
- HConstant* undefined,
- InliningKind inlining_kind) const;
+ HEnvironment* CopyForInlining(Handle<JSFunction> target, int arguments,
+ FunctionLiteral* function, HConstant* undefined,
+ InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode) const;
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
- while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+ while (outer->frame_type() != JS_FUNCTION &&
+ outer->frame_type() != TAIL_CALLER_FUNCTION) {
+ outer = outer->outer_;
+ }
if (drop_extra) outer->Drop(1);
+ if (outer->frame_type() == TAIL_CALLER_FUNCTION) {
+ outer->ClearTailCallerMark();
+ }
return outer;
}
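
CopyForInlining and DiscardInlined cooperate on the new frame type: inlining at a tail-call site marks the outer environment TAIL_CALLER_FUNCTION, and DiscardInlined restores JS_FUNCTION once the inlined environment is discarded. A toy model of the mark/clear round trip (frame-type bookkeeping only, none of the value tracking):

    #include <cassert>

    enum FrameType { JS_FUNCTION, TAIL_CALLER_FUNCTION };

    struct Env {
      FrameType frame_type;
      Env* outer;
    };

    int main() {
      Env caller{JS_FUNCTION, nullptr};   // function containing the tail call
      Env inlined{JS_FUNCTION, &caller};
      // CopyForInlining with TailCallMode::kAllow marks the outer frame.
      caller.frame_type = TAIL_CALLER_FUNCTION;
      // DiscardInlined walks outward, then clears the mark, as in the hunk
      // above.
      Env* outer = inlined.outer;
      while (outer->frame_type != JS_FUNCTION &&
             outer->frame_type != TAIL_CALLER_FUNCTION) {
        outer = outer->outer;
      }
      if (outer->frame_type == TAIL_CALLER_FUNCTION) {
        outer->frame_type = JS_FUNCTION;  // ClearTailCallerMark()
      }
      assert(outer == &caller && caller.frame_type == JS_FUNCTION);
    }
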
@@ -683,6 +685,11 @@ class HEnvironment final : public ZoneObject {
FrameType frame_type,
int arguments) const;
+ // Marks the current environment as a tail caller by setting its frame
+ // type to TAIL_CALLER_FUNCTION.
+ void MarkAsTailCaller();
+ void ClearTailCallerMark();
+
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
@@ -852,10 +859,9 @@ class TestContext final : public AstContext {
class FunctionState final {
public:
- FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info,
- InliningKind inlining_kind,
- int inlining_id);
+ FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info,
+ InliningKind inlining_kind, int inlining_id,
+ TailCallMode tail_call_mode);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -870,6 +876,11 @@ class FunctionState final {
FunctionState* outer() { return outer_; }
+ TailCallMode ComputeTailCallMode(TailCallMode tail_call_mode) const {
+ if (tail_call_mode_ == TailCallMode::kDisallow) return tail_call_mode_;
+ return tail_call_mode;
+ }
+
HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
@@ -887,6 +898,10 @@ class FunctionState final {
int inlining_id() const { return inlining_id_; }
+ void IncrementInDoExpressionScope() { do_expression_scope_count_++; }
+ void DecrementInDoExpressionScope() { do_expression_scope_count_--; }
+ bool IsInsideDoExpressionScope() { return do_expression_scope_count_ > 0; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -899,6 +914,10 @@ class FunctionState final {
// The kind of call which is currently being inlined.
InliningKind inlining_kind_;
+ // Defines whether calls with TailCallMode::kAllow in the function body
+ // can be generated as tail calls.
+ TailCallMode tail_call_mode_;
+
// When inlining in an effect or value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
// pair of return blocks in the context. When not inlining, there is no
@@ -919,6 +938,8 @@ class FunctionState final {
int inlining_id_;
SourcePosition outer_source_position_;
+ int do_expression_scope_count_;
+
FunctionState* outer_;
};
@@ -1267,6 +1288,26 @@ class HGraphBuilder {
return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
}
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+ return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8,
+ p9);
+ }
+
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
+ P8 p8, P9 p9) {
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
+ }
+
+ template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
+ class P7, class P8, class P9>
+ I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
+ }
+
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
// When initializing arrays, we'll unfold the loop if the number of elements
@@ -1837,11 +1878,6 @@ class HGraphBuilder {
HValue* BuildElementIndexHash(HValue* index);
- enum MapEmbedding { kEmbedMapsDirectly, kEmbedMapsViaWeakCells };
-
- void BuildCompareNil(HValue* value, Type* type, HIfContinuation* continuation,
- MapEmbedding map_embedding = kEmbedMapsDirectly);
-
void BuildCreateAllocationMemento(HValue* previous_object,
HValue* previous_object_size,
HValue* payload);
@@ -2198,6 +2234,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
+ F(NewObject) \
F(ValueOf) \
F(StringCharFromCode) \
F(StringCharAt) \
@@ -2222,6 +2259,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
+ F(GetOrdinaryHasInstance) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(MaxSmi) \
@@ -2235,9 +2273,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(ConstructDouble) \
F(DoubleHi) \
F(DoubleLo) \
- F(MathClz32) \
- F(MathFloor) \
- F(MathSqrt) \
F(MathLogRT) \
/* ES6 Collections */ \
F(MapClear) \
@@ -2404,7 +2439,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(Handle<JSFunction> target, int arguments_count,
HValue* implicit_return_value, BailoutId ast_id,
- BailoutId return_id, InliningKind inlining_kind);
+ BailoutId return_id, InliningKind inlining_kind,
+ TailCallMode syntactic_tail_call_mode);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2435,16 +2471,17 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId ast_id);
bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
SmallMapList* receiver_maps, int argc, BailoutId ast_id,
- ApiCallType call_type);
+ ApiCallType call_type,
+ TailCallMode syntactic_tail_call_mode);
static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
// non-NULL reason string.
- void TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* failure_reason);
+ void TraceInline(Handle<JSFunction> target, Handle<JSFunction> caller,
+ const char* failure_reason,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
void HandleGlobalVariableAssignment(Variable* var, HValue* value,
FeedbackVectorSlot slot,
@@ -2826,14 +2863,23 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
- HInstruction* NewPlainFunctionCall(HValue* fun, int argument_count);
+ void BuildEnsureCallable(HValue* object);
+
+ HInstruction* NewCallFunction(HValue* function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode);
- HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
- int argument_count,
- HValue* expected_param_count);
+ HInstruction* NewCallFunctionViaIC(HValue* function, int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode,
+ FeedbackVectorSlot slot);
- HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
- int argument_count);
+ HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
+ int argument_count,
+ TailCallMode syntactic_tail_call_mode,
+ TailCallMode tail_call_mode);
bool CanBeFunctionApplyArguments(Call* expr);
@@ -3032,6 +3078,19 @@ class NoObservableSideEffectsScope final {
HGraphBuilder* builder_;
};
+class DoExpressionScope final {
+ public:
+ explicit DoExpressionScope(HOptimizedGraphBuilder* builder)
+ : builder_(builder) {
+ builder_->function_state()->IncrementInDoExpressionScope();
+ }
+ ~DoExpressionScope() {
+ builder_->function_state()->DecrementInDoExpressionScope();
+ }
+
+ private:
+ HOptimizedGraphBuilder* builder_;
+};
} // namespace internal
} // namespace v8
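
DoExpressionScope is a plain RAII counter: construction and destruction keep FunctionState's do_expression_scope_count_ balanced on every exit path, so IsInsideDoExpressionScope() stays reliable wherever the builder consults it (the use sites are outside this hunk; presumably they restrict optimizations such as tail calls inside do-expressions). The pattern in isolation:

    #include <cassert>

    struct State {
      int depth = 0;
      bool IsInside() const { return depth > 0; }
    };

    // RAII guard: the counter is decremented on every way out of the scope,
    // including early returns, which a bare increment/decrement pair would
    // not guarantee.
    class ScopeGuard {
     public:
      explicit ScopeGuard(State* s) : state_(s) { ++state_->depth; }
      ~ScopeGuard() { --state_->depth; }

     private:
      State* state_;
    };

    int main() {
      State state;
      {
        ScopeGuard outer(&state);
        ScopeGuard inner(&state);  // scopes nest; the count is the depth
        assert(state.depth == 2 && state.IsInside());
      }
      assert(!state.IsInside());
    }
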
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 239db8ba13..d8b20c87a7 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -57,13 +57,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -130,31 +123,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -162,61 +130,29 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
}
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, kFrameIsExpectedToBeAligned);
- }
-
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
DCHECK(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
+ MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
+ if (FLAG_debug_code) {
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
}
if (info()->saves_caller_doubles()) SaveCallerDoubles();
@@ -298,47 +234,11 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- // Align ebp + 4 to a multiple of 2 * kPointerSize.
- __ test(ebp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
- // Move all parts of the frame over one word. The frame consists of:
- // unoptimized frame slots, alignment state, context, frame pointer, return
- // address, receiver, and the arguments.
- __ mov(ecx, Immediate(scope()->num_parameters() +
- 5 + graph()->osr()->UnoptimizedFrameSlots()));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ sub(Operand(ebp), Immediate(kPointerSize));
- __ bind(&do_not_pad);
- }
-
- // Save the first local, which is overwritten by the alignment state.
- Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
- __ push(alignment_loc);
-
- // Set the dynamic frame alignment state.
- __ mov(alignment_loc, edx);
-
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 1);
- __ sub(esp, Immediate((slots - 1) * kPointerSize));
+ DCHECK(slots >= 0);
+ __ sub(esp, Immediate(slots * kPointerSize));
}
@@ -380,29 +280,24 @@ bool LCodeGen::GenerateJumpTable() {
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
/* stack layout
- 4: entry address
- 3: return address <-- esp
- 2: garbage
+ 3: entry address
+ 2: return address <-- esp
1: garbage
0: garbage
*/
- __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
- __ push(MemOperand(esp, kPointerSize)); // Copy return address.
- __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+ __ push(MemOperand(esp, 0)); // Copy return address.
+ __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
/* stack layout
- 4: entry address
- 3: return address
- 2: garbage
+ 3: entry address
+ 2: return address
1: return address
0: entry address <-- esp
*/
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
- // Copy context.
- __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
// Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
@@ -411,8 +306,7 @@ bool LCodeGen::GenerateJumpTable() {
Immediate(Smi::FromInt(StackFrame::STUB)));
/* stack layout
- 4: old ebp
- 3: context pointer
+ 3: old ebp
2: stub marker
1: return address
0: entry address <-- esp
@@ -447,9 +341,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -1969,15 +1862,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -1985,13 +1879,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2011,18 +1905,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, instr->FalseLabel(chunk_));
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2033,19 +1927,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2237,7 +2131,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
}
@@ -2267,11 +2161,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(edx));
DCHECK(ToRegister(instr->right()).is(eax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ test(eax, eax);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
@@ -2430,7 +2323,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2463,18 +2356,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ bind(&done);
}
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
- int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+ int extra_value_count = 1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
- if (dynamic_frame_alignment && FLAG_debug_code) {
- __ cmp(Operand(esp,
- (parameter_count + extra_value_count) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
@@ -2482,20 +2368,9 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
// The argument count parameter is a smi
__ SmiUntag(reg);
Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
- if (dynamic_frame_alignment && FLAG_debug_code) {
- DCHECK(extra_value_count == 2);
- __ cmp(Operand(esp, reg, times_pointer_size,
- extra_value_count * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
// emit code to restore stack based on instr->parameter_count()
__ pop(return_addr_reg); // save return address
- if (dynamic_frame_alignment) {
- __ inc(reg); // 1 more for alignment
- }
-
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
@@ -2514,25 +2389,12 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding, Label::kNear);
-
- EmitReturn(instr, true);
- __ bind(&no_padding);
- }
- EmitReturn(instr, false);
+ EmitReturn(instr);
}
@@ -2942,11 +2804,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ mov(result,
+ Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
@@ -2962,6 +2825,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ mov(result, Operand(ebp));
}
}
@@ -3005,12 +2870,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
}
@@ -3066,13 +2931,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(eax);
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to the caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3116,10 +2993,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3135,21 +3011,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function directly.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ jmp(target);
+ } else {
+ __ call(target);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3191,35 +3084,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, instr->arity());
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3227,8 +3091,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+ uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
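
The old tmp/tmp2 selection was a chain of register-identity conditionals; the new code treats the candidate set as a bitmask and repeatedly takes the lowest set bit, which scales to any number of exclusions (input register, context register). The idiom in isolation, with a portable stand-in for base::bits::CountTrailingZeros32:

    #include <cassert>
    #include <cstdint>

    // Stand-in for base::bits::CountTrailingZeros32; x must be nonzero.
    int CountTrailingZeros32(uint32_t x) {
      int n = 0;
      while ((x & 1) == 0) {
        x >>= 1;
        ++n;
      }
      return n;
    }

    int main() {
      enum { eax = 0, ecx = 1, edx = 2, ebx = 3 };  // register codes as bits
      uint32_t available = (1u << eax) | (1u << ecx) | (1u << edx) | (1u << ebx);
      int input_reg = ecx;
      available &= ~(1u << input_reg);              // never clobber the input
      int tmp = CountTrailingZeros32(available);    // lowest remaining code
      available &= ~(1u << tmp);
      int tmp2 = CountTrailingZeros32(available);   // next lowest, distinct
      assert(tmp == eax && tmp2 == edx);
      assert(tmp != input_reg && tmp2 != input_reg && tmp != tmp2);
    }
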
@@ -3314,8 +3189,14 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
}
}
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ roundsd(output_reg, input_reg, kRoundDown);
+}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3379,8 +3260,23 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ Label done;
+ __ roundsd(output_reg, input_reg, kRoundUp);
+ __ Move(xmm_scratch, -0.5);
+ __ addsd(xmm_scratch, output_reg);
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &done, Label::kNear);
+ __ Move(xmm_scratch, 1.0);
+ __ subsd(output_reg, xmm_scratch);
+ __ bind(&done);
+}
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
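
DoMathRoundD above implements Math.round's round-half-toward-positive-infinity rule with a single SSE4.1 roundsd(kRoundUp) plus a correction: when ceil(x) overshoots the input by more than one half, back off by one. The same arithmetic in scalar form:

    #include <cassert>
    #include <cmath>

    // Math.round as DoMathRoundD computes it: take ceil(x), then subtract 1
    // when ceil(x) - 0.5 > x, i.e. when x sits below the halfway point.
    double MathRound(double x) {
      double r = std::ceil(x);    // roundsd(output_reg, input_reg, kRoundUp)
      if (r - 0.5 > x) r -= 1.0;  // addsd/ucomisd/subsd sequence above
      return r;
    }

    int main() {
      assert(MathRound(2.5) == 3.0);    // halfway cases round up
      assert(MathRound(2.4) == 2.0);
      assert(MathRound(-2.5) == -2.0);  // toward +infinity, not away from 0
      assert(MathRound(-2.6) == -3.0);
    }
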
@@ -3570,54 +3466,78 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
}
-}
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(ebp, scratch2);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack, 0);
+ Comment(";;; }");
+}
+
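
Before the frame can be replaced, PrepareForTailCall must know how many arguments the caller itself received: if the caller runs under an arguments adaptor frame, the count is read from that frame's length slot; otherwise the caller's formal parameter count is used. A minimal model of that decision; the struct layout is invented for the sketch:

    #include <cstdio>

    // Hypothetical stand-in for the two caller-frame shapes probed above.
    struct CallerFrame {
      bool is_arguments_adaptor;  // frame marker == ARGUMENTS_ADAPTOR?
      int adaptor_args_length;    // adaptor frame's untagged length slot
    };

    static int CallerArgsCount(const CallerFrame& caller,
                               int formal_parameter_count) {
      return caller.is_arguments_adaptor ? caller.adaptor_args_length
                                         : formal_parameter_count;
    }

    int main() {
      printf("%d\n", CallerArgsCount({true, 5}, 2));   // adapted call: 5
      printf("%d\n", CallerArgsCount({false, 0}, 2));  // direct call: 2
      return 0;
    }
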
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(edx));
- DCHECK(vector_register.is(ebx));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ mov(vector_register, vector);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(edi, no_reg, actual, flag, generator);
} else {
- __ Set(eax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -4483,7 +4403,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
+ __ test_b(temp_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
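
Only the operand type changes here (test_b now takes an explicit Immediate), but the underlying minus-zero check is worth spelling out: once the value compares equal to zero, movmskpd copies the double's sign bit into bit 0 of a general-purpose register, so testing that bit separates -0.0 from +0.0. Scalar model:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Model of the movmskpd/test_b pair: the sign bit is the only thing
    // that distinguishes -0.0 from +0.0 once x == 0.0 holds.
    static bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return x == 0.0 && (bits >> 63) != 0;
    }

    int main() {
      printf("%d %d\n", IsMinusZero(-0.0), IsMinusZero(0.0));  // 1 0
      return 0;
    }
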
@@ -4705,7 +4625,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- 1 << JSArrayBuffer::WasNeutered::kShift);
+ Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
@@ -4721,8 +4641,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
// If there is only one type in the interval check for equality.
if (first == last) {
@@ -4731,8 +4650,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
@@ -4743,7 +4661,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
} else {
@@ -5027,13 +4945,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5100,7 +5011,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
@@ -5121,7 +5032,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
// clang-format off
@@ -5382,13 +5293,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index 589ef2e05e..bc61c96339 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -29,8 +29,6 @@ class LCodeGen: public LCodeGenBase {
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -193,11 +191,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -300,7 +301,7 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+ void EmitReturn(LReturn* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -319,8 +320,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index e2772d5ee3..4afeef5d68 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -267,27 +267,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -434,13 +413,6 @@ LPlatformChunk* LChunkBuilder::Build() {
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 4);
- USE(alignment_state_index);
- }
-
// If compiling for OSR, reserve space for the unoptimized frame,
// which will be subsumed into this frame.
if (graph()->has_osr()) {
@@ -618,11 +590,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -936,22 +904,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -964,15 +926,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1100,16 +1062,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1132,6 +1084,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1140,6 +1095,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1170,22 +1128,33 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
}
}
-
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new (zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathFloorD* result = new (zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathRoundD* result = new (zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
}
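
Both builder rewrites above follow one pattern: a single hydrogen unary math operation now lowers to either an integer-result instruction, which can deoptimize and therefore gets an environment and a pointer map, or a double-result instruction, which cannot. A compact model of the selection; the names are illustrative:

    #include <cstdio>

    enum Representation { kInteger32, kDouble };

    struct Lowering {
      const char* mnemonic;
      bool needs_environment;  // true iff the instruction can deoptimize
    };

    // Mirrors DoMathFloor/DoMathRound: the integer variant may deopt
    // (e.g. on -0 or out-of-range input), the double variant never does.
    static Lowering SelectMathFloor(Representation result_repr) {
      if (result_repr == kInteger32) return {"math-floor-i", true};
      return {"math-floor-d", false};
    }

    int main() {
      Lowering l = SelectMathFloor(kDouble);
      printf("%s deopt=%d\n", l.mnemonic, l.needs_environment);
      return 0;
    }
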
-
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
@@ -1253,22 +1222,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(edx);
- vector = FixedTemp(ebx);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1834,13 +1787,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2507,11 +2453,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
- if (spill_index == 0) {
- // The dynamic frame alignment state overwrites the first local.
- // The first local is saved at the end of the unoptimized frame.
- spill_index = graph()->osr()->UnoptimizedFrameSlots();
- }
spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2551,13 +2492,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), ebx);
@@ -2595,11 +2529,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2660,13 +2592,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index e22ab437fc..68541a48c4 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -33,9 +33,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -105,12 +103,14 @@ class LCodeGen;
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
- V(MathFloor) \
+ V(MathFloorD) \
+ V(MathFloorI) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
- V(MathRound) \
+ V(MathRoundD) \
+ V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -135,7 +135,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -147,7 +146,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -156,7 +154,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -228,6 +225,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
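
The new syntactic-tail-call flag shares bit_field_ with IsCallBits through V8's BitField template, each flag claiming the bit after its predecessor via kNext. A minimal self-contained version of the pattern, simplified from the real template defined elsewhere in V8:

    #include <cassert>
    #include <cstdint>

    // Several flags packed into one integer; shift and width are fixed
    // at compile time, and kNext chains the fields together.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static const int kNext = shift + size;
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | (static_cast<uint32_t>(value) << shift);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    using IsCallBits = BitField<bool, 0, 1>;
    using IsSyntacticTailCallBits = BitField<bool, IsCallBits::kNext, 1>;

    int main() {
      uint32_t bit_field = 0;
      bit_field = IsSyntacticTailCallBits::update(bit_field, true);
      assert(!IsCallBits::decode(bit_field));
      assert(IsSyntacticTailCallBits::decode(bit_field));
      return 0;
    }
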
@@ -539,6 +545,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -808,23 +815,43 @@ class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
void PrintDataTo(StringStream* stream) override;
};
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
+ explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* value, LOperand* temp) {
+ LMathRoundI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -832,7 +859,7 @@ class LMathRound final : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
@@ -1723,23 +1750,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1798,29 +1808,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- void PrintDataTo(StringStream* stream) override;
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2415,19 +2402,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2542,18 +2516,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/lithium-allocator-inl.h b/deps/v8/src/crankshaft/lithium-allocator-inl.h
index 22611b5efb..631af6024b 100644
--- a/deps/v8/src/crankshaft/lithium-allocator-inl.h
+++ b/deps/v8/src/crankshaft/lithium-allocator-inl.h
@@ -21,6 +21,8 @@
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index 5d05292642..6155dc0f23 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -510,9 +510,9 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
return LifetimePosition::Invalid();
}
-
LAllocator::LAllocator(int num_values, HGraph* graph)
- : chunk_(NULL),
+ : zone_(graph->isolate()->allocator()),
+ chunk_(NULL),
live_in_sets_(graph->blocks()->length(), zone()),
live_ranges_(num_values * 2, zone()),
fixed_live_ranges_(NULL),
@@ -529,7 +529,6 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
has_osr_entry_(false),
allocation_ok_(true) {}
-
void LAllocator::InitializeLivenessAnalysis() {
// Initialize the live_in sets for each block to NULL.
int block_count = graph_->blocks()->length();
diff --git a/deps/v8/src/crankshaft/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
index 46289e0fbb..b648bd80c6 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.h
+++ b/deps/v8/src/crankshaft/lithium-allocator.h
@@ -6,6 +6,7 @@
#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/lithium.h"
#include "src/zone.h"
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index c5b7e9c470..53fedcf1df 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -30,6 +30,9 @@
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -237,8 +240,8 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
break;
}
case JS_GETTER: {
- DCHECK(translation_size == 1);
- DCHECK(height == 0);
+ DCHECK_EQ(1, translation_size);
+ DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
@@ -252,8 +255,8 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
break;
}
case JS_SETTER: {
- DCHECK(translation_size == 2);
- DCHECK(height == 0);
+ DCHECK_EQ(2, translation_size);
+ DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
@@ -266,6 +269,20 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
}
break;
}
+ case TAIL_CALLER_FUNCTION: {
+ DCHECK_EQ(0, translation_size);
+ int shared_id = DefineDeoptimizationLiteral(
+ environment->entry() ? environment->entry()->shared()
+ : info()->shared_info());
+ translation->BeginTailCallerFrame(shared_id);
+ if (info()->closure().is_identical_to(environment->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->StoreLiteral(closure_id);
+ }
+ break;
+ }
case ARGUMENTS_ADAPTOR: {
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
diff --git a/deps/v8/src/crankshaft/lithium-inl.h b/deps/v8/src/crankshaft/lithium-inl.h
index 9044b4ca7a..938588e396 100644
--- a/deps/v8/src/crankshaft/lithium-inl.h
+++ b/deps/v8/src/crankshaft/lithium-inl.h
@@ -21,6 +21,8 @@
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 677639095a..d34b04f5da 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -30,6 +30,9 @@
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
+#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -247,7 +250,9 @@ void LPointerMap::PrintTo(StringStream* stream) {
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : base_frame_slots_(StandardFrameConstants::kFixedFrameSize / kPointerSize),
+ : base_frame_slots_(info->IsStub()
+ ? TypedFrameConstants::kFixedSlotCount
+ : StandardFrameConstants::kFixedSlotCount),
current_frame_slots_(base_frame_slots_),
info_(info),
graph_(graph),
@@ -333,7 +338,6 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
}
}
-
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id(), zone());
}
@@ -461,7 +465,8 @@ Handle<Code> LChunk::Codegen() {
void* jit_handler_data =
assembler.positions_recorder()->DetachJITHandlerData();
LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+ CodeEndLinePosInfoRecordEvent(AbstractCode::cast(*code),
+ jit_handler_data));
CodeGenerator::PrintCode(code, info());
DCHECK(!(info()->isolate()->serializer_enabled() &&
@@ -502,18 +507,94 @@ void LChunkBuilderBase::Retry(BailoutReason reason) {
status_ = ABORTED;
}
+void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block,
+ LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ if (!instr->IsCall()) return;
+
+ HEnvironment* hydrogen_env = current_block->last_environment();
+ HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+ DCHECK_NOT_NULL(hydrogen_env);
+ if (instr->IsSyntacticTailCall()) {
+ // If it was a syntactic tail call we need to drop the current frame and
+ // all the frames on top of it that are either an arguments adaptor frame
+ // or a tail caller frame.
+ hydrogen_env = hydrogen_env->outer();
+ while (hydrogen_env != nullptr &&
+ (hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR ||
+ hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) {
+ hydrogen_env = hydrogen_env->outer();
+ }
+ if (hydrogen_env != nullptr) {
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
+ // In case an outer frame is a function frame we have to replay
+ // environment manually because
+ // 1) it does not contain a result of inlined function yet,
+ // 2) we can't find the proper simulate that corresponds to the point
+ // after inlined call to do a ReplayEnvironment() on.
+ // So we push return value on top of outer environment.
+ // As for JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here,
+ // the deoptimizer ensures that the result of the callee is correctly
+ // propagated to result register during deoptimization.
+ hydrogen_env = hydrogen_env->Copy();
+ hydrogen_env->Push(hydrogen_val);
+ }
+ } else {
+ // Although we don't need this lazy bailout for normal execution
+ // (because when we tail call from the outermost function we should pop
+ // its frame) we still need it when debugger is on.
+ hydrogen_env = current_block->last_environment();
+ }
+ } else {
+ if (hydrogen_val->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+ sim->ReplayEnvironment(hydrogen_env);
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ }
+ LInstruction* bailout = LChunkBuilderBase::AssignEnvironment(
+ new (zone()) LLazyBailout(), hydrogen_env);
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block);
+}
+
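
The frame walk at the top of CreateLazyBailoutForCall is the heart of the change: for a syntactic tail call, the tail-calling environment itself plus any arguments-adaptor or tail-caller environments above the next real frame are dropped before the bailout environment is built. A minimal model:

    #include <cstddef>

    enum FrameType { JS_FUNCTION, ARGUMENTS_ADAPTOR, TAIL_CALLER_FUNCTION };

    struct Env {
      FrameType frame_type;
      Env* outer;
    };

    // Mirrors the loop above: skip the tail-calling frame itself, then
    // every adaptor/tail-caller frame stacked on the next JS frame.
    static Env* DropTailCallFrames(Env* env) {
      env = env->outer;
      while (env != nullptr && (env->frame_type == ARGUMENTS_ADAPTOR ||
                                env->frame_type == TAIL_CALLER_FUNCTION)) {
        env = env->outer;
      }
      return env;  // null when tail-calling from the outermost function
    }

    int main() {
      Env outer_js = {JS_FUNCTION, nullptr};
      Env adaptor = {ARGUMENTS_ADAPTOR, &outer_js};
      Env inner_js = {JS_FUNCTION, &adaptor};
      return DropTailCallFrames(&inner_js) == &outer_js ? 0 : 1;
    }
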
+LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr,
+ HEnvironment* hydrogen_env) {
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type());
+ instr->set_environment(CreateEnvironment(
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+ return instr;
+}
LEnvironment* LChunkBuilderBase::CreateEnvironment(
HEnvironment* hydrogen_env, int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
+ BailoutId ast_id = hydrogen_env->ast_id();
+ DCHECK(!ast_id.IsNone() ||
+ (hydrogen_env->frame_type() != JS_FUNCTION &&
+ hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION));
+
+ if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) {
+ // Skip potential outer arguments adaptor frame.
+ HEnvironment* outer_hydrogen_env = hydrogen_env->outer();
+ if (outer_hydrogen_env != nullptr &&
+ outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) {
+ outer_hydrogen_env = outer_hydrogen_env->outer();
+ }
+ LEnvironment* outer = CreateEnvironment(
+ outer_hydrogen_env, argument_index_accumulator, objects_to_materialize);
+ return new (zone())
+ LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(),
+ ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone());
+ }
+
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
objects_to_materialize);
- BailoutId ast_id = hydrogen_env->ast_id();
- DCHECK(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
? 0
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index 5cfc0c358a..a2c028330b 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -9,6 +9,7 @@
#include "src/allocation.h"
#include "src/bailout-reason.h"
+#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone-allocator.h"
@@ -744,6 +745,16 @@ class LChunkBuilderBase BASE_EMBEDDED {
// Will not be moved to a register even if one is freely available.
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
+ // Constructs proper environment for a lazy bailout point after call, creates
+ // LLazyBailout instruction and adds it to current block.
+ void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
+ HInstruction* hydrogen_val);
+
+ // Assigns given environment to an instruction. An instruction which can
+ // deoptimize must have an environment.
+ LInstruction* AssignEnvironment(LInstruction* instr,
+ HEnvironment* hydrogen_env);
+
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 66fcf75ec0..f1717ca474 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -136,7 +136,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -278,18 +278,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(at);
- __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
@@ -328,7 +325,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ Call(&needs_frame);
} else {
__ Call(&call_deopt_entry);
@@ -342,10 +339,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(at);
- __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -1966,29 +1962,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2009,14 +2006,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2027,14 +2024,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2042,7 +2039,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2274,11 +2271,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(a1));
DCHECK(ToRegister(instr->right()).is(a0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
- Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq, v0, Operand(at));
}
@@ -2989,17 +2985,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ Subu(result, sp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ lw(result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
__ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ } else {
+ __ mov(result, fp);
}
}
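
The MIPS version of the adaptor check stays branchless: temp is the XOR of the loaded frame marker with the ARGUMENTS_ADAPTOR sentinel, so it is zero exactly for adaptor frames, and the Movn/Movz pair then selects between the current and the caller's frame pointer. A scalar model of the two conditional moves; the sentinel value is hypothetical:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // MIPS Movn: dst = (cond != 0) ? src : dst.
    static uint32_t Movn(uint32_t dst, uint32_t src, uint32_t cond) {
      return cond != 0 ? src : dst;
    }
    // MIPS Movz: dst = (cond == 0) ? src : dst.
    static uint32_t Movz(uint32_t dst, uint32_t src, uint32_t cond) {
      return cond == 0 ? src : dst;
    }

    int main() {
      const uint32_t kAdaptorMarker = 4;  // hypothetical sentinel value
      const uint32_t fp = 0x1000, caller_fp = 0x2000;
      for (uint32_t marker : {kAdaptorMarker, kAdaptorMarker + 1}) {
        uint32_t temp = marker ^ kAdaptorMarker;  // 0 iff adaptor frame
        uint32_t result = 0;
        result = Movn(result, fp, temp);          // not adapted -> fp
        result = Movz(result, caller_fp, temp);   // adapted -> caller fp
        assert(result == (temp == 0 ? caller_fp : fp));
      }
      return 0;
    }
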
@@ -3122,15 +3121,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ sll(scratch, length, 2);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(a0);
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3176,10 +3185,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3196,17 +3204,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(at);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(at);
+ } else {
+ __ Call(at);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
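
CallKnownFunction now covers four direct-call shapes: self-call vs foreign target, crossed with ordinary vs tail call, and the lazy-deopt safepoint is recorded only for ordinary calls, since a tail call never returns here. A small decision-table model; the field names are invented for the sketch:

    #include <cstdio>

    struct CallPlan {
      bool target_own_code;   // call/jump to the code object being built
      bool emit_jump;         // tail call: jump instead of call
      bool record_safepoint;  // lazy deopt only after a real call
    };

    static CallPlan PlanDirectCall(bool is_self_call, bool is_tail_call) {
      return {is_self_call, is_tail_call, !is_tail_call};
    }

    int main() {
      CallPlan p = PlanDirectCall(/*is_self_call=*/true, /*is_tail_call=*/true);
      printf("self=%d jump=%d safepoint=%d\n", p.target_own_code, p.emit_jump,
             p.record_safepoint);
      return 0;
    }
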
@@ -3540,22 +3566,78 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(fp, scratch2);
+ __ lw(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use t0, t1 and t2 as scratch registers here given that
+ // we are not going to return to caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(a1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3597,56 +3679,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Change context.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(instr->arity()));
-
- // Load the code entry address
- __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(a3));
- DCHECK(vector_register.is(a2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ li(vector_register, vector);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4190,8 +4222,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
- ne, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(al, instr);
__ bind(&no_memento_found);
}
@@ -5140,14 +5171,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
- __ push(a0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5535,13 +5558,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index df72b2e93c..7a316e5957 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -212,11 +212,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index a7c5488d04..71c34df516 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -255,27 +255,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -907,22 +881,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -935,14 +903,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1064,16 +1032,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1105,6 +1066,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1224,22 +1188,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(a3);
- vector = FixedTemp(a2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1790,13 +1738,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2435,13 +2376,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a3);
@@ -2478,11 +2412,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2543,13 +2475,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index 8b36c5d055..7d41093be1 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -132,7 +130,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -144,7 +141,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -224,6 +220,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -258,6 +261,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
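The new flag shares the instruction's bit_field_ word with the existing one; IsCallBits::kNext places it in the very next bit. The BitField mechanics, spelled out as a standalone sketch using the classes declared above:

    int bit_field = 0;
    bit_field = IsCallBits::update(bit_field, true);               // sets bit 0
    bit_field = IsSyntacticTailCallBits::update(bit_field, true);  // sets bit 1
    DCHECK(IsCallBits::decode(bit_field));                         // true
    DCHECK(IsSyntacticTailCallBits::decode(bit_field));            // true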
@@ -532,6 +537,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1684,23 +1690,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1763,29 +1752,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2385,19 +2351,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2514,18 +2467,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 5937f97386..c7bbe9f07a 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -111,7 +111,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -253,19 +253,15 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Daddu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ pop(at);
- __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ jmp(code->exit());
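The deleted MultiPush/Daddu sequence built the old fixed frame, which always included cp; the new common frame drops the context slot in favor of a frame-type marker. A hedged sketch of what PushCommonFrame presumably does, inferred from the sequence it replaces (the real body is in the MIPS MacroAssembler and may differ):

    void MacroAssembler::PushCommonFrame(Register marker_reg) {
      // ra, fp and the frame-type marker; unlike the old fixed frame,
      // cp is no longer saved here.
      Push(ra, fp, marker_reg);
      Daddu(fp, sp, Operand(kPointerSize));  // fp points at the saved fp slot
    }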
@@ -300,7 +296,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
__ li(t9, Operand(entry - base));
} else {
@@ -313,7 +309,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ PushCommonFrame();
__ BranchAndLink(&needs_frame);
} else {
__ BranchAndLink(&call_deopt_entry);
@@ -327,10 +323,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(at);
- __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -1343,9 +1338,10 @@ void LCodeGen::DoMulS(LMulS* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Dsubu(result, zero_reg, left);
}
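The old *AndCheckForOverflow helpers materialized an overflow indicator in a scratch register and deoptimized on lt; the new *BranchNoOvf helpers branch past the deopt when no overflow occurred, which is why the DeoptimizeIf condition becomes the unconditional al. The scalar semantics of this -1 case, restated as plain C++ (a sketch in 32 bits for brevity; Deoptimize() is a hypothetical stand-in for the deopt exit, and the GCC/Clang overflow builtin stands in for the branch):

    void Deoptimize();  // hypothetical stand-in for the deoptimization exit

    int32_t NegateOrDeopt(int32_t left) {
      int32_t result;
      // 0 - left overflows only for left == INT32_MIN.
      if (__builtin_sub_overflow(0, left, &result)) Deoptimize();
      return result;
    }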
@@ -1444,9 +1440,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Subu(result, zero_reg, left);
}
@@ -1652,13 +1649,13 @@ void LCodeGen::DoSubS(LSubS* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1673,13 +1670,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1813,13 +1810,13 @@ void LCodeGen::DoAddS(LAddS* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
+ Label no_overflow_label;
Register scratch = scratch1();
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1834,13 +1831,13 @@ void LCodeGen::DoAddI(LAddI* instr) {
DCHECK(right->IsRegister() || right->IsConstantOperand());
__ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
} else { // can_overflow.
- Register overflow = scratch0();
+ Label no_overflow_label;
Register scratch = scratch1();
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -2083,29 +2080,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ ld(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
@@ -2126,14 +2124,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2144,14 +2142,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2159,7 +2157,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
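The stub rename aside, the dispatch is unchanged; for reference, the ECMAScript ToBoolean semantics these fast paths implement, as a plain C++ sketch over a hypothetical tagged-value interface (Value and its accessors are illustrative, not V8 API):

    #include <cmath>

    bool ToBoolean(const Value& v) {  // Value is a hypothetical stand-in
      if (v.IsUndefined() || v.IsNull()) return false;
      if (v.IsBoolean()) return v.BooleanValue();
      if (v.IsSmi()) return v.SmiValue() != 0;
      if (v.IsString()) return v.StringLength() != 0;
      if (v.IsHeapNumber()) {
        double d = v.NumberValue();
        return d != 0.0 && !std::isnan(d);  // +0, -0 and NaN are falsy
      }
      return true;  // JS receivers, symbols and SIMD values are all truthy
    }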
@@ -2391,11 +2389,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(a1));
DCHECK(ToRegister(instr->right()).is(a0));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
- Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq, v0, Operand(at));
}
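CodeFactory::StringCompare now takes the comparison token, and the generated code returns the canonical true/false objects instead of a flag word, so the old cmp/ComputeCompareCondition pairing collapses to a single equality test against the true root. The new shape, isolated for clarity (same calls as above):

    Handle<Code> code =
        CodeFactory::StringCompare(isolate(), instr->op()).code();
    CallCode(code, RelocInfo::CODE_TARGET, instr);  // v0 <- true or false
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    EmitBranch(instr, eq, v0, Operand(at));         // taken iff result is true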
@@ -3169,17 +3166,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ Dsubu(result, sp, 2 * kPointerSize);
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ ld(result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Result is the frame pointer of this frame if not adapted, and of the real
// frame below the adaptor frame if adapted.
__ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
__ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
+ } else {
+ __ mov(result, fp);
}
}
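HArgumentsElements now records whether an adaptor frame can occur at all, so the probe is emitted only when needed. The three cases, restated as plain C++ (a sketch; Address and Load() are illustrative stand-ins for the memory operands above):

    Address ArgumentsFrame() {
      if (from_inlined) return sp - 2 * kPointerSize;
      if (arguments_adaptor) {
        Address caller_fp = Load(fp + StandardFrameConstants::kCallerFPOffset);
        Address marker =
            Load(caller_fp + CommonFrameConstants::kContextOrFrameTypeOffset);
        bool adapted = (marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
        return adapted ? caller_fp : fp;
      }
      return fp;  // statically known not to be adapted
    }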
@@ -3306,15 +3306,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ dsll(scratch, length, kPointerSizeLog2);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(a0);
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to the caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver, which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3360,10 +3370,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3380,17 +3389,35 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ Call(at);
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ Jump(at);
+ } else {
+ __ Call(at);
+ }
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3743,22 +3770,75 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Clz(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+  // Check if the next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop the current frame and load the argument count from the adaptor frame.
+ __ mov(fp, scratch2);
+ __ ld(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+ __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
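What the probe computes, restated as plain C++ (a sketch; Load() and SmiUntag() are illustrative stand-ins for the emitted loads). If the caller went through the arguments adaptor, its frame is dropped and its argument count taken; otherwise the caller's static parameter count is used. The final MacroAssembler::PrepareForTailCall then shuffles the stack so the outgoing arguments replace the current frame.

    int caller_args_count;
    Address caller_fp = Load(fp + StandardFrameConstants::kCallerFPOffset);
    Address marker = Load(caller_fp + StandardFrameConstants::kContextOffset);
    if (marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)) {
      fp = caller_fp;  // drop the current frame
      caller_args_count =
          SmiUntag(Load(fp + ArgumentsAdaptorFrameConstants::kLengthOffset));
    } else {
      caller_args_count = info()->literal()->parameter_count();
    }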
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+    // It is safe to use t0, t1 and t2 as scratch registers here given that
+    // we are not going to return to the caller function anyway.
+ PrepareForTailCall(actual, t0, t1, t2);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(a1, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3800,56 +3880,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Change context.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(instr->arity()));
-
- // Load the code entry address
- __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(a3));
- DCHECK(vector_register.is(a2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ li(vector_register, vector);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a0, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4424,8 +4454,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
- ne, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -5345,14 +5374,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
- __ push(a0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5739,13 +5760,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index 2f1cefae76..4a700bd66c 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -214,11 +214,14 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index b66e8ba18a..bcfbc249d2 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -255,27 +255,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -574,12 +553,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -907,22 +881,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -935,14 +903,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1064,16 +1032,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1097,6 +1055,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1105,6 +1066,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1224,22 +1188,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(a3);
- vector = FixedTemp(a2);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
@@ -1796,13 +1744,6 @@ return result;
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2440,13 +2381,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a3);
@@ -2483,11 +2417,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,13 +2480,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 8d2324f717..41cf93c2a4 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -31,9 +31,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -134,7 +132,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -147,7 +144,6 @@ class LCodeGen;
V(SubS) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -227,6 +223,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -261,6 +264,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -535,6 +540,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1746,23 +1752,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1825,29 +1814,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2431,19 +2397,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 9cf1db64bc..d5d01043dd 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -123,7 +123,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(prologue_offset);
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue(ip, prologue_offset);
+ __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
} else {
__ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
}
@@ -265,15 +265,14 @@ bool LCodeGen::GenerateDeferredCode() {
DCHECK(info()->IsStub());
frame_is_built_ = true;
__ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
- __ PushFixedFrame(scratch0());
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ PushCommonFrame(scratch0());
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
- __ PopFixedFrame(ip);
+ __ PopCommonFrame(scratch0());
frame_is_built_ = false;
}
__ b(code->exit());
@@ -322,7 +321,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
Comment(";;; call deopt with frame");
- __ PushFixedFrame();
+ __ PushCommonFrame();
__ b(&needs_frame, SetLK);
} else {
__ b(&call_deopt_entry, SetLK);
@@ -336,10 +335,9 @@ bool LCodeGen::GenerateJumpTable() {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
- DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ push(ip);
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ DCHECK(info()->IsStub());
}
Comment(";;; call deopt");
@@ -2103,29 +2101,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpi(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ beq(instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ cmpi(reg, Operand::Zero());
__ beq(instr->FalseLabel(chunk_));
@@ -2148,13 +2147,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2166,20 +2165,20 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
Label not_simd;
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2423,11 +2422,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(r4));
DCHECK(ToRegister(instr->right()).is(r3));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ cmpi(r3, Operand::Zero());
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
}
@@ -3209,11 +3207,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ subi(result, sp, Operand(2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
__ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(result,
- MemOperand(scratch, StandardFrameConstants::kContextOffset));
+ __ LoadP(
+ result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
// Result is the frame pointer for the frame if not adapted and for the real
@@ -3230,6 +3229,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ mr(result, scratch);
__ bind(&done);
}
+ } else {
+ __ mr(result, fp);
}
}
@@ -3349,14 +3350,26 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bdnz(&loop);
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r3);
+    // It is safe to use r6, r7 and r8 as scratch registers here given that
+    // 1) we are not going to return to the caller function anyway, and
+    // 2) r6 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r6, r7, r8);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver, which is r3, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3401,10 +3414,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3426,19 +3438,31 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Invoke function.
if (is_self_call) {
- __ CallSelf();
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
__ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- __ CallJSEntry(ip);
+ if (is_tail_call) {
+ __ JumpToJSEntry(ip);
+ } else {
+ __ CallJSEntry(ip);
+ }
}
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
} else {
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
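The old __ CallSelf() was a call-only helper; the replacement builds an explicit Handle<Code> aliasing the assembler's own code-object slot, which is fixed up once the final Code object is allocated, and that handle works for both Call and Jump. Presumably this is why the cast idiom was chosen over extending CallSelf:

    // Retype the assembler's self-reference slot as a Handle<Code>; the
    // location is patched when the Code object is created.
    Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
    if (is_tail_call) {
      __ Jump(self, RelocInfo::CODE_TARGET);  // tail call: never returns here
    } else {
      __ Call(self, RelocInfo::CODE_TARGET);
    }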
@@ -3767,22 +3791,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ cntlzw_(result, input);
}
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+  // Check if the next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+  // Drop the current frame and load the argument count from the adaptor frame.
+ __ mr(fp, scratch2);
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(instr->HasPointerMap());
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+    // It is safe to use r6, r7 and r8 as scratch registers here given that
+    // 1) we are not going to return to the caller function anyway, and
+    // 2) r6 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r6, r7, r8);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r4, no_reg, actual, flag, generator);
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -3824,67 +3903,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- // Change context.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ mov(r3, Operand(instr->arity()));
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ CallJSEntry(ip);
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(r6));
- DCHECK(vector_register.is(r5));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
-
- __ Move(vector_register, vector);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r3, Operand(arity));
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r4));
@@ -4470,9 +4488,10 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -5390,13 +5409,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(r3));
- __ push(r3);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5743,13 +5755,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
#undef __
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 1b72bf82dc..28f168036c 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -194,11 +194,14 @@ class LCodeGen : public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 2a04d9926c..b7397869bb 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -261,27 +261,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -580,11 +559,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -911,22 +886,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -939,14 +908,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new (zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1070,15 +1039,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r4);
-
- LCallJSFunction* result = new (zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1101,6 +1061,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
LCallWithDescriptor* result =
new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r3), instr);
}
@@ -1109,6 +1072,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r4);
LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1225,22 +1191,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r4);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(r6);
- vector = FixedTemp(r5);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), cp);
return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
@@ -1806,13 +1756,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2263,9 +2206,10 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoTrapAllocationMemento(
HTrapAllocationMemento* instr) {
LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
LTrapAllocationMemento* result =
- new (zone()) LTrapAllocationMemento(object, temp);
+ new (zone()) LTrapAllocationMemento(object, temp1, temp2);
return AssignEnvironment(result);
}
@@ -2441,13 +2385,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r3);
- LToFastProperties* result = new (zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r6);
@@ -2486,7 +2423,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(
instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind());
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2548,11 +2485,5 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new (zone()) LStoreFrameContext(context);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index 0dfde053b7..c39f6204f8 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -133,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -146,7 +143,6 @@ class LCodeGen;
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -223,6 +219,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -257,6 +260,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits : public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -525,6 +530,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
@@ -1659,21 +1665,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1736,29 +1727,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2090,16 +2058,17 @@ class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
inputs_[0] = object;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
};
@@ -2326,17 +2295,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2445,16 +2403,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/s390/OWNERS b/deps/v8/src/crankshaft/s390/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
new file mode 100644
index 0000000000..689f4bc1ae
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -0,0 +1,5668 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+ virtual ~SafepointGenerator() {}
+
+ void BeforeCall(int call_size) const override {}
+
+ void AfterCall() const override {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ DCHECK(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+ GenerateJumpTable() && GenerateSafepointTable();
+}
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ DCHECK(is_done());
+ code->set_stack_slots(GetTotalFrameSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ PopulateDeoptimizationData(code);
+}
+
+void LCodeGen::SaveCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ std(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+void LCodeGen::RestoreCallerDoubles() {
+ DCHECK(info()->saves_caller_doubles());
+ DCHECK(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ld(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+bool LCodeGen::GeneratePrologue() {
+ DCHECK(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // r3: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+ // ip: Our own function entry (required by the prologue)
+ }
+
+ int prologue_offset = masm_->pc_offset();
+
+ if (prologue_offset) {
+ // Prologue logic requires its starting address in ip and the
+ // corresponding offset from the function entry. Need to add
+ // 4 bytes for the size of AHI/AGHI that AddP expands into.
+ prologue_offset += sizeof(FourByteInstr);
+ __ AddP(ip, ip, Operand(prologue_offset));
+ }
+ info()->set_prologue_offset(prologue_offset);
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
+ } else {
+ __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
+ }
+ frame_is_built_ = true;
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
+ if (FLAG_debug_code) {
+ __ Push(r2, r3);
+ __ mov(r2, Operand(slots * kPointerSize));
+ __ mov(r3, Operand(kSlotsZapValue));
+ Label loop;
+ __ bind(&loop);
+ __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
+ __ lay(r2, MemOperand(r2, -kPointerSize));
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&loop);
+ __ Pop(r2, r3);
+ }
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+ return !is_aborted();
+}
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
+
+ // Possibly allocate a local context.
+ if (info()->scope()->num_heap_slots() > 0) {
+ Comment(";;; Allocate local context");
+ bool need_write_barrier = true;
+ // Argument to NewContext is the function, which is in r3.
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(r3);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r3);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ RecordSafepoint(deopt_mode);
+
+ // Context is returned in both r2 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ LoadRR(cp, r2);
+ __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r2, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ StoreP(r2, target);
+ // Update the write barrier. This clobbers r5 and r2.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
+ GetLinkRegisterState(), kSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r2, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ Comment(";;; Prologue end");
+}
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ DCHECK(slots >= 0);
+ __ lay(sp, MemOperand(sp, -slots * kPointerSize));
+}
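+
+// E.g. if the unoptimized frame already occupies 3 of the 7 slots the
+// optimized code needs, only the remaining 4 slots are allocated above.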
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+bool LCodeGen::GenerateDeferredCode() {
+ DCHECK(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(
+ ";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(), code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+ __ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ DCHECK(!frame_is_built_);
+ DCHECK(info()->IsStub());
+ frame_is_built_ = true;
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+ __ PushCommonFrame(scratch0());
+ Comment(";;; Deferred code");
+ }
+ code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ DCHECK(frame_is_built_);
+ __ PopCommonFrame(scratch0());
+ frame_is_built_ = false;
+ }
+ __ b(code->exit());
+ }
+ }
+
+ return !is_aborted();
+}
+
+bool LCodeGen::GenerateJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets in halfwords to the table can be encoded in the
+  // 32-bit signed immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+ // TODO(joransiu): The Int24 condition can likely be relaxed for S390
+ if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
+ Abort(kGeneratedCodeIsTooLarge);
+ }
+
+ if (jump_table_.length() > 0) {
+ Label needs_frame, call_deopt_entry;
+
+ Comment(";;; -------------------- Jump table --------------------");
+ Address base = jump_table_[0].address;
+
+ Register entry_offset = scratch0();
+
+ int length = jump_table_.length();
+ for (int i = 0; i < length; i++) {
+ Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+ __ bind(&table_entry->label);
+
+ DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+ Address entry = table_entry->address;
+ DeoptComment(table_entry->deopt_info);
+
+ // Second-level deopt table entries are contiguous and small, so instead
+ // of loading the full, absolute address of each one, load an immediate
+ // offset which will be added to the base address later.
+ __ mov(entry_offset, Operand(entry - base));
+
+ if (table_entry->needs_frame) {
+ DCHECK(!info()->saves_caller_doubles());
+ Comment(";;; call deopt with frame");
+ __ PushCommonFrame();
+ __ b(r14, &needs_frame);
+ } else {
+ __ b(r14, &call_deopt_entry);
+ }
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ push(ip);
+ DCHECK(info()->IsStub());
+ }
+
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+ __ AddP(ip, entry_offset, ip);
+ __ Jump(ip);
+ }
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+bool LCodeGen::GenerateSafepointTable() {
+ DCHECK(is_done());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
+ return !is_aborted();
+}
+
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DoubleRegister::from_code(code);
+}
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ DCHECK(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+ if (op->IsRegister()) {
+ return ToRegister(op->index());
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ Handle<Object> literal = constant->handle(isolate());
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
+ DCHECK(literal->IsNumber());
+ __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+ } else if (r.IsDouble()) {
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+ } else {
+ DCHECK(r.IsSmiOrTagged());
+ __ Move(scratch, literal);
+ }
+ return scratch;
+ } else if (op->IsStackSlot()) {
+ __ LoadP(scratch, ToMemOperand(op));
+ return scratch;
+ }
+ UNREACHABLE();
+ return scratch;
+}
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+ Register dst) {
+ DCHECK(IsInteger32(const_op));
+ HConstant* constant = chunk_->LookupConstant(const_op);
+ int32_t value = constant->Integer32Value();
+ if (IsSmi(const_op)) {
+ __ LoadSmiLiteral(dst, Smi::FromInt(value));
+ } else {
+ __ LoadIntLiteral(dst, value);
+ }
+}
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ DCHECK(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ DCHECK(r.IsSmiOrTagged());
+ return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ DCHECK(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ DCHECK(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ DCHECK(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ DCHECK(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand::Zero();
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand::Zero();
+}
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+ DCHECK(index < 0);
+ return -(index + 1) * kPointerSize;
+}
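+
+// In the helper above, index -1 (the first frameless slot) maps to sp
+// offset 0, index -2 to kPointerSize, and so on.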
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
+ } else {
+    // No eager stack frame: retrieve the parameter relative to the
+    // stack pointer instead.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ DCHECK(op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
+ } else {
+    // No eager stack frame: retrieve the parameter relative to the
+    // stack pointer instead.
+ return MemOperand(sp,
+ ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
+}
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+
+ WriteTranslation(environment->outer(), translation);
+ WriteTranslationFrame(environment, translation);
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ AddToTranslation(
+ environment, translation, value, environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+ }
+}
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation, LOperand* op,
+ bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment, translation, value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer, dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ int index = op->index();
+ if (is_tagged) {
+ translation->StoreStackSlot(index);
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(index);
+ } else {
+ translation->StoreInt32StackSlot(index);
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ int index = op->index();
+ translation->StoreDoubleStackSlot(index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ DCHECK(instr != NULL);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr, SaveFPRegsMode save_doubles) {
+ DCHECK(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ LoadP(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(instr->pointer_map(), argc,
+ Safepoint::kNoLazyDeopt);
+}
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ environment->set_has_been_used();
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index, translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type,
+ CRegister cr) {
+ LEnvironment* environment = instr->environment();
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ DCHECK(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ return;
+ }
+
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+
+ // Store the condition on the stack if necessary
+ if (cond != al) {
+ Label done;
+ __ LoadImmP(scratch, Operand::Zero());
+ __ b(NegateCondition(cond), &done, Label::kNear);
+ __ LoadImmP(scratch, Operand(1));
+ __ bind(&done);
+ __ push(scratch);
+ }
+
+ Label done;
+ __ Push(r3);
+ __ mov(scratch, Operand(count));
+ __ LoadW(r3, MemOperand(scratch));
+ __ Sub32(r3, r3, Operand(1));
+ __ Cmp32(r3, Operand::Zero());
+ __ bne(&no_deopt, Label::kNear);
+
+ __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
+ __ StoreW(r3, MemOperand(scratch));
+ __ Pop(r3);
+
+ if (cond != al) {
+ // Clean up the stack before the deoptimizer call
+ __ pop(scratch);
+ }
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+ __ b(&done);
+
+ __ bind(&no_deopt);
+ __ StoreW(r3, MemOperand(scratch));
+ __ Pop(r3);
+
+ if (cond != al) {
+ // Clean up the stack before the deoptimizer call
+ __ pop(scratch);
+ }
+
+ __ bind(&done);
+
+ if (cond != al) {
+ cond = ne;
+ __ CmpP(scratch, Operand::Zero());
+ }
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+ }
+
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+ DCHECK(info()->IsStub() || frame_is_built_);
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+ } else {
+ Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+ !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ jump_table_.is_empty() ||
+ !table_entry.IsEquivalentTo(jump_table_.last())) {
+ jump_table_.Add(table_entry, zone());
+ }
+ __ b(cond, &jump_table_.last().label /*, cr*/);
+ }
+}
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ CRegister cr) {
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
+}
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode) {
+ DCHECK(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_, label->hydrogen_value()->id(),
+ label->block_id(), LabelType(label));
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+}
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division
+  // by a power of 2 (calculating the remainder via an additional
+  // multiplication, which gets simplified to an 'and', plus a subtraction)
+  // should be faster, and this is exactly what GCC and clang emit.
+  // Nevertheless, benchmarks seem to indicate that positive dividends are
+  // heavily favored, so the branching version performs better.
+ HMod* hmod = instr->hydrogen();
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ CmpP(dividend, Operand::Zero());
+ __ bge(&dividend_is_not_negative, Label::kNear);
+ if (shift) {
+ // Note that this is correct even for kMinInt operands.
+ __ LoadComplementRR(dividend, dividend);
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ __ LoadComplementRR(dividend, dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ mov(dividend, Operand::Zero());
+ } else {
+ DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ }
+ __ b(&done, Label::kNear);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ if (shift) {
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+ } else {
+ __ mov(dividend, Operand::Zero());
+ }
+ __ bind(&done);
+}
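+
+// Illustrative sketch of the branching strategy above in portable C++,
+// assuming 1 <= k <= 31 (the unsigned negation wraps, so kMinInt is handled
+// correctly, matching the note in the emitted code):
+static int32_t ModByPowerOf2Sketch(int32_t n, int k) {
+  uint32_t mask = (uint32_t{1} << k) - 1;
+  if (n >= 0) return static_cast<int32_t>(static_cast<uint32_t>(n) & mask);
+  uint32_t m = (0u - static_cast<uint32_t>(n)) & mask;  // negate, mask low bits
+  return -static_cast<int32_t>(m);                      // negate back
+}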
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ Mul(result, result, ip);
+ __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&remainder_not_zero);
+ }
+}
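+
+// A minimal sketch of the identity used above, assuming truncating
+// (round-toward-zero) division, and d != 0 and d != kMinInt:
+static int32_t ModByConstSketch(int32_t n, int32_t d) {
+  int32_t ad = d < 0 ? -d : d;  // the remainder only depends on |d|
+  int32_t q = n / ad;           // what TruncatingDiv computes
+  return n - q * ad;            // the Mul + SubP sequence above
+}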
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Register result_reg = ToRegister(instr->result());
+ Label done;
+
+ // Check for x % 0.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(right_reg, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+  // Check for kMinInt % -1; dr would return undefined, which is not what we
+  // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Cmp32(left_reg, Operand(kMinInt));
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ Cmp32(right_reg, Operand(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ } else {
+ __ b(ne, &no_overflow_possible, Label::kNear);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+  // The divide instruction dr will implicitly use the register pair
+  // r0 & r1 below.
+ DCHECK(!left_reg.is(r1));
+ DCHECK(!right_reg.is(r1));
+ DCHECK(!result_reg.is(r1));
+ __ LoadRR(r0, left_reg);
+ __ srda(r0, Operand(32));
+  __ dr(r0, right_reg); // (R0:R1) / divisor; R0 = remainder
+
+  __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to result reg
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ bne(&done, Label::kNear);
+ __ Cmp32(left_reg, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+
+ __ bind(&done);
+}
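+
+// The srda/dr pair above in C++ terms (a sketch, assuming right != 0 and
+// that the kMinInt / -1 case was already filtered out):
+static void DrPairSketch(int32_t left, int32_t right, int32_t* rem,
+                         int32_t* quot) {
+  int64_t wide = static_cast<int64_t>(left);   // srda: sign-extend into r0:r1
+  *quot = static_cast<int32_t>(wide / right);  // dr leaves the quotient in r1
+  *rem = static_cast<int32_t>(wide % right);   // ...and the remainder in r0
+}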
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+ DCHECK(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ Cmp32(dividend, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ }
+
+ int32_t shift = WhichPowerOf2Abs(divisor);
+
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+ __ TestBitRange(dividend, shift - 1, 0, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ LoadComplementRR(result, dividend);
+ return;
+ }
+ if (shift == 0) {
+ __ LoadRR(result, dividend);
+ } else {
+ if (shift == 1) {
+ __ ShiftRight(result, dividend, Operand(31));
+ } else {
+ __ ShiftRightArith(result, dividend, Operand(31));
+ __ ShiftRight(result, result, Operand(32 - shift));
+ }
+ __ AddP(result, dividend, result);
+ __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ if (divisor < 0) __ LoadComplementRR(result, result);
+}
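+
+// A sketch of the bias-and-shift sequence above, assuming 1 <= k <= 31 and
+// arithmetic >> on int32_t: adding 2^k - 1 to negative dividends turns the
+// flooring arithmetic shift into round-toward-zero division.
+static int32_t DivByPowerOf2Sketch(int32_t n, int k) {
+  uint32_t sign = static_cast<uint32_t>(n >> 31);         // 0 or 0xFFFFFFFF
+  int32_t bias = static_cast<int32_t>(sign >> (32 - k));  // 0 or 2^k - 1
+  return (n + bias) >> k;
+}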
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register scratch = scratch0();
+ __ mov(ip, Operand(divisor));
+ __ Mul(scratch, result, ip);
+ __ Cmp32(scratch, dividend);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ }
+}
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ Cmp32(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero, Label::kNear);
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ Cmp32(dividend, Operand(kMinInt));
+ __ bne(&dividend_not_min_int, Label::kNear);
+ __ Cmp32(divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ __ bind(&dividend_not_min_int);
+ }
+
+ __ LoadRR(r0, dividend);
+ __ srda(r0, Operand(32));
+  __ dr(r0, divisor); // (R0:R1) / divisor; R0 = remainder, R1 = quotient
+
+ __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ // Deoptimize if remainder is not 0.
+ __ Cmp32(r0, Operand::Zero());
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ }
+}
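+
+// Why the -0 and overflow checks above are needed: in JS, 0 / -5 evaluates
+// to -0 and kMinInt / -1 to 2147483648, and neither value is representable
+// as an int32, so both cases must deoptimize to the double path.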
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 0) {
+ if (shift || !result.is(dividend)) {
+ __ ShiftRightArith(result, dividend, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ return;
+ }
+
+// If the divisor is negative, we have to negate and handle edge cases.
+#if V8_TARGET_ARCH_S390X
+ if (divisor == -1 && can_overflow) {
+ __ Cmp32(dividend, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ }
+#endif
+
+ __ LoadComplementRR(result, dividend);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ }
+
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_S390X
+ if (!can_overflow) {
+#endif
+ if (shift) {
+ __ ShiftRightArithP(result, result, Operand(shift));
+ }
+ return;
+#if !V8_TARGET_ARCH_S390X
+ }
+
+ // Dividing by -1 is basically negation, unless we overflow.
+ if (divisor == -1) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ return;
+ }
+
+ Label overflow_label, done;
+ __ b(overflow, &overflow_label, Label::kNear);
+ __ ShiftRightArith(result, result, Operand(shift));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ __ b(&done, Label::kNear);
+ __ bind(&overflow_label);
+ __ mov(result, Operand(kMinInt / divisor));
+ __ bind(&done);
+#endif
+}
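+
+// The positive-divisor fast path above works because an arithmetic right
+// shift is already flooring division, e.g. -7 >> 1 == -4 == floor(-7 / 2)
+// (truncating division would give -3), so no deopt or fixup is needed.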
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ DCHECK(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp32(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ DCHECK(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Cmp32(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ __ b(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ LoadComplementRR(result, result);
+ __ SubP(result, result, Operand(1));
+ __ bind(&done);
+}
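+
+// A minimal sketch of the adjustment above, assuming truncating '/',
+// d != 0, d != kMinInt, and no kMinInt / -1:
+static int32_t FlooringDivByConstSketch(int32_t n, int32_t d) {
+  if (n == 0 || (n < 0) == (d < 0)) return n / d;  // trunc == floor here
+  return (n + (d > 0 ? 1 : -1)) / d - 1;           // bias toward zero, then -1
+}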
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ const Register dividend = ToRegister(instr->dividend());
+ const Register divisor = ToRegister(instr->divisor());
+ Register result = ToRegister(instr->result());
+
+ DCHECK(!dividend.is(result));
+ DCHECK(!divisor.is(result));
+
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ Cmp32(dividend, Operand::Zero());
+ __ bne(&dividend_not_zero, Label::kNear);
+ __ Cmp32(divisor, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&dividend_not_zero);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Cmp32(dividend, Operand(kMinInt));
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ Cmp32(divisor, Operand(-1));
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ } else {
+ __ bne(&no_overflow_possible, Label::kNear);
+ __ LoadRR(result, dividend);
+ }
+ __ bind(&no_overflow_possible);
+ }
+
+ __ LoadRR(r0, dividend);
+ __ srda(r0, Operand(32));
+  __ dr(r0, divisor); // (R0:R1) / divisor; R0 = remainder, R1 = quotient
+
+ __ lr(result, r1); // Move quotient to result register
+
+ Label done;
+ Register scratch = scratch0();
+ // If both operands have the same sign then we are done.
+ __ Xor(scratch, dividend, divisor);
+  __ ltr(scratch, scratch); // use the 32-bit LoadAndTestRR even on 64-bit
+ __ bge(&done, Label::kNear);
+
+ // If there is no remainder then we are done.
+ __ lr(scratch, result);
+ __ msr(scratch, divisor);
+ __ Cmp32(dividend, scratch);
+ __ beq(&done, Label::kNear);
+
+ // We performed a truncating division. Correct the result.
+ __ Sub32(result, result, Operand(1));
+ __ bind(&done);
+}
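+
+// The sign/remainder fixup above as a sketch (assuming d != 0 and no
+// kMinInt / -1, which the code has already handled):
+static int32_t FlooringDivSketch(int32_t n, int32_t d) {
+  int32_t q = n / d;                      // truncating quotient from dr
+  if ((n ^ d) < 0 && q * d != n) q -= 1;  // signs differ, remainder nonzero
+  return q;
+}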
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+  // Unable to use madbr, as the intermediate value would not be rounded
+  // to double precision.
+ __ ldr(result, multiplier);
+ __ mdbr(result, multiplicand);
+ __ adbr(result, addend);
+}
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ // Unable to use msdbr, as the fused intermediate product would not be
+ // rounded to double precision.
+ __ ldr(result, multiplier);
+ __ mdbr(result, multiplicand);
+ __ sdbr(result, minuend);
+}
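+
+// Informally: a fused multiply-and-add such as madbr keeps the product at
+// full precision before the addition, while JavaScript semantics for
+// a * b + c require the product to be rounded to double first, so the
+// multiply and the add/subtract are emitted as two separately rounded
+// operations here.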
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
+ Register left = ToRegister(instr->left());
+ LOperand* right_op = instr->right();
+
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a zero constant is handled separately below.
+ // If the constant is negative and left is zero, the result should be -0.
+ __ CmpP(left, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+
+ switch (constant) {
+ case -1:
+ if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ LoadComplementRR(result, left);
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ LoadComplementRR(result, left);
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ }
+#endif
+ } else {
+ __ LoadComplementRR(result, left);
+ }
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is zero, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+ // A smi keeps its payload in the upper 32 bits, so the sign must be
+ // tested on the full register.
+ __ CmpP(left, Operand::Zero());
+ } else {
+ __ Cmp32(left, Operand::Zero());
+ }
+#else
+ __ Cmp32(left, Operand::Zero());
+#endif
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ __ LoadImmP(result, Operand::Zero());
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ // Branchless absolute value: mask is -1 for negative constants and 0
+ // otherwise, so (constant + mask) ^ mask negates negative values.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (base::bits::IsPowerOfTwo32(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ ShiftLeftP(result, left, Operand(shift));
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ ShiftLeftP(scratch, left, Operand(shift));
+ __ AddP(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ ShiftLeftP(scratch, left, Operand(shift));
+ __ SubP(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ LoadComplementRR(result, result);
+ } else {
+ // Generate standard code.
+ __ Move(result, left);
+ __ MulP(result, Operand(constant));
+ }
+ }
+
+ } else {
+ DCHECK(right_op->IsRegister());
+ Register right = ToRegister(right_op);
+
+ if (can_overflow) {
+#if V8_TARGET_ARCH_S390X
+ // result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ SmiUntag(scratch, right);
+ __ msgr(result, scratch);
+ } else {
+ __ LoadRR(result, left);
+ __ msgr(result, right);
+ }
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiTag(result);
+ }
+#else
+ // r0:scratch = scratch * right
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ } else {
+ // r0:scratch = scratch * right
+ __ LoadRR(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ }
+ __ TestIfInt32(r0, result, scratch);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+#endif
+ } else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
+ }
+
+ if (bailout_on_minus_zero) {
+ Label done;
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+ __ XorP(r0, left, right);
+ __ LoadAndTestRR(r0, r0);
+ __ bge(&done, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ XorP(r0, left, right);
+ __ Cmp32(r0, Operand::Zero());
+ __ bge(&done, Label::kNear);
+ }
+#endif
+ // Bail out if the result is minus zero.
+ __ CmpP(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
+}
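+
+// Informally, the constant-multiply lowering above turns, for instance,
+//   x * 5   into  (x << 2) + x      (5 == 4 + 1),
+//   x * 7   into  (x << 3) - x      (7 == 8 - 1),
+//   x * -4  into  -(x << 2),
+// and falls back to a general multiply for all other constants.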
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ LOperand* left_op = instr->left();
+ LOperand* right_op = instr->right();
+ DCHECK(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+
+ if (right_op->IsConstantOperand()) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, left, Operand(ToOperand(right_op)));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, left, Operand(ToOperand(right_op)));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, left, Operand(ToOperand(right_op)));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (right_op->IsStackSlot()) {
+ // The Reg-Mem form clobbers its register operand, so copy src to dst first.
+ if (!left.is(result)) __ LoadRR(result, left);
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, ToMemOperand(right_op));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, ToMemOperand(right_op));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, ToMemOperand(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ DCHECK(right_op->IsRegister());
+
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ AndP(result, left, ToRegister(right_op));
+ break;
+ case Token::BIT_OR:
+ __ OrP(result, left, ToRegister(right_op));
+ break;
+ case Token::BIT_XOR:
+ __ XorP(result, left, ToRegister(right_op));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ if (right_op->IsRegister()) {
+ // Mask the right_op operand.
+ __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
+ switch (instr->op()) {
+ case Token::ROR:
+ // rotate_right(a, b) == rotate_left(a, 32 - b)
+ __ LoadComplementRR(scratch, scratch);
+ __ rll(result, left, scratch, Operand(32));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ case Token::SAR:
+ __ ShiftRightArith(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ case Token::SHR:
+ __ ShiftRight(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ if (instr->can_deopt()) {
+#if V8_TARGET_ARCH_S390X
+ __ ltgfr(result, result);
+#else
+ __ ltr(result, result); // Set the <,==,> condition
+#endif
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ }
+ break;
+ case Token::SHL:
+ __ ShiftLeft(result, left, scratch);
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // Mask the right_op operand.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rll(result, left, Operand(32 - shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ ShiftRightArith(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHR:
+ if (shift_count != 0) {
+ __ ShiftRight(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ } else {
+ if (instr->can_deopt()) {
+ __ Cmp32(left, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ }
+ __ Move(result, left);
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ ShiftLeftP(result, left, Operand(shift_count));
+#else
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ if (shift_count != 1) {
+ __ ShiftLeft(result, left, Operand(shift_count - 1));
+ __ SmiTagCheckOverflow(result, result, scratch);
+ } else {
+ __ SmiTagCheckOverflow(result, left, scratch);
+ }
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+ } else {
+ __ ShiftLeft(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_S390X
+ __ lgfr(result, result);
+#endif
+ }
+ } else {
+ __ Move(result, left);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
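+
+// Informally: for SHR (unsigned >>>) with a shift amount of zero, the value
+// is unchanged, but a negative input would then have to be interpreted as a
+// uint32 above kMaxInt, which is not representable as an int32 result, so
+// the code deoptimizes on a negative input when can_deopt() is set.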
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+
+#if V8_TARGET_ARCH_S390X
+ // Overflow must be detected on the lower 32 bits, so on 64-bit we force
+ // 32-bit arithmetic operations to set the condition-code overflow bit
+ // properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ if (right->IsConstantOperand()) {
+ if (!isInteger || !checkOverflow)
+ __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
+ else
+ __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else if (right->IsRegister()) {
+ if (!isInteger)
+ __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
+ else if (!checkOverflow)
+ __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
+ ToRegister(right));
+ else
+ __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
+ } else {
+ if (!left->Equals(instr->result()))
+ __ LoadRR(ToRegister(result), ToRegister(left));
+
+ MemOperand mem = ToMemOperand(right);
+ if (!isInteger) {
+ __ SubP(ToRegister(result), mem);
+ } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+ // We want to read the low-order 32 bits directly from memory.
+ MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+ if (checkOverflow) {
+ __ Sub32(ToRegister(result), Upper32Mem);
+ } else {
+ __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
+ }
+ }
+ }
+
+#if V8_TARGET_ARCH_S390X
+ if (isInteger && checkOverflow)
+ __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ }
+}
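+
+// Roughly: on 64-bit, Sub32 performs the subtraction on the low 32 bits so
+// that the condition code reflects 32-bit overflow, and the following lgfr
+// sign-extends the 32-bit result back to the full 64-bit register.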
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+ right->IsConstantOperand());
+
+#if V8_TARGET_ARCH_S390X
+ // Overflow must be detected on the lower 32 bits, so on 64-bit we force
+ // 32-bit arithmetic operations to set the condition-code overflow bit
+ // properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ Operand right_operand = ToOperand(right);
+ __ mov(r0, right_operand);
+
+ if (!checkOverflow) {
+ __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
+ } else {
+ __ Sub32(ToRegister(result), r0, ToRegister(left));
+ }
+}
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ DCHECK(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ uint64_t bits = instr->bits();
+ __ LoadDoubleLiteral(result, bits, scratch0());
+}
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> object = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ Move(ToRegister(instr->result()), object);
+}
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ DCHECK(!scratch.is(string));
+ DCHECK(!scratch.is(ToRegister(index)));
+ // TODO(joransiu) : Fold Add into FieldMemOperand
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ AddP(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
+ __ AddP(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ __ AndP(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ CmpP(scratch,
+ Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+ : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ llc(result, operand);
+ } else {
+ __ llh(result, operand);
+ }
+}
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type
+ : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ stc(value, operand);
+ } else {
+ __ sth(value, operand);
+ }
+}
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+ instr->hydrogen()->representation().IsExternal());
+#if V8_TARGET_ARCH_S390X
+ // Overflow must be detected on the lower 32 bits, so on 64-bit we force
+ // 32-bit arithmetic operations to set the condition-code overflow bit
+ // properly. The result is then sign-extended.
+ bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#else
+ bool checkOverflow = true;
+#endif
+
+ if (right->IsConstantOperand()) {
+ if (!isInteger || !checkOverflow)
+ __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
+ else
+ __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
+ } else if (right->IsRegister()) {
+ if (!isInteger)
+ __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
+ else if (!checkOverflow)
+ __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
+ ToRegister(right));
+ else
+ __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
+ } else {
+ if (!left->Equals(instr->result()))
+ __ LoadRR(ToRegister(result), ToRegister(left));
+
+ MemOperand mem = ToMemOperand(right);
+ if (!isInteger) {
+ __ AddP(ToRegister(result), mem);
+ } else {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+ // We want to read the low-order 32 bits directly from memory.
+ MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ MemOperand Upper32Mem = ToMemOperand(right);
+#endif
+ if (checkOverflow) {
+ __ Add32(ToRegister(result), Upper32Mem);
+ } else {
+ __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
+ }
+ }
+ }
+
+#if V8_TARGET_ARCH_S390X
+ if (isInteger && checkOverflow)
+ __ lgfr(ToRegister(result), ToRegister(result));
+#endif
+ // Deoptimize on overflow.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ }
+}
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ HMathMinMax::Operation operation = instr->hydrogen()->operation();
+ Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Register left_reg = ToRegister(left);
+ Register right_reg = EmitLoadRegister(right, ip);
+ Register result_reg = ToRegister(instr->result());
+ Label return_left, done;
+#if V8_TARGET_ARCH_S390X
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+ __ CmpP(left_reg, right_reg);
+#if V8_TARGET_ARCH_S390X
+ } else {
+ __ Cmp32(left_reg, right_reg);
+ }
+#endif
+ __ b(cond, &return_left, Label::kNear);
+ __ Move(result_reg, right_reg);
+ __ b(&done, Label::kNear);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister left_reg = ToDoubleRegister(left);
+ DoubleRegister right_reg = ToDoubleRegister(right);
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Label check_nan_left, check_zero, return_left, return_right, done;
+ __ cdbr(left_reg, right_reg);
+ __ bunordered(&check_nan_left, Label::kNear);
+ __ beq(&check_zero);
+ __ b(cond, &return_left, Label::kNear);
+ __ b(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(left_reg, kDoubleRegZero);
+ __ bne(&return_left, Label::kNear); // left == right != 0.
+
+ // At this point, both left and right are either 0 or -0.
+ // N.B. The following works because +0 + -0 == +0
+ if (operation == HMathMinMax::kMathMin) {
+ // For min we want logical-or of sign bit: -(-L + -R)
+ __ lcdbr(left_reg, left_reg);
+ __ ldr(result_reg, left_reg);
+ if (left_reg.is(right_reg)) {
+ __ adbr(result_reg, right_reg);
+ } else {
+ __ sdbr(result_reg, right_reg);
+ }
+ __ lcdbr(result_reg, result_reg);
+ } else {
+ // For max we want logical-and of sign bit: (L + R)
+ __ ldr(result_reg, left_reg);
+ __ adbr(result_reg, right_reg);
+ }
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ cdbr(left_reg, left_reg);
+ __ bunordered(&return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ if (!right_reg.is(result_reg)) {
+ __ ldr(result_reg, right_reg);
+ }
+ __ b(&done, Label::kNear);
+
+ __ bind(&return_left);
+ if (!left_reg.is(result_reg)) {
+ __ ldr(result_reg, left_reg);
+ }
+ __ bind(&done);
+ }
+}
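+
+// Informally, the +/-0 handling above relies on IEEE addition:
+//   max: (+0) + (-0) == +0 and (-0) + (-0) == -0, so adding the operands
+//        ANDs their sign bits;
+//   min: -((-L) + (-R)) ORs the sign bits, e.g. for min(+0, -0):
+//        -((-0) + (+0)) == -(+0) == -0.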
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ // All operations except MOD are computed in-place.
+ DCHECK(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ adbr(result, right);
+ break;
+ case Token::SUB:
+ __ sdbr(result, right);
+ break;
+ case Token::MUL:
+ __ mdbr(result, right);
+ break;
+ case Token::DIV:
+ __ ddbr(result, right);
+ break;
+ case Token::MOD: {
+ __ PrepareCallCFunction(0, 2, scratch0());
+ __ MovToFloatParameters(left, right);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
+ if (right_block == left_block || cond == al) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ b(cond, chunk_->GetAssemblyLabel(left_block));
+ __ b(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(true_block));
+}
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(false_block));
+}
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (r.IsInteger32()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ Cmp32(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ Register reg = ToRegister(instr->value());
+ __ CmpP(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (r.IsDouble()) {
+ DCHECK(!info()->IsStub());
+ DoubleRegister reg = ToDoubleRegister(instr->value());
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(reg, kDoubleRegZero);
+ // Test the double value. Zero and NaN are false.
+ Condition lt_gt = static_cast<Condition>(lt | gt);
+
+ EmitBranch(instr, lt_gt);
+ } else {
+ DCHECK(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsBoolean()) {
+ DCHECK(!info()->IsStub());
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ DCHECK(!info()->IsStub());
+ __ CmpP(reg, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else if (type.IsJSArray()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, al);
+ } else if (type.IsHeapNumber()) {
+ DCHECK(!info()->IsStub());
+ __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(dbl_scratch, kDoubleRegZero);
+ Condition lt_gt = static_cast<Condition>(lt | gt);
+ EmitBranch(instr, lt_gt);
+ } else if (type.IsString()) {
+ DCHECK(!info()->IsStub());
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ CmpP(ip, Operand::Zero());
+ EmitBranch(instr, ne);
+ } else {
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ // undefined -> false.
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ beq(instr->TrueLabel(chunk_));
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ __ beq(instr->FalseLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ __ CmpP(reg, Operand::Zero());
+ __ beq(instr->FalseLabel(chunk_));
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a Smi -> deopt.
+ __ TestIfSmi(reg);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ }
+
+ const Register map = scratch0();
+ if (expected.NeedsMap()) {
+ __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ __ bne(instr->FalseLabel(chunk_));
+ }
+ }
+
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
+ __ bge(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ bge(&not_string, Label::kNear);
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+ __ CmpP(ip, Operand::Zero());
+ __ bne(instr->TrueLabel(chunk_));
+ __ b(instr->FalseLabel(chunk_));
+ __ bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ // heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ bne(&not_heap_number, Label::kNear);
+ __ LoadDouble(dbl_scratch,
+ FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ lzdr(kDoubleRegZero);
+ __ cdbr(dbl_scratch, kDoubleRegZero);
+ __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
+ __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
+ __ b(instr->TrueLabel(chunk_));
+ __ bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ }
+ }
+ }
+}
+
+void LCodeGen::EmitGoto(int block) {
+ if (!IsNextEmittedBlock(block)) {
+ __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+ Condition cond = kNoCondition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = lt;
+ break;
+ case Token::GT:
+ cond = gt;
+ break;
+ case Token::LTE:
+ cond = le;
+ break;
+ case Token::GTE:
+ cond = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ bool is_unsigned =
+ instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+ instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+ Condition cond = TokenToCondition(instr->op());
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ // Compare left and right operands as doubles and load the
+ // resulting flags into the normal status register.
+ __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
+ // If a NaN is involved, i.e. the result is unordered,
+ // jump to false block label.
+ __ bunordered(instr->FalseLabel(chunk_));
+ } else {
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(left), ToOperand(right));
+ } else {
+ __ Cmp32(ToRegister(left), ToOperand(right));
+ }
+ }
+ } else if (left->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ } else {
+ __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(right), ToOperand(left));
+ } else {
+ __ Cmp32(ToRegister(right), ToOperand(left));
+ }
+ }
+ // We commuted the operands, so commute the condition.
+ cond = CommuteCondition(cond);
+ } else if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_unsigned) {
+ __ CmpLogicalP(ToRegister(left), ToRegister(right));
+ } else {
+ __ CmpP(ToRegister(left), ToRegister(right));
+ }
+ } else {
+ if (is_unsigned) {
+ __ CmpLogical32(ToRegister(left), ToRegister(right));
+ } else {
+ __ Cmp32(ToRegister(left), ToRegister(right));
+ }
+ }
+ }
+ EmitBranch(instr, cond);
+ }
+}
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ __ CmpP(left, right);
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ CmpP(input_reg, Operand(factory()->the_hole_value()));
+ EmitBranch(instr, eq);
+ return;
+ }
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ __ cdbr(input_reg, input_reg);
+ EmitFalseBranch(instr, ordered);
+
+ Register scratch = scratch0();
+ // Convert to GPR and examine the upper 32 bits
+ __ lgdr(scratch, input_reg);
+ __ srlg(scratch, scratch, Operand(32));
+ __ Cmp32(scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq);
+}
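+
+// Informally: the hole in a double array is encoded as a NaN with a fixed
+// bit pattern, so after the unordered (NaN) check above, comparing the
+// upper 32 bits of the value against kHoleNanUpper32 distinguishes the
+// hole from an ordinary computed NaN.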
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp());
+
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register input_reg = EmitLoadRegister(instr->value(), ip);
+ __ TestIfSmi(input_reg);
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ EmitBranch(instr, ne);
+}
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+}
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ DCHECK(from == to || to == LAST_TYPE);
+ return from;
+}
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return ge;
+ if (from == FIRST_TYPE) return le;
+ UNREACHABLE();
+ return eq;
+}
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register scratch = scratch0();
+ Register input = ToRegister(instr->value());
+
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+ __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+ __ AndP(r0, scratch);
+ EmitBranch(instr, eq);
+}
+
+// Branches to a label or falls through with the answer in flags. Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+ Handle<String> class_name, Register input,
+ Register temp, Register temp2) {
+ DCHECK(!input.is(temp));
+ DCHECK(!input.is(temp2));
+ DCHECK(!temp.is(temp2));
+
+ __ JumpIfSmi(input, is_false);
+
+ __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+ __ bge(is_true);
+ } else {
+ __ bge(is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ Register instance_type = ip;
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
+ __ bne(is_true);
+ } else {
+ __ bne(is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(temp,
+ FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ __ CmpP(temp, Operand(class_name));
+ // End with the answer in flags.
+}
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->temp());
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+ class_name, input, temp, temp2);
+
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ __ mov(temp, Operand(instr->map()));
+ __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ EmitBranch(instr, eq);
+}
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+ InstanceOfStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_instance_type = ip;
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
+
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ TestIfSmi(object);
+ EmitFalseBranch(instr, eq);
+ }
+ // Loop through the {object}s prototype chain looking for the {prototype}.
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ LoadlB(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ CmpP(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(&loop);
+}
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // This instruction also signals that no inlined smi code was generated.
+ __ CmpP(r2, Operand::Zero());
+
+ Condition condition = ComputeCompareCondition(op);
+ Label true_value, done;
+
+ __ b(condition, &true_value, Label::kNear);
+
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ b(&done, Label::kNear);
+
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r2. Since we're leaving
+ // the code managed by the register allocator and tearing down the frame,
+ // it's safe to write to the context register.
+ __ push(r2);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit);
+ }
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+ if (NeedsEagerFrame()) {
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ } else if (sp_delta != 0) {
+ // TODO(joransiu): Clean this up into Macro Assembler
+ if (sp_delta >= 0 && sp_delta < 4096)
+ __ la(sp, MemOperand(sp, sp_delta));
+ else
+ __ lay(sp, MemOperand(sp, sp_delta));
+ }
+ } else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
+ Register reg = ToRegister(instr->parameter_count());
+ // The argument count parameter is a smi.
+ if (NeedsEagerFrame()) {
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ }
+ __ SmiToPtrArrayOffset(r0, reg);
+ __ AddP(sp, sp, r0);
+ }
+
+ __ Ret();
+}
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = LoadDescriptor::SlotRegister();
+ DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(r2));
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ // No need to allocate this register.
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+ Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = ToRegister(instr->temp_slot());
+
+ AllowDeferredHandleDereference vector_structure_check;
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->global_object())
+ .is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ } else {
+ Label skip;
+ __ bne(&skip, Label::kNear);
+ __ mov(result, Operand(factory()->undefined_value()));
+ __ bind(&skip);
+ }
+ }
+}
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadP(scratch, target);
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ } else {
+ __ bne(&skip_assignment);
+ }
+ }
+
+ __ StoreP(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed);
+ }
+
+ __ bind(&skip_assignment);
+}
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ MemOperand operand = MemOperand(object, offset);
+ __ LoadRepresentation(result, operand, access.representation(), r0);
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ld(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (!access.IsInobject()) {
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ object = result;
+ }
+
+ Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
+ }
+#endif
+
+ __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+ r0);
+}
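+
+// Informally: on 64-bit, a smi stores its 32-bit payload in the upper half
+// of the word, so when only an Integer32 is needed, SmiWordOffset selects
+// the word that holds the payload and it is loaded directly, skipping the
+// untagging step.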
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ // Name is always in r4.
+ __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+ EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register scratch = scratch0();
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Get the prototype or initial map from the function.
+ __ LoadP(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ __ bne(&done, Label::kNear);
+
+ // Get the prototype from the initial map.
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+ // All done.
+ __ bind(&done);
+}
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting the index from the length accounts for one of them; add
+ // one more to account for the other.
+ if (instr->length()->IsConstantOperand()) {
+ int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ LoadP(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SubP(result, index, Operand(const_length + 1));
+ __ LoadComplementRR(result, result);
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ SubP(result, length, Operand(loc));
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ } else {
+ __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+ } else {
+ Register length = ToRegister(instr->length());
+ Register index = ToRegister(instr->index());
+ __ SubP(result, length, index);
+ __ AddP(result, result, Operand(1));
+ __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
+ __ LoadP(result, MemOperand(arguments, result));
+ }
+}
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+ bool use_scratch = false;
+
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ base_offset += constant_key << element_size_shift;
+ if (!is_int20(base_offset)) {
+ __ mov(scratch0(), Operand(base_offset));
+ base_offset = 0;
+ use_scratch = true;
+ }
+ } else {
+ __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
+ use_scratch = true;
+ }
+ if (elements_kind == FLOAT32_ELEMENTS) {
+ if (!use_scratch) {
+ __ ldeb(result, MemOperand(external_pointer, base_offset));
+ } else {
+ __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
+ }
+ } else { // i.e. elements_kind == FLOAT64_ELEMENTS
+ if (!use_scratch) {
+ __ ld(result, MemOperand(external_pointer, base_offset));
+ } else {
+ __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
+ }
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case INT8_ELEMENTS:
+ __ LoadB(result, mem_operand);
+ break;
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ LoadlB(result, mem_operand);
+ break;
+ case INT16_ELEMENTS:
+ __ LoadHalfWordP(result, mem_operand);
+ break;
+ case UINT16_ELEMENTS:
+ __ LoadLogicalHalfWordP(result, mem_operand);
+ break;
+ case INT32_ELEMENTS:
+ __ LoadW(result, mem_operand, r0);
+ break;
+ case UINT32_ELEMENTS:
+ __ LoadlW(result, mem_operand, r0);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ CmpLogical32(result, Operand(0x80000000));
+ DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ bool use_scratch = false;
+ intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ if (!key_is_constant) {
+ use_scratch = true;
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ }
+
+ // Memory references support up to a 20-bit signed displacement in RXY
+ // form. Include Register::kExponentOffset in the check so we are
+ // guaranteed not to overflow the displacement later.
+ if (!is_int20(base_offset + Register::kExponentOffset)) {
+ use_scratch = true;
+ if (key_is_constant) {
+ __ mov(scratch, Operand(base_offset));
+ } else {
+ __ AddP(scratch, Operand(base_offset));
+ }
+ base_offset = 0;
+ }
+
+ if (!use_scratch) {
+ __ ld(result, MemOperand(elements, base_offset));
+ } else {
+ __ ld(result, MemOperand(scratch, elements, base_offset));
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (!use_scratch) {
+ __ LoadlW(r0,
+ MemOperand(elements, base_offset + Register::kExponentOffset));
+ } else {
+ __ LoadlW(r0, MemOperand(scratch, elements,
+ base_offset + Register::kExponentOffset));
+ }
+ __ Cmp32(r0, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ HLoadKeyed* hinstr = instr->hydrogen();
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ int offset = instr->base_offset();
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ } else {
+ Register key = ToRegister(instr->key());
+ // Even though the HLoadKeyed instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(scratch, key);
+ } else {
+ __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+ }
+ }
+
+ bool requires_hole_check = hinstr->RequiresHoleCheck();
+ Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsInteger32() &&
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+ DCHECK(!requires_hole_check);
+ // Read int value directly from upper half of the smi.
+ offset = SmiWordOffset(offset);
+ }
+#endif
+
+ if (instr->key()->IsConstantOperand()) {
+ __ LoadRepresentation(result, MemOperand(elements, offset), representation,
+ r1);
+ } else {
+ __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
+ representation, r1);
+ }
+
+ // Check for the hole value.
+ if (requires_hole_check) {
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+ __ TestIfSmi(result);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ }
+ } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+ DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+ Label done;
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ CmpP(result, scratch);
+ __ bne(&done);
+ if (info()->IsStub()) {
+ // A stub can safely convert the hole to undefined only if the array
+ // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // it needs to bail out.
+ __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+ __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ }
+}
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_fixed_typed_array()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_smi,
+ int constant_key,
+ int element_size_shift,
+ int base_offset) {
+ Register scratch = scratch0();
+
+ if (key_is_constant) {
+ int offset = (base_offset + (constant_key << element_size_shift));
+ if (!is_int20(offset)) {
+ __ mov(scratch, Operand(offset));
+ return MemOperand(base, scratch);
+ } else {
+ return MemOperand(base,
+ (constant_key << element_size_shift) + base_offset);
+ }
+ }
+
+ bool needs_shift =
+ (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
+
+ if (needs_shift) {
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ } else {
+ scratch = key;
+ }
+
+ if (!is_int20(base_offset)) {
+ __ AddP(scratch, Operand(base_offset));
+ base_offset = 0;
+ }
+ return MemOperand(scratch, base, base_offset);
+}
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ __ lay(result, MemOperand(sp, -2 * kPointerSize));
+ } else if (instr->hydrogen()->arguments_adaptor()) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label done, adapted;
+ __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(
+ result,
+ MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+  // Result is the frame pointer of this frame if not adapted, and of the
+  // real frame below the adaptor frame if adapted.
+ __ beq(&adapted, Label::kNear);
+ __ LoadRR(result, fp);
+ __ b(&done, Label::kNear);
+
+ __ bind(&adapted);
+ __ LoadRR(result, scratch);
+ __ bind(&done);
+ } else {
+ __ LoadRR(result, fp);
+ }
+}
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elem = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+
+ Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ CmpP(fp, elem);
+ __ mov(result, Operand(scope()->num_parameters()));
+ __ beq(&done, Label::kNear);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(result,
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(result);
+
+ // Argument length is in result register.
+ __ bind(&done);
+}
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
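+  // For illustration, at the JS level (sloppy vs. strict mode):
+  //   (function() { return this; }).call(null)               // global proxy
+  //   (function() { "use strict"; return this; }).call(null) // null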
+ Label global_object, result_in_receiver;
+
+ if (!instr->hydrogen()->known_function()) {
+ // Do not transform the receiver to object for strict mode
+ // functions or builtins.
+ __ LoadP(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(scratch, FieldMemOperand(
+ scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
+ __ bne(&result_in_receiver, Label::kNear);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ beq(&global_object, Label::kNear);
+ __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ beq(&global_object, Label::kNear);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ TestIfSmi(receiver);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+
+ __ b(&result_in_receiver, Label::kNear);
+ __ bind(&global_object);
+ __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok, Label::kNear);
+ __ bind(&result_in_receiver);
+ __ LoadRR(result, receiver);
+ __ bind(&result_ok);
+ }
+}
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ DCHECK(receiver.is(r2)); // Used for parameter count.
+ DCHECK(function.is(r3)); // Required by InvokeFunction.
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ CmpLogicalP(length, Operand(kArgumentsLimit));
+ DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ LoadRR(receiver, length);
+  // The arguments start one pointer size past the elements pointer.
+ __ AddP(elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ CmpP(length, Operand::Zero());
+ __ beq(&invoke, Label::kNear);
+ __ bind(&loop);
+ __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(elements, r1));
+ __ push(scratch);
+ __ BranchOnCount(length, &loop);
+
+ __ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(r2);
+ // It is safe to use r5, r6 and r7 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r5 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r5, r6, r7);
+ }
+
+ DCHECK(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in receiver which is r2, as expected
+ // by InvokeFunction.
+ ParameterCount actual(receiver);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
+}
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ Register argument_reg = EmitLoadRegister(argument, ip);
+ __ push(argument_reg);
+ }
+}
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ DCHECK(result.is(cp));
+ }
+}
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ push(scratch0());
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+ __ push(scratch0());
+ CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ bool is_tail_call, LInstruction* instr) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ Register function_reg = r3;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, Operand(arity));
+
+ bool is_self_call = function.is_identical_to(info()->closure());
+
+ // Invoke function.
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
+ } else {
+ __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ if (is_tail_call) {
+ __ JumpToJSEntry(ip);
+ } else {
+ __ CallJSEntry(ip);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ }
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(arity);
+ ParameterCount expected(formal_parameter_count);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
+ }
+}
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ DCHECK(instr->context() != NULL);
+ DCHECK(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+ Label done;
+ Register exponent = scratch0();
+ scratch = no_reg;
+ __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it.
+ __ Cmp32(exponent, Operand::Zero());
+ // Move the input to the result if necessary.
+ __ Move(result, input);
+ __ bge(&done);
+
+ // Input is negative. Reverse its sign.
+ // Preserve the value of all registers.
+ {
+ PushSafepointRegistersScope scope(this);
+
+ // Registers were saved at the safepoint, so we can use
+ // many scratch registers.
+ Register tmp1 = input.is(r3) ? r2 : r3;
+ Register tmp2 = input.is(r4) ? r2 : r4;
+ Register tmp3 = input.is(r5) ? r2 : r5;
+ Register tmp4 = input.is(r6) ? r2 : r6;
+
+ // exponent: floating point exponent value.
+
+ Label allocated, slow;
+ __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+ __ b(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
+    // Set the pointer to the new heap number in tmp1.
+ if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input, input);
+ __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+ __ bind(&allocated);
+ // exponent: floating point exponent value.
+ // tmp1: allocated heap number.
+
+ // Clear the sign bit.
+ __ nilf(exponent, Operand(~HeapNumber::kSignMask));
+ __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
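+    // Illustrative note: in IEEE-754 terms this computes |x|. The sign is
+    // the top bit of the exponent word, so clearing HeapNumber::kSignMask
+    // there and copying the mantissa word unchanged yields the absolute
+    // value.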
+
+ __ StoreToSafepointRegisterSlot(tmp1, result);
+ }
+
+ __ bind(&done);
+}
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ CmpP(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done, Label::kNear);
+ __ LoadComplementRR(result, result);
+ // Deoptimize on overflow.
+ DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ __ bind(&done);
+}
+
+#if V8_TARGET_ARCH_S390X
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ Cmp32(input, Operand::Zero());
+ __ Move(result, input);
+ __ bge(&done, Label::kNear);
+
+ // Deoptimize on overflow.
+ __ Cmp32(input, Operand(0x80000000));
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+
+ __ LoadComplementRR(result, result);
+ __ bind(&done);
+}
+#endif
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMathAbs* instr_;
+ };
+
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ lpdbr(result, input);
+#if V8_TARGET_ARCH_S390X
+ } else if (r.IsInteger32()) {
+ EmitInteger32MathAbs(instr);
+ } else if (r.IsSmi()) {
+#else
+ } else if (r.IsSmiOrInteger32()) {
+#endif
+ EmitMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->value());
+ // Smi check.
+ __ JumpIfNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitMathAbs(instr);
+ __ bind(deferred->exit());
+ }
+}
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register input_high = scratch0();
+ Register scratch = ip;
+ Label done, exact;
+
+ __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+ &exact);
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+ __ bind(&exact);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0: a zero result with a negative input_high means the
+    // input was -0.0, which an int32 cannot represent.
+ __ CmpP(result, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ Cmp32(input_high, Operand::Zero());
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ __ bind(&done);
+}
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+ DoubleRegister input_plus_dot_five = double_scratch1;
+ Register scratch1 = scratch0();
+ Register scratch2 = ip;
+ DoubleRegister dot_five = double_scratch0();
+ Label convert, done;
+
+ __ LoadDoubleLiteral(dot_five, 0.5, r0);
+ __ lpdbr(double_scratch1, input);
+ __ cdbr(double_scratch1, dot_five);
+ DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+  // If the input is in [-0.5, -0], the result is -0.
+  // If the input is in [+0, +0.5), the result is +0.
+  // If the input is +0.5, the result is 1.
+ __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // [-0.5, -0] (negative) yields minus zero.
+ __ TestDoubleSign(input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ Label return_zero;
+ __ cdbr(input, dot_five);
+ __ bne(&return_zero, Label::kNear);
+ __ LoadImmP(result, Operand(1)); // +0.5.
+ __ b(&done, Label::kNear);
+  // Remaining cases: [+0, +0.5) or [-0.5, +0.5), depending on the
+  // kBailoutOnMinusZero flag.
+ __ bind(&return_zero);
+ __ LoadImmP(result, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ __ bind(&convert);
+ __ ldr(input_plus_dot_five, input);
+ __ adbr(input_plus_dot_five, dot_five);
+ // Reuse dot_five (double_scratch0) as we no longer need this value.
+ __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+ double_scratch0(), &done, &done);
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ __ bind(&done);
+}
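+// Worked examples of the semantics implemented above (illustrative only):
+//   Math.round(0.4)  -> +0   (|0.4| <= 0.5 and not equal to +0.5)
+//   Math.round(-0.4) -> -0   (deopts here if kBailoutOnMinusZero)
+//   Math.round(0.5)  -> 1
+//   Math.round(2.5)  -> 3    (floor(2.5 + 0.5) on the convert path)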
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DoubleRegister output_reg = ToDoubleRegister(instr->result());
+
+ // Round double to float
+ __ ledbr(output_reg, input_reg);
+ // Extend from float to double
+ __ ldebr(output_reg, output_reg);
+}
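+// For illustration: values exactly representable as float32 (e.g. 1.5)
+// survive the ledbr/ldebr round trip unchanged, while others are rounded,
+// e.g. Math.fround(0.1) == 0.10000000149011612.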
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ sqdbr(result, input);
+}
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = double_scratch0();
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label skip, done;
+
+ __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+ __ cdbr(input, temp);
+ __ bne(&skip, Label::kNear);
+ __ lcdbr(result, temp);
+ __ b(&done, Label::kNear);
+
+  // Add +0 to convert -0 to +0 (Math.pow(-0, 0.5) is +0, but sqrt(-0)
+  // would yield -0).
+ __ bind(&skip);
+ __ ldr(result, input);
+ __ lzdr(kDoubleRegZero);
+ __ adbr(result, kDoubleRegZero);
+ __ sqdbr(result, result);
+ __ bind(&done);
+}
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+ DCHECK(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d2));
+ DCHECK(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(tagged_exponent));
+ DCHECK(ToDoubleRegister(instr->left()).is(d1));
+ DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(tagged_exponent, &no_deopt);
+ __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ __ bind(&no_deopt);
+ MathPowStub stub(isolate(), MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(isolate(), MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ DCHECK(exponent_type.IsDouble());
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+ double_scratch2, temp1, temp2, scratch0());
+}
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+ 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done;
+ __ llgfr(result, input);
+ __ flogr(r0, result);
+ __ LoadRR(result, r0);
+ __ CmpP(r0, Operand::Zero());
+ __ beq(&done, Label::kNear);
+ __ SubP(result, Operand(32));
+ __ bind(&done);
+}
+
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
+ } else {
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ LoadRR(fp, scratch2);
+ __ LoadP(caller_args_count_reg,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
+
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->function()).is(r3));
+ DCHECK(instr->HasPointerMap());
+
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
+
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use r5, r6 and r7 as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) r5 (new.target) will be initialized below.
+ PrepareForTailCall(actual, r5, r6, r7);
+ }
+
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(r3, no_reg, actual, flag, generator);
+ } else {
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
+ }
+}
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+ }
+ } else {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(ip);
+ }
+ generator.AfterCall();
+ }
+}
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->constructor()).is(r3));
+ DCHECK(ToRegister(instr->result()).is(r2));
+
+ __ mov(r2, Operand(instr->arity()));
+ if (instr->arity() == 1) {
+ // We only need the allocation site for the case we have a length argument.
+ // The case may bail out to the runtime, which will determine the correct
+ // elements kind with the site.
+ __ Move(r4, instr->hydrogen()->site());
+ } else {
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ }
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+      // We might need a holey elements kind here;
+      // look at the first (length) argument.
+ __ LoadP(r7, MemOperand(sp, 0));
+ __ CmpP(r7, Operand::Zero());
+ __ beq(&packed_case, Label::kNear);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+ override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ b(&done, Label::kNear);
+ __ bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
+}
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lay(code_object,
+ MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lay(result, MemOperand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lay(result, MemOperand(base, offset));
+ }
+}
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ HStoreNamedField* hinstr = instr->hydrogen();
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ Register scratch = scratch0();
+ HObjectAccess access = hinstr->access();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = MemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ return;
+ }
+
+ __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_S390X
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+ IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DCHECK(!hinstr->has_transition());
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DCHECK(offset >= 0);
+ __ std(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ if (hinstr->has_transition()) {
+ Handle<Map> transition = hinstr->transition_map();
+ AddDeprecationDependency(transition);
+ __ mov(scratch, Operand(transition));
+ __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+ if (hinstr->NeedsWriteBarrierForMap()) {
+ Register temp = ToRegister(instr->temp());
+ // Update the write barrier for the map field.
+ __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+ kSaveFPRegs);
+ }
+ }
+
+ // Do the store.
+ Register record_dest = object;
+ Register record_value = no_reg;
+ Register record_scratch = scratch;
+#if V8_TARGET_ARCH_S390X
+ if (FLAG_unbox_double_fields && representation.IsDouble()) {
+ DCHECK(access.IsInobject());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ std(value, FieldMemOperand(object, offset));
+ if (hinstr->NeedsWriteBarrier()) {
+ record_value = ToRegister(instr->value());
+ }
+ } else {
+ if (representation.IsSmi() &&
+ hinstr->value()->representation().IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ // 64-bit Smi optimization
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ representation = Representation::Integer32();
+ }
+#endif
+ if (access.IsInobject()) {
+ Register value = ToRegister(instr->value());
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_value = value;
+ } else {
+ Register value = ToRegister(instr->value());
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ StoreRepresentation(value, operand, representation, r0);
+ record_dest = scratch;
+ record_value = value;
+ record_scratch = object;
+ }
+#if V8_TARGET_ARCH_S390X
+ }
+#endif
+
+ if (hinstr->NeedsWriteBarrier()) {
+ __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+ GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+ hinstr->PointersToHereCheckForValue());
+ }
+}
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+ }
+
+ __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+ Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Representation representation = instr->hydrogen()->length()->representation();
+ DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+ DCHECK(representation.IsSmiOrInteger32());
+
+ Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
+ if (instr->length()->IsConstantOperand()) {
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+ Register index = ToRegister(instr->index());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
+ } else {
+ __ CmpLogical32(index, Operand(length));
+ }
+ cc = CommuteCondition(cc);
+ } else if (instr->index()->IsConstantOperand()) {
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
+ } else {
+ __ CmpLogical32(length, Operand(index));
+ }
+ } else {
+ Register index = ToRegister(instr->index());
+ Register length = ToRegister(instr->length());
+ if (representation.IsSmi()) {
+ __ CmpLogicalP(length, index);
+ } else {
+ __ CmpLogical32(length, index);
+ }
+ }
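+  // For example, with equality disallowed and a constant length of 10, this
+  // emits CmpLogical32(index, Operand(10)) and, after commuting the
+  // condition, deopts on ge, i.e. whenever index >= 10.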
+ if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+ Label done;
+ __ b(NegateCondition(cc), &done, Label::kNear);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ }
+}
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset();
+
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+ Register address = scratch0();
+ DoubleRegister value(ToDoubleRegister(instr->value()));
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ base_offset += constant_key << element_size_shift;
+ if (!is_int20(base_offset)) {
+ __ mov(address, Operand(base_offset));
+ __ AddP(address, external_pointer);
+ } else {
+ __ AddP(address, external_pointer, Operand(base_offset));
+ }
+ base_offset = 0;
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
+ __ AddP(address, external_pointer);
+ }
+ if (elements_kind == FLOAT32_ELEMENTS) {
+ __ ledbr(double_scratch0(), value);
+ __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
+ } else { // Storing doubles, not floats.
+ __ StoreDouble(value, MemOperand(address, base_offset));
+ }
+ } else {
+ Register value(ToRegister(instr->value()));
+ MemOperand mem_operand =
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+ constant_key, element_size_shift, base_offset);
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreByte(value, mem_operand, r0);
+ } else {
+ __ StoreByte(value, mem_operand);
+ }
+ break;
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreHalfWord(value, mem_operand, r0);
+ } else {
+ __ StoreHalfWord(value, mem_operand);
+ }
+ break;
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ if (key_is_constant) {
+ __ StoreW(value, mem_operand, r0);
+ } else {
+ __ StoreW(value, mem_operand);
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+ bool use_scratch = false;
+ intptr_t address_offset = base_offset;
+
+ if (key_is_constant) {
+    // Memory references support up to 20-bit signed displacements in RXY form.
+ if (!is_int20((address_offset))) {
+ __ mov(scratch, Operand(address_offset));
+ address_offset = 0;
+ use_scratch = true;
+ }
+ } else {
+ use_scratch = true;
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    // Memory references support up to 20-bit signed displacements in RXY form.
+ if (!is_int20((address_offset))) {
+ __ AddP(scratch, Operand(address_offset));
+ address_offset = 0;
+ }
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Turn potential sNaN value into qNaN.
+ __ CanonicalizeNaN(double_scratch, value);
+ DCHECK(address_offset >= 0);
+ if (use_scratch)
+ __ std(double_scratch, MemOperand(scratch, elements, address_offset));
+ else
+ __ std(double_scratch, MemOperand(elements, address_offset));
+ } else {
+ if (use_scratch)
+ __ std(value, MemOperand(scratch, elements, address_offset));
+ else
+ __ std(value, MemOperand(elements, address_offset));
+ }
+}
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ HStoreKeyed* hinstr = instr->hydrogen();
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ int offset = instr->base_offset();
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset += ToInteger32(const_operand) * kPointerSize;
+ } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+ if (hinstr->key()->representation().IsSmi()) {
+ __ SmiToPtrArrayOffset(scratch, key);
+ } else {
+ if (instr->hydrogen()->IsDehoisted() ||
+ !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+#if V8_TARGET_ARCH_S390X
+        // If the array access is dehoisted, the key, being an int32, can
+        // contain a negative value and needs to be sign-extended to 64 bits
+        // for the memory access.
+ __ lgfr(key, key);
+#endif
+ __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
+ } else {
+        // Small optimization to reduce path length. After the bounds check,
+        // the key is guaranteed to be non-negative. Leverage RISBG,
+        // which also performs zero-extension.
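+        // (For illustration: with kPointerSizeLog2 == 3, RISBG selects bits
+        // 29..60 of the key rotated left by 3, producing
+        // ZeroExtend32(key) << 3 with all other result bits cleared.)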
+ __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
+ Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
+ true);
+ }
+ }
+ }
+
+ Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_S390X
+ // 64-bit Smi optimization
+ if (representation.IsInteger32()) {
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+ // Store int value directly to upper half of the smi.
+ offset = SmiWordOffset(offset);
+ }
+#endif
+
+ if (instr->key()->IsConstantOperand()) {
+ __ StoreRepresentation(value, MemOperand(elements, offset), representation,
+ scratch);
+ } else {
+ __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
+ representation, r0);
+ }
+
+ if (hinstr->NeedsWriteBarrier()) {
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+ ? OMIT_SMI_CHECK
+ : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ if (instr->key()->IsConstantOperand()) {
+ __ lay(key, MemOperand(elements, offset));
+ } else {
+ __ lay(key, MemOperand(scratch, elements, offset));
+ }
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+ EMIT_REMEMBERED_SET, check_needed,
+ hinstr->PointersToHereCheckForValue());
+ }
+}
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by elements kind: typed (external) array, fast double array,
+  // or fast tagged array.
+ if (instr->is_fixed_typed_array()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+ DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+ DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+ if (instr->hydrogen()->HasVectorAndSlot()) {
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+ }
+
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state())
+ .code();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+ class DeferredMaybeGrowElements final : public LDeferredCode {
+ public:
+ DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LMaybeGrowElements* instr_;
+ };
+
+ Register result = r2;
+ DeferredMaybeGrowElements* deferred =
+ new (zone()) DeferredMaybeGrowElements(this, instr);
+ LOperand* key = instr->key();
+ LOperand* current_capacity = instr->current_capacity();
+
+ DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+ DCHECK(key->IsConstantOperand() || key->IsRegister());
+ DCHECK(current_capacity->IsConstantOperand() ||
+ current_capacity->IsRegister());
+
+ if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ if (constant_key >= constant_capacity) {
+ // Deferred case.
+ __ b(deferred->entry());
+ }
+ } else if (key->IsConstantOperand()) {
+ int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+ __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
+ __ ble(deferred->entry());
+ } else if (current_capacity->IsConstantOperand()) {
+ int32_t constant_capacity =
+ ToInteger32(LConstantOperand::cast(current_capacity));
+ __ Cmp32(ToRegister(key), Operand(constant_capacity));
+ __ bge(deferred->entry());
+ } else {
+ __ Cmp32(ToRegister(key), ToRegister(current_capacity));
+ __ bge(deferred->entry());
+ }
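+  // In every path above the deferred stub is entered exactly when
+  // key >= current_capacity; e.g. key 8 with capacity 8 grows the backing
+  // store, while key 7 does not.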
+
+ if (instr->elements()->IsRegister()) {
+ __ Move(result, ToRegister(instr->elements()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->elements()));
+ }
+
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = r2;
+ __ LoadImmP(result, Operand::Zero());
+
+ // We have to call a stub.
+ {
+ PushSafepointRegistersScope scope(this);
+ if (instr->object()->IsRegister()) {
+ __ Move(result, ToRegister(instr->object()));
+ } else {
+ __ LoadP(result, ToMemOperand(instr->object()));
+ }
+
+ LOperand* key = instr->key();
+ if (key->IsConstantOperand()) {
+ __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
+ } else {
+ __ SmiTag(r5, ToRegister(key));
+ }
+
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+ instr->hydrogen()->kind());
+ __ CallStub(&stub);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ __ StoreToSafepointRegisterSlot(result, result);
+ }
+
+ // Deopt on smi, which means the elements array changed to dictionary mode.
+ __ TestIfSmi(result);
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+}
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object_reg = ToRegister(instr->object());
+ Register scratch = scratch0();
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, Operand(from_map));
+ __ bne(&not_applicable);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
+ __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(object_reg.is(r2));
+ PushSafepointRegistersScope scope(this);
+ __ Move(r3, to_map);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kLazyDeopt);
+ }
+ __ bind(&not_applicable);
+}
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ __ bind(&no_memento_found);
+}
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->left()).is(r3));
+ DCHECK(ToRegister(instr->right()).is(r2));
+ StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt final : public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new (zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(
+ masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+ ToRegister(instr->result()), deferred->entry());
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadImmP(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+ instr->context());
+ __ AssertSmi(r2);
+ __ SmiUntag(r2);
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode final : public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new (zone()) DeferredStringCharFromCode(this, instr);
+
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ DCHECK(!char_code.is(result));
+
+ __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
+ __ bgt(deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
+ __ AddP(result, r0);
+ __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ beq(deferred->entry());
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadImmP(result, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ SmiTag(char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ DCHECK(output->IsDoubleRegister());
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ LoadP(scratch, ToMemOperand(input));
+ __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+ } else {
+ __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+ }
+}
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ class DeferredNumberTagI final : public LDeferredCode {
+ public:
+ DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), SIGNED_INT32);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagI* instr_;
+ };
+
+ Register src = ToRegister(instr->value());
+ Register dst = ToRegister(instr->result());
+
+ DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(dst, src);
+#else
+  // Add src to itself to detect SMI overflow.
+ __ Add32(dst, src, src);
+ __ b(overflow, deferred->entry());
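+  // Doubling is exactly SmiTag on 32-bit targets (one left shift), and the
+  // CPU overflow flag fires precisely when the value does not fit in the
+  // 31-bit smi payload, e.g. for src = 0x40000000.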
+#endif
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU final : public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override {
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
+ __ CmpLogicalP(input, Operand(Smi::kMaxValue));
+ __ bgt(deferred->entry());
+ __ SmiTag(result, input);
+ __ bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
+ Register src = ToRegister(value);
+ Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
+ DoubleRegister dbl_scratch = double_scratch0();
+
+ if (signedness == SIGNED_INT32) {
+ // There was overflow, so bits 30 and 31 of the original integer
+ // disagree. Try to allocate a heap number in new space and store
+ // the value in there. If that fails, call the runtime system.
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ xilf(src, Operand(HeapNumber::kSignMask));
+ }
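+    // Worked example (illustrative): for src = 0x40000000 the tagging above
+    // produced 0x80000000 with overflow; SmiUntag yields 0xC0000000 and
+    // flipping the sign bit restores 0x40000000.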
+ __ ConvertIntToDouble(src, dbl_scratch);
+ } else {
+ __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+ }
+
+ if (FLAG_inline_new) {
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+ __ b(&done);
+ }
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ LoadImmP(dst, Operand::Zero());
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, dst);
+ }
+
+ // Done. Put the value in dbl_scratch into the value of the allocated heap
+ // number.
+ __ bind(&done);
+ __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD final : public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = scratch0();
+ Register reg = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ } else {
+ __ b(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ LoadImmP(reg, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, reg);
+}
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ TestUnsignedSmiCandidate(input, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ }
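+  // On 64-bit targets every int32 fits in a smi, so the signed overflow check
+  // below is only needed for 31-bit smis.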
+#if !V8_TARGET_ARCH_S390X
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, r0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ } else {
+#endif
+ __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_S390X
+ }
+#endif
+}
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ if (instr->needs_check()) {
+ __ tmll(input, Operand(kHeapObjectTag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ __ SmiUntag(result, input);
+ } else {
+ __ SmiUntag(result, input);
+ }
+}
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+ DoubleRegister result_reg,
+ NumberUntagDMode mode) {
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+ bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+ Register scratch = scratch0();
+ DCHECK(!result_reg.is(double_scratch0()));
+
+ Label convert, load_smi, done;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+ // Heap number map check.
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
+
+ if (can_convert_undefined_to_nan) {
+ __ bne(&convert, Label::kNear);
+ } else {
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ }
+    // Load the heap number value.
+ __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ TestDoubleIsMinusZero(result_reg, scratch, ip);
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ }
+ __ b(&done, Label::kNear);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ b(&done, Label::kNear);
+ }
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+ // Smi to double register conversion
+ __ bind(&load_smi);
+ // scratch: untagged value of input_reg
+ __ ConvertIntToDouble(scratch, result_reg);
+ __ bind(&done);
+}
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Register input_reg = ToRegister(instr->value());
+ Register scratch1 = scratch0();
+ Register scratch2 = ToRegister(instr->temp());
+ DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+ DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+ DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+ Label done;
+
+ // Heap number map check.
+ __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ Label no_heap_number, check_bools, check_false;
+ __ bne(&no_heap_number, Label::kNear);
+ __ LoadRR(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ b(&done, Label::kNear);
+
+ // Check for Oddballs. Undefined/False is converted to zero and True to one
+ // for truncating conversions.
+ __ bind(&no_heap_number);
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ __ bne(&check_bools);
+ __ LoadImmP(input_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_bools);
+ __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+ __ bne(&check_false, Label::kNear);
+ __ LoadImmP(input_reg, Operand(1));
+ __ b(&done, Label::kNear);
+
+ __ bind(&check_false);
+ __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ __ LoadImmP(input_reg, Operand::Zero());
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+ __ ld(double_scratch2,
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Preserve the heap number pointer in scratch2 for the minus-zero
+      // check below.
+ __ LoadRR(scratch2, input_reg);
+ }
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+ double_scratch);
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ CmpP(input_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestHeapNumberSign(scratch2, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ }
+ }
+ __ bind(&done);
+}
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI final : public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LTaggedToI* instr_;
+ };
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ DCHECK(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+ // Branch to deferred code if the input is a HeapObject.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
+}
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ LOperand* result = instr->result();
+ DCHECK(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ DoubleRegister result_reg = ToDoubleRegister(result);
+
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI
+ : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ CmpP(result_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestDoubleSign(double_input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
+}
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+ Register result_reg = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ DoubleRegister double_input = ToDoubleRegister(instr->value());
+ DoubleRegister double_scratch = double_scratch0();
+
+ if (instr->truncating()) {
+ __ TruncateDoubleToI(result_reg, double_input);
+ } else {
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+ double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label done;
+ __ CmpP(result_reg, Operand::Zero());
+ __ bne(&done, Label::kNear);
+ __ TestDoubleSign(double_input, scratch1);
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ __ bind(&done);
+ }
+ }
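+  // Tagging the result cannot overflow on 64-bit targets; 32-bit builds must
+  // deoptimize on overflow.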
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(result_reg);
+#else
+ __ SmiTagCheckOverflow(result_reg, r0);
+ DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+}
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+}
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+ LOperand* input = instr->value();
+ __ TestIfSmi(ToRegister(input));
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ }
+}
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+ LCheckArrayBufferNotNeutered* instr) {
+ Register view = ToRegister(instr->view());
+ Register scratch = scratch0();
+
+ __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+ __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+ DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+}
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = scratch0();
+
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+ Operand(first));
+
+    // If there is only one type in the interval, check for equality.
+ if (first == last) {
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ } else {
+ DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
+ Operand(last));
+ DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ }
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
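+    // A single-bit mask needs no compare: deoptimize if the bit is set when
+    // the tag is zero, or clear when the tag equals the mask.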
+ if (base::bits::IsPowerOfTwo32(mask)) {
+ DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+ __ AndP(scratch, Operand(mask));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ } else {
+ __ AndP(scratch, Operand(mask));
+ __ CmpP(scratch, Operand(tag));
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ }
+ }
+}
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ mov(ip, Operand(cell));
+ __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
+ } else {
+ __ CmpP(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ LoadImmP(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(instr->pointer_map(), 1,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, temp);
+ }
+ __ TestIfSmi(temp);
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+}
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps final : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ void Generate() override {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->IsStabilityCheck()) {
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ for (int i = 0; i < maps->size(); ++i) {
+ AddStabilityDependency(maps->at(i).handle());
+ }
+ return;
+ }
+
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ Register reg = ToRegister(input);
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+ Label success;
+ for (int i = 0; i < maps->size() - 1; i++) {
+ Handle<Map> map = maps->at(i).handle();
+ __ CompareMap(reg, map, &success);
+ __ beq(&success);
+ }
+
+ Handle<Map> map = maps->at(maps->size() - 1).handle();
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->HasMigrationTarget()) {
+ __ bne(deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ }
+
+ __ bind(&success);
+}
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register unclamped_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ __ ClampUint8(result_reg, unclamped_reg);
+}
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register scratch = scratch0();
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ Label is_smi, done, heap_number;
+
+ // Both smi and heap number cases are handled.
+ __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+  // Check for a heap number.
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ CmpP(scratch, Operand(factory()->heap_number_map()));
+ __ beq(&heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ CmpP(input_reg, Operand(factory()->undefined_value()));
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ __ LoadImmP(result_reg, Operand::Zero());
+ __ b(&done, Label::kNear);
+
+  // Heap number case.
+ __ bind(&heap_number);
+ __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+ __ b(&done, Label::kNear);
+
+  // Smi case.
+ __ bind(&is_smi);
+ __ ClampUint8(result_reg, result_reg);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
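+  // Copy the raw 64-bit image of the double into the GPR, then keep either
+  // the high or the low 32 bits.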
+ __ lgdr(result_reg, value_reg);
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ srlg(result_reg, result_reg, Operand(32));
+ } else {
+ __ llgfr(result_reg, result_reg);
+ }
+}
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ // Combine hi_reg:lo_reg into a single 64-bit register.
+ __ sllg(scratch, hi_reg, Operand(32));
+ __ lr(scratch, lo_reg);
+
+ // Bitwise convert from GPR to FPR
+ __ ldgr(result_reg, scratch);
+}
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate final : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ LoadIntLiteral(scratch, size);
+ } else {
+ scratch = ToRegister(instr->size());
+ }
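+    // Store the one-pointer filler map into every word of the object, walking
+    // from the last word down to offset zero.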
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+ Label loop;
+ __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ bind(&loop);
+ __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
+#if V8_TARGET_ARCH_S390X
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+#else
+ // TODO(joransiu): Improve the following sequence.
+    // Need to use AHI instead of LAY, as the top nibble is not set with LAY,
+    // causing an incorrect result with the signed compare.
+ __ AddP(scratch, Operand(-kPointerSize));
+#endif
+ __ CmpP(scratch, Operand::Zero());
+ __ bge(&loop);
+ }
+}
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ DCHECK(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+#if !V8_TARGET_ARCH_S390X
+ if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+ __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_S390X
+ } else {
+ // We should never get here at runtime => abort
+ __ stop("invalid allocation size");
+ return;
+ }
+#endif
+ }
+
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ DCHECK(ToRegister(instr->value()).is(r5));
+ DCHECK(ToRegister(instr->result()).is(r2));
+ Label end, do_call;
+ Register value_register = ToRegister(instr->value());
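+  // Smis are always numbers, so the stub call can be skipped for them.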
+ __ JumpIfNotSmi(value_register, &do_call);
+ __ mov(r2, Operand(isolate()->factory()->number_string()));
+ __ b(&end);
+ __ bind(&do_call);
+ TypeofStub stub(isolate());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&end);
+}
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+
+ Condition final_branch_condition =
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+ instr->type_literal());
+ if (final_branch_condition != kNoCondition) {
+ EmitBranch(instr, final_branch_condition);
+ }
+}
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+ Register input, Handle<String> type_name) {
+ Condition final_branch_condition = kNoCondition;
+ Register scratch = scratch0();
+ Factory* factory = isolate()->factory();
+ if (String::Equals(type_name, factory->number_string())) {
+ __ JumpIfSmi(input, true_label);
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->string_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+ final_branch_condition = lt;
+
+ } else if (String::Equals(type_name, factory->symbol_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->boolean_string())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ beq(true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->undefined_string())) {
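+    // typeof null is "object", so null must go to the false label before the
+    // undetectable-object check below.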
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(false_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ __ CmpP(r0, Operand::Zero());
+ final_branch_condition = ne;
+
+ } else if (String::Equals(type_name, factory->function_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
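+    // "function" requires the map's callable bit set and its undetectable bit
+    // clear.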
+ __ AndP(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(scratch, Operand(1 << Map::kIsCallable));
+ final_branch_condition = eq;
+
+ } else if (String::Equals(type_name, factory->object_string())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(true_label);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
+ __ blt(false_label);
+ // Check for callable or undetectable objects => false.
+ __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ AndP(r0, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(r0, Operand::Zero());
+ final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
+ } else {
+ __ b(false_label);
+ }
+
+ return final_branch_condition;
+}
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % 2);
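+      // Each nop is 2 bytes, so the padding is emitted in 2-byte steps.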
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= 2;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+
+ DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck final : public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) {}
+ void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LStackCheck* instr_;
+ };
+
+ DCHECK(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&done, Label::kNear);
+ DCHECK(instr->context()->IsRegister());
+ DCHECK(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+ instr);
+ __ bind(&done);
+ } else {
+ DCHECK(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new (zone()) DeferredStackCheck(this, instr);
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ blt(deferred_stack_check->entry());
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting call and the safepoint in
+ // the deferred code.
+ }
+}
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ DCHECK(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r2);
+ CallRuntime(Runtime::kForInEnumerate, instr);
+ __ bind(&use_cache);
+}
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+ __ EnumLength(result, map);
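+  // An EnumLength of zero means the map has no enum cache; use the empty
+  // fixed array instead.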
+ __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+ __ bne(&load_cache, Label::kNear);
+ __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ b(&done, Label::kNear);
+
+ __ bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ CmpP(result, Operand::Zero());
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+
+ __ bind(&done);
+}
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CmpP(map, scratch0());
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result, Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object, index);
+ __ LoadImmP(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(instr->pointer_map(), 2,
+ Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r2, result);
+}
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble final : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+ Register result, Register object, Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {}
+ void Generate() override {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ DeferredLoadMutableDouble* deferred;
+ deferred = new (zone())
+ DeferredLoadMutableDouble(this, instr, result, object, index);
+
+ Label out_of_object, done;
+
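+  // The low payload bit of the index flags a mutable double field, which is
+  // boxed through the deferred runtime call.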
+ __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+ __ bne(deferred->entry());
+ __ ShiftRightArithP(index, index, Operand(1));
+
+ __ CmpP(index, Operand::Zero());
+ __ blt(&out_of_object, Label::kNear);
+
+ __ SmiToPtrArrayOffset(r0, index);
+ __ AddP(scratch, object, r0);
+ __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ b(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index equals the negated out-of-object property index plus 1.
+ __ SmiToPtrArrayOffset(r0, index);
+ __ SubP(scratch, result, r0);
+ __ LoadP(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
new file mode 100644
index 0000000000..6d364cbe11
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
@@ -0,0 +1,359 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+#include "src/crankshaft/s390/lithium-s390.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ jump_table_(4, info->zone()),
+ scope_(info->scope()),
+ deferred_(8, info->zone()),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LConstantOperand must be an Integer32 or Smi
+ void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ intptr_t ToRepresentation(LConstantOperand* op,
+ const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
+ bool IsInteger32(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+ LOperand* temp1, LOperand* temp2,
+ IntegerSignedness signedness);
+
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+ Register object, Register index);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
+
+ MemOperand PrepareKeyedOperand(Register key, Register base,
+ bool key_is_constant, bool key_is_tagged,
+ int constant_key, int element_size_shift,
+ int base_offset);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ LanguageMode language_mode() const { return info()->language_mode(); }
+
+ Scope* scope() const { return scope_; }
+
+ Register scratch0() { return kLithiumScratch; }
+ DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true, Label* if_false,
+ Handle<String> class_name, Register input,
+ Register temporary, Register temporary2);
+
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) override;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+ LInstruction* instr, SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function, int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+ LInstruction* instr, LOperand* context);
+
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
+ // Generate a direct call to a known function. Expects the function
+ // to be in r4.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count, int arity,
+ bool is_tail_call, LInstruction* instr);
+
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason,
+ Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+
+ void AddToTranslation(LEnvironment* environment, Translation* translation,
+ LOperand* op, bool is_tagged, bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ Register ToRegister(int index) const;
+ DoubleRegister ToDoubleRegister(int index) const;
+
+ MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+ String::Encoding encoding);
+
+ void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_S390X
+ void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+ Safepoint::DeoptMode mode);
+
+ void RecordAndWritePosition(int position) override;
+
+ static Condition TokenToCondition(Token::Value op);
+ void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
+ template <class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition);
+ template <class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition);
+ void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+ DoubleRegister result, NumberUntagDMode mode);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+ Handle<String> type_name);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+ int* offset, AllocationSiteMode mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) override;
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+ template <class T>
+ void EmitVectorLoadICRegisters(T* instr);
+ template <class T>
+ void EmitVectorStoreICRegisters(T* instr);
+
+ ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+ Scope* const scope_;
+ ZoneList<LDeferredCode*> deferred_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ class PushSafepointRegistersScope final BASE_EMBEDDED {
+ public:
+ explicit PushSafepointRegistersScope(LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ }
+
+ ~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+class LDeferredCode : public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() {}
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
new file mode 100644
index 0000000000..cffcede226
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
+
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
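+// Register code 1 (r1) is the GPR scratch used to break move cycles;
+// kScratchDoubleReg covers double values.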
+static const Register kSavedValueRegister = {1};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner),
+ moves_(32, owner->zone()),
+ root_index_(0),
+ in_cycle_(false),
+ saved_destination_(NULL) {}
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ DCHECK(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ DCHECK(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+ // the stack, to free up the register.
+ DCHECK(!moves_[index].IsPending());
+ DCHECK(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ DCHECK(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ DCHECK(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+void LGapResolver::RestoreValue() {
+ DCHECK(in_cycle_);
+ DCHECK(saved_destination_ != NULL);
+
+  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
+ if (saved_destination_->IsRegister()) {
+ __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ LoadRR(cgen_->ToRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ __ StoreP(source_register, cgen_->ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(cgen_->ToRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ __ LoadP(ip, source_operand);
+ __ StoreP(ip, destination_operand);
+ } else {
+ __ LoadP(kSavedValueRegister, source_operand);
+ __ StoreP(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, dst);
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ double v = cgen_->ToDouble(constant_source);
+ __ LoadDoubleLiteral(result, v, ip);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
+ if (cgen_->IsInteger32(constant_source)) {
+ cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+ } else {
+ __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+ }
+ __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldr(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+// kScratchDoubleReg was used to break the cycle,
+// but kSavedValueRegister is free.
+#if V8_TARGET_ARCH_S390X
+ __ lg(kSavedValueRegister, source_operand);
+ __ stg(kSavedValueRegister, destination_operand);
+#else
+ MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ LoadlW(kSavedValueRegister, source_operand);
+ __ StoreW(kSavedValueRegister, destination_operand);
+ __ LoadlW(kSavedValueRegister, source_high_operand);
+ __ StoreW(kSavedValueRegister, destination_high_operand);
+#endif
+ } else {
+ __ LoadDouble(kScratchDoubleReg, source_operand);
+ __ StoreDouble(kScratchDoubleReg, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+#undef __
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
new file mode 100644
index 0000000000..087224c861
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
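+ // For example, the parallel move {r0 -> r1, r1 -> r0} forms such a
+ // cycle: one of the two values is parked in the scratch register so
+ // that the other move can be emitted first.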
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
new file mode 100644
index 0000000000..a18f877187
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -0,0 +1,2290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/s390/lithium-s390.h"
+
+#include <sstream>
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/s390/lithium-codegen-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+ // Inputs operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-d";
+ case Token::SUB:
+ return "sub-d";
+ case Token::MUL:
+ return "mul-d";
+ case Token::DIV:
+ return "div-d";
+ case Token::MOD:
+ return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD:
+ return "add-t";
+ case Token::SUB:
+ return "sub-t";
+ case Token::MUL:
+ return "mul-t";
+ case Token::MOD:
+ return "mod-t";
+ case Token::DIV:
+ return "div-t";
+ case Token::BIT_AND:
+ return "bit-and-t";
+ case Token::BIT_OR:
+ return "bit-or-t";
+ case Token::BIT_XOR:
+ return "bit-xor-t";
+ case Token::ROR:
+ return "ror-t";
+ case Token::SHL:
+ return "shl-t";
+ case Token::SAR:
+ return "sar-t";
+ case Token::SHR:
+ return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+ true_block_id(), false_block_id());
+}
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+ false_block_id());
+}
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ std::ostringstream os;
+ os << hydrogen()->access() << " <- ";
+ stream->Add(os.str().c_str());
+ value()->PrintTo(stream);
+}
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", base_offset());
+ } else {
+ stream->Add("]");
+ }
+}
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", base_offset());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (value() == NULL) {
+ DCHECK(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ value()->PrintTo(stream);
+ }
+}
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+ // Skip ahead one slot so that a double-width value occupies two slots.
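+ // For example, with current_frame_slots_ == 4, a DOUBLE_REGISTERS request
+ // first bumps the counter to 5 and then returns index 5, consuming two
+ // spill slots in total.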
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
+}
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ DCHECK(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+LPlatformChunk* LChunkBuilder::Build() {
+ DCHECK(is_unused());
+ chunk_ = new (zone()) LPlatformChunk(info(), graph());
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* next = NULL;
+ if (i < blocks->length() - 1) next = blocks->at(i + 1);
+ DoBasicBlock(blocks->at(i), next);
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new (zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
+}
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ // If the instruction does not have side effects, lazy deoptimization
+ // after the call will try to deoptimize to the point before the call.
+ // Thus we still need to attach an environment to this call even if
+ // the call sequence cannot deoptimize eagerly.
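+ // For example, a call that cannot deoptimize eagerly but has no
+ // observable side effects still receives an environment here, so that
+ // lazy deoptimization can resume from the state before the call.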
+ bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ // We can't really figure out if the environment is needed or not.
+ instr->environment()->set_has_been_used();
+ }
+
+ return instr;
+}
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ DCHECK(!instr->HasPointerMap());
+ instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+ return instr;
+}
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+ LUnallocated* operand =
+ new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ DCHECK(operand->HasFixedPolicy());
+ return operand;
+}
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new (zone()) LLabel(instr->block());
+}
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
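+ // E.g. a value near the top of the smi range shifted left by one no
+ // longer fits in a smi, so we deoptimize unless every use truncates
+ // the result back to smi.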
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
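+ // E.g. "x >>> 0" with the sign bit of x set yields a value above
+ // kMaxInt, which is only safe if all uses are uint32-aware (kUint32).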
+ if (op == Token::SHR && constant_value == 0) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
+}
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ DCHECK(left->representation().IsTagged());
+ DCHECK(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, r3);
+ LOperand* right_operand = UseFixed(right, r2);
+ LArithmeticT* result =
+ new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+ DCHECK(is_building());
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ DCHECK(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ DCHECK(last_environment != NULL);
+ // Only copy the environment if it is used again later.
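+ // A copy is needed when the predecessor branches and its other successor
+ // is processed later, since that successor still reads the environment
+ // this block is about to mutate.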
+ if (pred->end()->SecondSuccessor() == NULL) {
+ DCHECK(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ DCHECK(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new (zone()) LDummy());
+ } else {
+ DCHECK(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new (zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ HBasicBlock* successor;
+ if (current->IsControlInstruction() &&
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+ successor != NULL) {
+ instr = new (zone()) LGoto(successor);
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+ }
+
+ argument_count_ += current->argument_delta();
+ DCHECK(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ AddInstruction(instr, current);
+ }
+
+ current_instruction_ = old_current;
+}
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+ HInstruction* hydrogen_val) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+ // it were just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ DCHECK(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
+}
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
+ }
+ return branch;
+}
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new (zone()) LDebugBreak();
+}
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegister(instr->value());
+ return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ info()->MarkAsRequiresFrame();
+ return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), r3);
+ LOperand* receiver = UseFixed(instr->receiver(), r2);
+ LOperand* length = UseFixed(instr->length(), r4);
+ LOperand* elements = UseFixed(instr->elements(), r5);
+ LApplyArguments* result =
+ new (zone()) LApplyArguments(function, receiver, length, elements);
+ return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+ int argc = instr->OperandCount();
+ for (int i = 0; i < argc; ++i) {
+ LOperand* argument = Use(instr->argument(i));
+ AddInstruction(new (zone()) LPushArgument(argument), instr);
+ }
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(new (zone())
+ LInnerAllocatedObject(base_object, offset));
+}
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL
+ : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new (zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new (zone()) LContext);
+}
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+ CallInterfaceDescriptor descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ // Target
+ ops.Add(target, zone());
+ // Context
+ LOperand* op = UseFixed(instr->OperandAt(1), cp);
+ ops.Add(op, zone());
+ // Other register parameters
+ for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+ i < instr->OperandCount(); i++) {
+ op =
+ UseFixed(instr->OperandAt(i),
+ descriptor.GetRegisterParameter(
+ i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result =
+ new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), r3);
+ LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
+ return MarkAsCall(DefineFixed(result, r2), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathFloor:
+ return DoMathFloor(instr);
+ case kMathRound:
+ return DoMathRound(instr);
+ case kMathFround:
+ return DoMathFround(instr);
+ case kMathAbs:
+ return DoMathAbs(instr);
+ case kMathLog:
+ return DoMathLog(instr);
+ case kMathExp:
+ return DoMathExp(instr);
+ case kMathSqrt:
+ return DoMathSqrt(instr);
+ case kMathPowHalf:
+ return DoMathPowHalf(instr);
+ case kMathClz32:
+ return DoMathClz32(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new (zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempDoubleRegister();
+ LMathRound* result = new (zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFround* result = new (zone()) LMathFround(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LMathAbs(context, input));
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d1);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new (zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = TempDoubleRegister();
+ LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new (zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
+}
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* constructor = UseFixed(instr->constructor(), r3);
+ LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ return DefineAsRegister(new (zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+ ? NULL
+ : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ (instr->CheckFlag(HValue::kCanOverflow) &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+ if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ int32_t constant_value = 0;
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can be optimized if the result can overflow.
+ // For other constants, it can be optimized only without overflow.
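+ // (Assumed rationale: -1, 0 and 1 compile to negate/zero/move, where
+ // overflow can still be detected, while other constants are
+ // strength-reduced in a way that cannot detect it.)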
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
+ LMulI* mul = new (zone()) LMulI(left_op, right_op);
+ if (right_op->IsConstantOperand()
+ ? ((can_overflow && constant_value == -1) ||
+ (bailout_on_minus_zero && constant_value <= 0))
+ : (can_overflow || bailout_on_minus_zero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
+
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+ if (instr->left()->IsConstant() &&
+ !instr->CheckFlag(HValue::kCanOverflow)) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new (zone()) LSubI(left, right);
+ LInstruction* result = DefineAsRegister(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ DCHECK(instr->representation().IsSmiOrInteger32());
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
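+ // E.g. for HSub(5, x) this builds LRSubI with x as the register operand
+ // and 5 as the constant operand, computing 5 - x.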
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new (zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegister(mul->left());
+ LOperand* multiplicand_op = UseRegister(mul->right());
+ LOperand* addend_op = UseRegister(addend);
+ return DefineAsRegister(
+ new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegister(minuend);
+ LOperand* multiplier_op = UseRegister(mul->left());
+ LOperand* multiplicand_op = UseRegister(mul->right());
+
+ return DefineAsRegister(
+ new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ DCHECK(instr->IsConsistentExternalRepresentation());
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new (zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(instr->representation()));
+ DCHECK(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ left = UseRegister(instr->left());
+ right = UseRegister(instr->right());
+ }
+ return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ DCHECK(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ DCHECK(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d2)
+ : UseFixed(instr->right(), r4);
+ LPower* result = new (zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3), instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ LCmpT* result = new (zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmiOrInteger32()) {
+ DCHECK(instr->left()->representation().Equals(r));
+ DCHECK(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ DCHECK(r.IsDouble());
+ DCHECK(instr->left()->representation().IsDouble());
+ DCHECK(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LCmpHoleAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ DCHECK(instr->left()->representation().IsTagged());
+ DCHECK(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ LStringCompareAndBranch* result =
+ new (zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ return new (zone())
+ LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ DCHECK(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ if (!FLAG_debug_code && instr->skip_check()) return NULL;
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = !index->IsConstantOperand()
+ ? UseRegisterOrConstantAtStart(instr->length())
+ : UseRegisterAtStart(instr->length());
+ LInstruction* result = new (zone()) LBoundsCheck(index, length);
+ if (!FLAG_debug_code || !instr->skip_check()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
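+
+// A minimal standalone restatement of the decision just above (the helper
+// name and signature are hypothetical, for illustration only):
+//
+//   bool EmitBoundsCheck(bool debug_code, bool skip_check,
+//                        bool* assign_environment) {
+//     if (!debug_code && skip_check) return false;       // elided entirely
+//     *assign_environment = !debug_code || !skip_check;  // all but debug+skip
+//     return true;                                       // emit LBoundsCheck
+//   }
+//
+// That is, with FLAG_debug_code set a skippable check is still emitted,
+// but without a deopt environment.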
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+ HValue* val = instr->value();
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(val);
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LNumberUntagD(value));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi()) {
+ return DefineSameAsFirst(new (zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(
+ DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempDoubleRegister();
+ LInstruction* result =
+ DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+ if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+ return result;
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(Define(result, result_temp));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+ } else {
+ DCHECK(to.IsInteger32());
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+ return result;
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ LOperand* value = UseRegisterAtStart(val);
+ return DefineAsRegister(new (zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ LOperand* value = UseRegisterAtStart(val);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(val);
+ LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ DCHECK(to.IsDouble());
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+ } else {
+ return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+ }
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
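+
+// For orientation, the conversions handled above, keyed by (from, to);
+// environments, pointer maps and deferred-call marking are elided from
+// this sketch:
+//
+//   Smi       -> Tagged:    LDummyUse (otherwise re-dispatched as Tagged)
+//   Tagged    -> Double:    LNumberUntagD
+//   Tagged    -> Smi:       LDummyUse if already Smi, else LCheckSmi
+//   Tagged    -> Integer32: LSmiUntag if Smi, else LTaggedToI
+//   Double    -> Tagged:    LNumberTagD
+//   Double    -> Smi:       LDoubleToSmi
+//   Double    -> Integer32: LDoubleToI
+//   Integer32 -> Tagged:    LSmiTag, LNumberTagU (uint32) or LNumberTagI
+//   Integer32 -> Smi:       LSmiTag
+//   Integer32 -> Double:    LUint32ToDouble (uint32) or LInteger32ToDouble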
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckNonSmi(value);
+ if (!instr->value()->type().IsHeapObject()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+ HCheckArrayBufferNotNeutered* instr) {
+ LOperand* view = UseRegisterAtStart(instr->value());
+ LCheckArrayBufferNotNeutered* result =
+ new (zone()) LCheckArrayBufferNotNeutered(view);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new (zone()) LCheckInstanceType(value);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ AssignEnvironment(new (zone()) LCheckMaps(value, temp));
+ if (instr->HasMigrationTarget()) {
+ info()->MarkAsDeferredCalling();
+ result = AssignPointerMap(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+ } else {
+ DCHECK(input_rep.IsSmiOrTagged());
+ LClampTToUint8* result =
+ new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
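+
+// All three lowerings share the same saturating semantics and differ only
+// in input representation. A sketch of the integer case (hypothetical
+// helper, not part of this file):
+//
+//   int32_t ClampToUint8(int32_t v) {
+//     return v < 0 ? 0 : (v > 255 ? 255 : v);  // saturate into [0, 255]
+//   }
+//
+// The double and tagged variants additionally handle rounding, NaN and
+// (for tagged inputs) untagging, which is why only LClampTToUint8 needs
+// an environment.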
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ DCHECK(value->representation().IsDouble());
+ return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new (zone())
+ LReturn(UseFixed(instr->value(), r2), context, parameter_count);
+}
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new (zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new (zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new (zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new (zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new (zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object =
+ UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+ LLoadGlobalGeneric* result =
+ new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new (zone()) LLoadContextSlot(context));
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+
+ LInstruction* result =
+ DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ return AssignEnvironment(DefineAsRegister(
+ new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ DCHECK(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LInstruction* result = NULL;
+
+ if (!instr->is_fixed_typed_array()) {
+ LOperand* obj = NULL;
+ if (instr->representation().IsDouble()) {
+ obj = UseRegister(instr->elements());
+ } else {
+ obj = UseRegisterAtStart(instr->elements());
+ }
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+ } else {
+ DCHECK((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(elements_kind)));
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+ }
+
+ bool needs_environment;
+ if (instr->is_fixed_typed_array()) {
+ // see LCodeGen::DoLoadKeyedExternalArray
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
+ !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ needs_environment =
+ instr->RequiresHoleCheck() ||
+ (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+ }
+
+ if (needs_environment) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
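+
+// The environment rule above, restated as a standalone predicate (the
+// helper itself is hypothetical; the logic mirrors the code above):
+//
+//   bool LoadKeyedNeedsEnvironment(HLoadKeyed* h, bool is_stub) {
+//     if (h->is_fixed_typed_array()) {
+//       return h->elements_kind() == UINT32_ELEMENTS &&
+//              !h->CheckFlag(HInstruction::kUint32);
+//     }
+//     return h->RequiresHoleCheck() ||
+//            (h->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && is_stub);
+//   }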
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object =
+ UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+ }
+
+ LInstruction* result = DefineFixed(
+ new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ if (!instr->is_fixed_typed_array()) {
+ DCHECK(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
+ }
+
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
+ }
+
+ DCHECK((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ DCHECK(instr->elements()->representation().IsExternal());
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+ DCHECK(instr->object()->representation().IsTagged());
+ DCHECK(instr->key()->representation().IsTagged());
+ DCHECK(instr->value()->representation().IsTagged());
+
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreKeyedGeneric* result =
+ new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* new_map_reg = TempRegister();
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+ return result;
+ } else {
+ LOperand* object = UseFixed(instr->object(), r2);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new (zone()) LTransitionElementsKind(object, context, NULL);
+ return MarkAsCall(result, instr);
+ }
+}
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new (zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = Use(instr->object());
+ LOperand* elements = Use(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+ LMaybeGrowElements* result = new (zone())
+ LMaybeGrowElements(context, object, elements, key, current_capacity);
+ DefineFixed(result, r2);
+ return AssignPointerMap(AssignEnvironment(result));
+}
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ bool is_in_object = instr->access().IsInobject();
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ bool needs_write_barrier_for_map =
+ instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
+ LOperand* obj;
+ if (needs_write_barrier) {
+ obj = is_in_object ? UseRegister(instr->object())
+ : UseTempRegister(instr->object());
+ } else {
+ obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+ }
+
+ LOperand* val;
+ if (needs_write_barrier) {
+ val = UseTempRegister(instr->value());
+ } else if (instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
+
+  // We need a temporary register for the write barrier of the map field.
+ LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+ return new (zone()) LStoreNamedField(obj, val, temp);
+}
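+
+// A reading of the operand choices above (the clobbering rationale is the
+// usual write-barrier contract, not spelled out in this file): the
+// record-write code may scratch the registers handed to it, so
+// barrier-carrying stores take UseTempRegister operands, while
+// barrier-free stores can safely reuse their inputs at-start.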
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* obj =
+ UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+ LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (instr->HasVectorAndSlot()) {
+ slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+ vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+ }
+
+ LStoreNamedGeneric* result =
+ new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+ return MarkAsCall(result, instr);
+}
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), r3);
+ LOperand* right = UseFixed(instr->right(), r2);
+ return MarkAsCall(
+ DefineFixed(new (zone()) LStringAdd(context, left, right), r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseTempRegister(instr->string());
+ LOperand* index = UseTempRegister(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new (zone()) LStringCharCodeAt(context, string, index);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new (zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ DCHECK(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new (zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ DCHECK(info()->IsStub());
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor.GetRegisterParameter(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Retry(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ spill_index += StandardFrameConstants::kFixedSlotCount;
+ }
+ return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
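+
+// Worked example with made-up numbers: for a local with env_index 7,
+// first_local_index() 4 and kFixedSlotCount 4, the value is spilled to
+// slot 7 - 4 + 4 = 7 of the unoptimized frame, which the optimized frame
+// subsumes; parameter indices instead map through GetParameterStackSlot().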
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real use of the arguments object causes
+  // a bailout. So this value is never used.
+ return NULL;
+}
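+
+// Concretely (illustrative JavaScript, not taken from this file): uses
+// such as arguments.length or arguments[i] are lowered against the stack
+// frame directly, whereas an escaping use like `return arguments;`
+// triggers the bailout mentioned above.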
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = UseRegister(instr->arguments());
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(), r5);
+ LTypeof* result = new (zone()) LTypeof(context, value);
+ return MarkAsCall(DefineFixed(result, r2), instr);
+}
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new (zone()) LStackCheck(context), instr);
+ } else {
+ DCHECK(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new (zone()) LStackCheck(context)));
+ }
+}
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ outer->set_ast_id(instr->ReturnId());
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
+  // Only replay the binding of the arguments object if it wasn't
+  // removed from the graph.
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->BindContext(instr->closure_context());
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedFunction(instr->shared());
+ return NULL;
+}
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new (zone()) LDrop(argument_count);
+ DCHECK(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->enumerable(), r2);
+ LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(
+ DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
new file mode 100644
index 0000000000..b6a161411d
--- /dev/null
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -0,0 +1,2414 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+#define V8_CRANKSHAFT_S390_LITHIUM_S390_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallWithDescriptor) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CheckArrayBufferNotNeutered) \
+ V(CheckInstanceType) \
+ V(CheckNonSmi) \
+ V(CheckMaps) \
+ V(CheckMapValue) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToI) \
+ V(DoubleToSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsStringAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadRoot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyed) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(MathAbs) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathFround) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(MaybeGrowElements) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(Prologue) \
+ V(PushArgument) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreKeyed) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(RSubI) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ Opcode opcode() const final { return LInstruction::k##type; } \
+ void CompileToNative(LCodeGen* generator) final; \
+ const char* Mnemonic() const final { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ DCHECK(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
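+
+// For example, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") inside LGoto
+// below expands to (a sketch of the preprocessor output):
+//
+//   Opcode opcode() const final { return LInstruction::kGoto; }
+//   void CompileToNative(LCodeGen* generator) final;
+//   const char* Mnemonic() const final { return "goto"; }
+//   static LGoto* cast(LInstruction* instr) {
+//     DCHECK(instr->IsGoto());
+//     return reinterpret_cast<LGoto*>(instr);
+//   }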
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {}
+
+ virtual ~LInstruction() {}
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
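+
+// The same list therefore gives every leaf class a matching
+// opcode/predicate pair; for AddI the two expansions above produce:
+//
+//   kAddI,                                             // in enum Opcode
+//   bool IsAddI() const { return opcode() == kAddI; }  // type tester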
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ // Try deleting this instruction if possible.
+ virtual bool TryDelete() { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+ return IsCall();
+ }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+
+ private:
+ // Iterator support.
+ friend class InputIterator;
+
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ class IsCallBits : public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int bit_field_;
+};
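+
+// Layout of bit_field_ as encoded by the BitField helpers above: bit 0
+// holds IsCall and bit 1 (IsCallBits::kNext) holds IsSyntacticTailCall;
+// the remaining bits are unused in this header.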
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ bool HasResult() const final { return R != 0 && result() != NULL; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const override { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ int InputCount() final { return I; }
+ LOperand* InputAt(int i) final { return inputs_[i]; }
+
+ int TempCount() final { return T; }
+ LOperand* TempAt(int i) final { return temps_[i]; }
+};
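+
+// Reading the parameters on a concrete class: LAccessArgumentsAt below
+// derives from LTemplateInstruction<1, 3, 0>, i.e. one result, three
+// inputs (arguments, length, index) and no temps; the fixed-size
+// EmbeddedContainers keep the operands inline in the instruction object.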
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block) : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of the
+  // sub-classes.
+ bool IsGap() const override { return true; }
+ void PrintDataTo(StringStream* stream) override;
+ static LGap* cast(LInstruction* instr) {
+ DCHECK(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new (zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+class LInstructionGap final : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ void PrintDataTo(StringStream* stream) override;
+ bool IsControl() const override { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ LDummy() {}
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ bool IsControl() const override { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+class LLabel final : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+ bool IsControl() const final { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+class LModI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+class LDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
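+
+// Worked example: with multiplier 2.0, multiplicand 3.0 and addend (or
+// minuend) 10.0, multiply-add-d yields 2.0 * 3.0 + 10.0 = 16.0 and
+// multiply-sub-d yields 10.0 - 2.0 * 3.0 = 4.0.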
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
+ }
+
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LRSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+
+ uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
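[editor's note] The bits() accessor exists so codegen can materialize a double constant from its raw IEEE-754 encoding (two integer moves instead of a constant-pool load). A minimal standalone sketch of that bit extraction — plain C++, not V8's actual helper:

    #include <cstdint>
    #include <cstring>

    // Returns the raw IEEE-754 bit pattern of a double, the same value
    // that hydrogen()->DoubleValueAsBits() exposes above. memcpy is the
    // defined-behavior way to reinterpret the bytes.
    uint64_t DoubleBitsOf(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return bits;
    }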
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Opcode opcode() const override { return LInstruction::kArithmeticD; }
+ void CompileToNative(LCodeGen* generator) override;
+ const char* Mnemonic() const override;
+
+ private:
+ Token::Value op_;
+};
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ Opcode opcode() const override { return LInstruction::kArithmeticT; }
+ void CompileToNative(LCodeGen* generator) override;
+ const char* Mnemonic() const override;
+
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+ private:
+ Token::Value op_;
+};
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ DCHECK(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+ void PrintDataTo(StringStream* stream) override;
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+ LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ temps_[0] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* temp_vector() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) {}
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+
+ void PrintDataTo(StringStream* stream) override;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ void PrintDataTo(StringStream* stream) override;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+ const ZoneList<LOperand*>& operands, Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount,
+ zone) {
+ DCHECK(descriptor.GetRegisterParameterCount() +
+ kImplicitRegisterParameterCount ==
+ operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ // The target and context are passed as implicit parameters that are not
+ // explicitly listed in the descriptor.
+ static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ CallInterfaceDescriptor descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ int InputCount() final { return inputs_.length(); }
+ LOperand* InputAt(int i) final { return inputs_[i]; }
+
+ int TempCount() final { return 0; }
+ LOperand* TempAt(int i) final { return NULL; }
+};
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+ LOperand* slot, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
+ }
+
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+ void PrintDataTo(StringStream* stream) override;
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
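[editor's note] NeedsCanonicalization() above skips the NaN check when the stored value comes straight out of an add/sub/mul/div, since those already produce the canonical quiet NaN. A hedged scalar sketch of the canonicalization being gated — illustrative only, not V8's macro:

    #include <cmath>
    #include <limits>

    // Before a double is written into a FAST_DOUBLE_ELEMENTS backing
    // store, any NaN must be replaced with the canonical quiet NaN so an
    // arbitrary NaN payload cannot collide with the hole-NaN marker.
    double CanonicalizeForStore(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }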
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+ LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+ LOperand* value, LOperand* slot, LOperand* vector) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ temps_[0] = slot;
+ temps_[1] = vector;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp_slot() { return temps_[0]; }
+ LOperand* temp_vector() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LTransitionElementsKind(LOperand* object, LOperand* context,
+ LOperand* new_map_temp) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = new_map_temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+ LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+ LOperand* key, LOperand* current_capacity) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = elements;
+ inputs_[3] = key;
+ inputs_[4] = current_capacity;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* elements() { return inputs_[2]; }
+ LOperand* key() { return inputs_[3]; }
+ LOperand* current_capacity() { return inputs_[4]; }
+
+ DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+ DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+class LCheckArrayBufferNotNeutered final
+ : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+ LOperand* view() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+ "check-array-buffer-not-neutered")
+ DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
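[editor's note] The three clamp instructions share one scalar contract; a rough standalone model follows (assumed semantics: NaN maps to 0, out-of-range values saturate, in-range doubles round to nearest — ties-to-even on the hardware path):

    #include <cmath>
    #include <cstdint>

    // Approximate scalar model of clamp-*-to-uint8: saturate to
    // [0, 255], send NaN to 0, and round in-range doubles to the nearest
    // integer (std::nearbyint uses round-to-nearest-even by default).
    uint8_t ClampToUint8(double value) {
      if (!(value > 0.0)) return 0;    // NaN and non-positive values.
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(value));
    }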
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ void PrintDataTo(StringStream* stream) override;
+};
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(info, graph),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ next_block_(NULL),
+ allocator_(allocator) {}
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
+ static bool HasMagicNumberForDivisor(int32_t divisor);
+
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathFround(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HDiv* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // An operand created by UseRegister is guaranteed to be live until the
+ // end of the instruction, so the register allocator will not reuse its
+ // register for any other operand inside the instruction.
+ // An operand created by UseRegisterAtStart is guaranteed to be live only
+ // at the instruction start; the register allocator is free to assign the
+ // same register to some other operand used inside the instruction
+ // (i.e. a temporary or the output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand that must be a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+ // An input operand in a register, a stack slot, or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for defining the result of an instruction.
+ // They return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly, so we do not attach an environment to the
+ // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr, HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ void VisitInstruction(HInstruction* current);
+ void AddInstruction(LInstruction* instr, HInstruction* current);
+
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ HBasicBlock* next_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CRANKSHAFT_S390_LITHIUM_S390_H_
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 31ff12537e..28dfe8a8dd 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -125,7 +125,7 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
@@ -306,34 +306,27 @@ bool LCodeGen::GenerateJumpTable() {
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
/* stack layout
- 4: return address <-- rsp
- 3: garbage
+ 3: return address <-- rsp
2: garbage
1: garbage
0: garbage
*/
- // Reserve space for context and stub marker.
- __ subp(rsp, Immediate(2 * kPointerSize));
- __ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
- __ Push(kScratchRegister); // Save entry address for ret(0)
+ // Reserve space for stub marker.
+ __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
+ __ Push(MemOperand(
+ rsp, TypedFrameConstants::kFrameTypeSize)); // Copy return address.
+ __ Push(kScratchRegister);
/* stack layout
- 4: return address
- 3: garbage
+ 3: return address
2: garbage
1: return address
0: entry address <-- rsp
*/
- // Remember context pointer.
- __ movp(kScratchRegister,
- MemOperand(rbp, StandardFrameConstants::kContextOffset));
- // Save context pointer into the stack frame.
- __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
-
// Create a stack frame.
- __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
- __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
+ __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
+ __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@@ -342,8 +335,7 @@ bool LCodeGen::GenerateJumpTable() {
__ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
/* stack layout
- 4: old rbp
- 3: context pointer
+ 3: old rbp
2: stub marker
1: return address
0: entry address <-- rsp
@@ -379,9 +371,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ pushq(rbp); // Caller's frame pointer.
- __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ leap(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -2012,16 +2003,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ j(equal, instr->TrueLabel(chunk_));
@@ -2029,13 +2021,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Cmp(reg, Smi::FromInt(0));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2058,13 +2050,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2075,19 +2067,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2317,11 +2309,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(rdx));
DCHECK(ToRegister(instr->right()).is(rax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ testp(rax, rax);
-
- EmitBranch(instr, TokenToCondition(instr->op(), false));
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
@@ -3011,11 +3002,11 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+ __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
@@ -3030,6 +3021,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ movp(result, rbp);
}
}
@@ -3141,13 +3134,24 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(rax);
+ // It is safe to use rbx, rcx and r8 as scratch registers here given that
+ // 1) we are not going to return to the caller function anyway,
+ // 2) rbx (expected number of arguments) will be initialized below.
+ PrepareForTailCall(actual, rbx, rcx, r8);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3186,10 +3190,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3206,23 +3209,36 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ Set(rax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ Jump(target);
+ } else {
+ __ Call(target);
+ }
}
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ }
} else {
// We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
- generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
}
}
@@ -3264,39 +3280,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- // Change context.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Set(rax, instr->arity());
-
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- Handle<JSFunction> jsfun = Handle<JSFunction>::null();
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
- generator.BeforeCall(__ CallSize(target));
- __ Call(target);
- }
- generator.AfterCall();
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3304,8 +3287,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(rax) ? rcx : rax;
- Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+ uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
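[editor's note] The replacement above picks scratch registers by peeling the lowest set bit off an availability mask, instead of the old hard-coded rax/rcx/rdx dance. A standalone model of that selection, using a GCC/Clang builtin in place of base::bits::CountTrailingZeros32:

    #include <cstdint>

    // Take the lowest-numbered register code still in the mask and
    // remove it, mirroring the
    // Register::from_code(CountTrailingZeros32(available_regs)) pattern.
    int TakeLowestRegisterCode(uint32_t* available) {
      int code = __builtin_ctz(*available);  // index of the lowest set bit
      *available &= *available - 1;          // clear that bit
      return code;
    }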
@@ -3402,8 +3396,14 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
}
}
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ Roundsd(output_reg, input_reg, kRoundDown);
+}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3461,8 +3461,23 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ XMMRegister xmm_scratch = double_scratch0();
+ XMMRegister output_reg = ToDoubleRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ CpuFeatureScope scope(masm(), SSE4_1);
+ Label done;
+ __ Roundsd(output_reg, input_reg, kRoundUp);
+ __ Move(xmm_scratch, -0.5);
+ __ Addsd(xmm_scratch, output_reg);
+ __ Ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &done, Label::kNear);
+ __ Move(xmm_scratch, 1.0);
+ __ Subsd(output_reg, xmm_scratch);
+ __ bind(&done);
+}
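[editor's note] The SSE4.1 sequence in DoMathRoundD rounds up first and then backs off by one when it overshot by more than half. An equivalent scalar sketch — my reading of the code above, matching JS Math.round's round-half-toward-+infinity behavior:

    #include <cmath>

    // ceil(x) is kept when ceil(x) - 0.5 <= x, i.e. the fractional part
    // is at least 0.5; otherwise 1.0 is subtracted, yielding floor(x).
    // Ties therefore round toward +infinity (e.g. -2.5 -> -2.0).
    double MathRoundD(double x) {
      double up = std::ceil(x);
      return (up - 0.5 <= x) ? up : up - 1.0;
    }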
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3655,54 +3670,77 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Lzcntl(result, input);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
+ }
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
}
-}
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ movp(rbp, scratch2);
+ __ SmiToInteger32(
+ caller_args_count_reg,
+ Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
- int arity = instr->arity();
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ movp(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(rdx));
- DCHECK(vector_register.is(rbx));
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack);
+ Comment(";;; }");
+}
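[editor's note] The helper's only data question is how many arguments the caller's frame holds, so the tail call can drop them together with the current frame. A hedged scalar restatement of that decision — the names here are illustrative, not V8 APIs:

    // An arguments-adaptor frame records the actual argument count it was
    // built with; otherwise the caller pushed exactly the function
    // literal's formal parameter count.
    int CallerArgumentCount(bool caller_is_adaptor_frame, int adaptor_length,
                            int literal_parameter_count) {
      return caller_is_adaptor_frame ? adaptor_length
                                     : literal_parameter_count;
    }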
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
+ DCHECK(ToRegister(instr->context()).is(rsi));
+ DCHECK(ToRegister(instr->function()).is(rdi));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ Move(vector_register, vector);
- __ Move(slot_register, Smi::FromInt(index));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use rbx, rcx and r8 as scratch registers here given that
+ // 1) we are not going to return to the caller function anyway,
+ // 2) rbx (expected number of arguments) will be initialized below.
+ PrepareForTailCall(actual, rbx, rcx, r8);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(rdi, no_reg, actual, flag, generator);
} else {
- __ Set(rax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
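
The invoke path itself changes in two ways: a tail call lowers to a jump instead of a call, and because control never returns here, no lazy-deopt safepoint is recorded afterwards (see the !is_tail_call guard in CallKnownFunction further down). A minimal model of that selection:

    #include <cassert>

    // Model of the call-vs-jump choice above: a tail call jumps and never
    // returns, so no safepoint is recorded after it.
    enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };

    struct Emission {
      InvokeFlag flag;
      bool records_safepoint;
    };

    Emission EmitInvoke(bool is_tail_call) {
      InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
      return {flag, !is_tail_call};
    }

    int main() {
      assert(EmitInvoke(true).flag == JUMP_FUNCTION);
      assert(!EmitInvoke(true).records_safepoint);
      assert(EmitInvoke(false).flag == CALL_FUNCTION);
      assert(EmitInvoke(false).records_safepoint);
      return 0;
    }
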
@@ -5208,13 +5246,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(rax));
- __ Push(rax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5574,13 +5605,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index 873a3dd1ac..139645e6cd 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -192,11 +192,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
// Generate a direct call to a known function. Expects the function
// to be in rdi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 6be40931de..e86b90c838 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -261,27 +261,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -602,11 +581,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -921,17 +896,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
@@ -941,7 +906,11 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
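
The prologue is now treated as a call whenever the literal has heap slots, presumably because allocating the function context can enter the runtime; MarkAsCall is what makes AddInstruction (via the CreateLazyBailoutForCall refactor above) attach a lazy bailout behind it. A toy model of the decision, with invented types:

    #include <cassert>

    // Invented stand-ins for LPrologue / MarkAsCall, just to show the shape.
    struct Instr { bool is_call; };

    Instr* MarkAsCall(Instr* i) {
      i->is_call = true;
      return i;
    }

    Instr* BuildPrologue(Instr* storage, int num_heap_slots) {
      storage->is_call = false;
      Instr* result = storage;
      if (num_heap_slots > 0) result = MarkAsCall(result);  // needs a context
      return result;
    }

    int main() {
      Instr a{}, b{};
      assert(!BuildPrologue(&a, 0)->is_call);  // no heap slots: plain prologue
      assert(BuildPrologue(&b, 3)->is_call);   // heap context: call semantics
      return 0;
    }
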
@@ -954,14 +923,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1081,16 +1050,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1114,6 +1073,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1122,6 +1084,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1152,22 +1117,33 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
}
}
-
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new (zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathFloorD* result = new (zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathRoundD* result = new (zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
}
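
Both Math.floor and Math.round are split by result representation here: the integer variants keep the environment (they can deoptimize, e.g. for -0 or out-of-range inputs) and gain a pointer map, while the double variants need neither. The dispatch, reduced to a sketch:

    #include <cassert>
    #include <string>

    // Toy model of the representation dispatch above (not V8 code).
    enum class Rep { kInteger32, kDouble };

    std::string SelectFloorInstr(Rep result_rep) {
      // Integer results may deopt, so they get an environment and a pointer
      // map; double results are infallible.
      return result_rep == Rep::kInteger32
                 ? "math-floor-i (env + pointer map)"
                 : "math-floor-d (no env)";
    }

    int main() {
      assert(SelectFloorInstr(Rep::kInteger32) ==
             "math-floor-i (env + pointer map)");
      assert(SelectFloorInstr(Rep::kDouble) == "math-floor-d (no env)");
      return 0;
    }
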
-
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
@@ -1234,21 +1210,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(rdx);
- vector = FixedTemp(rbx);
- }
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LCallRuntime* result = new(zone()) LCallRuntime(context);
@@ -1813,13 +1774,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2540,13 +2494,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* value = UseFixed(instr->value(), rbx);
@@ -2584,11 +2531,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
@@ -2650,13 +2595,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 406159b1ff..1feba4bf20 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -29,9 +29,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -101,12 +99,14 @@ class LCodeGen;
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
- V(MathFloor) \
+ V(MathFloorD) \
+ V(MathFloorI) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
- V(MathRound) \
+ V(MathRoundD) \
+ V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -131,7 +131,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -143,7 +142,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -152,7 +150,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -224,6 +221,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -262,6 +266,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
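
The tail-call flag shares the instruction's existing bit_field_ word, claiming the bit adjacent to IsCallBits via kNext. A self-contained re-implementation of that two-flag layout (a simplified stand-in for V8's BitField template, restricted to single-bit booleans):

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for BitField<bool, shift, 1>.
    template <int kShift>
    struct BoolBit {
      static const int kNext = kShift + 1;
      static uint32_t update(uint32_t word, bool value) {
        return (word & ~(1u << kShift)) |
               (static_cast<uint32_t>(value) << kShift);
      }
      static bool decode(uint32_t word) { return (word >> kShift) & 1u; }
    };

    typedef BoolBit<0> IsCallBits;
    typedef BoolBit<IsCallBits::kNext> IsSyntacticTailCallBits;  // bit 1

    int main() {
      uint32_t bits = 0;
      bits = IsSyntacticTailCallBits::update(bits, true);
      assert(IsSyntacticTailCallBits::decode(bits));
      assert(!IsCallBits::decode(bits));  // neighbouring flag untouched
      bits = IsCallBits::update(bits, true);
      assert(IsSyntacticTailCallBits::decode(bits));  // still set
      return 0;
    }
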
@@ -544,6 +550,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -805,23 +812,43 @@ class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
void PrintDataTo(StringStream* stream) override;
};
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
+ explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* value, LOperand* temp) {
+ LMathRoundI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -829,7 +856,7 @@ class LMathRound final : public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
@@ -1715,23 +1742,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1790,29 +1800,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2399,19 +2386,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2528,18 +2502,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index f80e0768a6..1ca3a99271 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -57,13 +57,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -98,31 +91,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -130,61 +98,29 @@ bool LCodeGen::GeneratePrologue() {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
- __ StubPrologue();
+ __ StubPrologue(StackFrame::STUB);
} else {
__ Prologue(info()->GeneratePreagedPrologue());
}
}
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, kFrameIsExpectedToBeAligned);
- }
-
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
DCHECK(slots != 0 || !info()->IsOptimizing());
if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
+ MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
+ if (FLAG_debug_code) {
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
}
}
@@ -265,50 +201,11 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
- // Move state of dynamic frame alignment into edx.
- __ Move(edx, Immediate(kNoAlignmentPadding));
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- // Align ebp + 4 to a multiple of 2 * kPointerSize.
- __ test(ebp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
-
- // Move all parts of the frame over one word. The frame consists of:
- // unoptimized frame slots, alignment state, context, frame pointer, return
- // address, receiver, and the arguments.
- __ mov(ecx, Immediate(scope()->num_parameters() +
- 5 + graph()->osr()->UnoptimizedFrameSlots()));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ sub(Operand(ebp), Immediate(kPointerSize));
- __ bind(&do_not_pad);
- }
-
- // Save the first local, which is overwritten by the alignment state.
- Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
- __ push(alignment_loc);
-
- // Set the dynamic frame alignment state.
- __ mov(alignment_loc, edx);
-
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 1);
- __ sub(esp, Immediate((slots - 1) * kPointerSize));
-
- // Initailize FPU state.
- __ fninit();
+ DCHECK(slots >= 0);
+ __ sub(esp, Immediate(slots * kPointerSize));
}
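
With the alignment slot gone, the OSR frame adjustment loses its off-by-one: previously one unoptimized slot was repurposed for the dynamic-alignment state, hence the old slots >= 1 assertion and the slots - 1 subtraction. The before/after arithmetic:

    #include <cassert>

    // Sketch of the frame-size adjustment, pre- and post-patch.
    int OsrStackAdjustment(int stack_slots, int unoptimized_slots,
                           bool reserves_alignment_slot) {
      int slots = stack_slots - unoptimized_slots;
      // Old scheme kept one slot for the alignment marker.
      return reserves_alignment_slot ? slots - 1 : slots;
    }

    int main() {
      assert(OsrStackAdjustment(10, 4, true) == 5);   // pre-patch behaviour
      assert(OsrStackAdjustment(10, 4, false) == 6);  // post-patch behaviour
      return 0;
    }
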
@@ -376,32 +273,24 @@ bool LCodeGen::GenerateJumpTable() {
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
-
/* stack layout
- 4: entry address
- 3: return address <-- esp
- 2: garbage
+ 3: entry address
+ 2: return address <-- esp
1: garbage
0: garbage
*/
- __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
- __ push(MemOperand(esp, kPointerSize)); // Copy return address.
- __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+ __ push(MemOperand(esp, 0)); // Copy return address.
+ __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
/* stack layout
4: entry address
3: return address
- 2: garbage
1: return address
0: entry address <-- esp
*/
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
-
- // Copy context.
- __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
// Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@@ -411,8 +300,7 @@ bool LCodeGen::GenerateJumpTable() {
Immediate(Smi::FromInt(StackFrame::STUB)));
/* stack layout
- 4: old ebp
- 3: context pointer
+ 3: old ebp
2: stub marker
1: return address
0: entry address <-- esp
@@ -449,9 +337,8 @@ bool LCodeGen::GenerateDeferredCode() {
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
__ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -2240,15 +2127,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected =
+ instr->hydrogen()->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -2256,13 +2144,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SMI)) {
+ if (expected.Contains(ToBooleanICStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
@@ -2282,18 +2170,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, instr->FalseLabel(chunk_));
}
}
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::STRING)) {
+ if (expected.Contains(ToBooleanICStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2304,19 +2192,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ if (expected.Contains(ToBooleanICStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2524,7 +2412,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
}
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
}
@@ -2554,11 +2442,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->left()).is(edx));
DCHECK(ToRegister(instr->right()).is(eax));
- Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ test(eax, eax);
-
- EmitBranch(instr, ComputeCompareCondition(instr->op()));
+ __ CompareRoot(eax, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, equal);
}
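
This hunk tracks a changed StringCompare contract: the stub now receives the comparison token and materializes the boolean heap value itself, so the caller compares eax against the true root instead of testing an integer result and deriving a condition per token. Behaviourally, roughly:

    #include <cassert>
    #include <cstring>

    // Model of the new contract: the comparison op is resolved inside the
    // "stub" and a boolean comes back. Token set reduced for illustration.
    enum class Token { LT, GT };

    bool StringCompareResult(Token op, const char* a, const char* b) {
      int r = std::strcmp(a, b);  // stand-in for the runtime comparison
      return op == Token::LT ? r < 0 : r > 0;
    }

    int main() {
      assert(StringCompareResult(Token::LT, "abc", "abd"));
      assert(!StringCompareResult(Token::GT, "abc", "abd"));
      return 0;
    }
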
@@ -2717,7 +2604,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ Immediate(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
@@ -2750,18 +2637,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
__ bind(&done);
}
-
-void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
- int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+void LCodeGen::EmitReturn(LReturn* instr) {
+ int extra_value_count = 1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
- if (dynamic_frame_alignment && FLAG_debug_code) {
- __ cmp(Operand(esp,
- (parameter_count + extra_value_count) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
@@ -2769,19 +2649,9 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
// The argument count parameter is a smi
__ SmiUntag(reg);
Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
- if (dynamic_frame_alignment && FLAG_debug_code) {
- DCHECK(extra_value_count == 2);
- __ cmp(Operand(esp, reg, times_pointer_size,
- extra_value_count * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
- }
// emit code to restore stack based on instr->parameter_count()
__ pop(return_addr_reg); // save return address
- if (dynamic_frame_alignment) {
- __ inc(reg); // 1 more for alignment
- }
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
@@ -2799,25 +2669,12 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit);
}
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
}
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding, Label::kNear);
- EmitReturn(instr, true);
- __ bind(&no_padding);
- }
-
- EmitReturn(instr, false);
+ EmitReturn(instr);
}
@@ -3218,11 +3075,12 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
+ } else if (instr->hydrogen()->arguments_adaptor()) {
// Check for arguments adapter frame.
Label done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ mov(result,
+ Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(Operand(result),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted, Label::kNear);
@@ -3238,6 +3096,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
+ } else {
+ __ mov(result, Operand(ebp));
}
}
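
HArgumentsElements now records whether an adaptor frame is even possible, so the probe is emitted only when needed; the new else branch takes the current frame pointer directly. The three-way choice, reduced:

    #include <cassert>
    #include <string>

    // Sketch of the dispatch above (illustrative labels, not V8 code).
    std::string ArgumentsElementsSource(bool from_inlined, bool maybe_adapted) {
      if (from_inlined) return "esp-relative slot";
      if (maybe_adapted) return "probe caller frame for adaptor";
      return "current frame pointer";  // new fast path
    }

    int main() {
      assert(ArgumentsElementsSource(true, false) == "esp-relative slot");
      assert(ArgumentsElementsSource(false, true) ==
             "probe caller frame for adaptor");
      assert(ArgumentsElementsSource(false, false) == "current frame pointer");
      return 0;
    }
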
@@ -3272,6 +3132,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label receiver_ok, global_object;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
Register scratch = ToRegister(instr->temp());
if (!instr->hydrogen()->known_function()) {
@@ -3280,20 +3141,20 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, dist);
}
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
__ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
@@ -3341,13 +3202,25 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
+
+ InvokeFlag flag = CALL_FUNCTION;
+ if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
+ DCHECK(!info()->saves_caller_doubles());
+ // TODO(ishell): drop current frame before pushing arguments to the stack.
+ flag = JUMP_FUNCTION;
+ ParameterCount actual(eax);
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
+
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
- safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}
@@ -3391,10 +3264,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
CallRuntime(Runtime::kDeclareGlobals, instr);
}
-
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr) {
+ bool is_tail_call, LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
@@ -3410,21 +3282,38 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
+ bool is_self_call = function.is_identical_to(info()->closure());
+
// Invoke function directly.
- if (function.is_identical_to(info()->closure())) {
- __ CallSelf();
+ if (is_self_call) {
+ Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
+ if (is_tail_call) {
+ __ Jump(self, RelocInfo::CODE_TARGET);
+ } else {
+ __ Call(self, RelocInfo::CODE_TARGET);
+ }
} else {
- __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+ Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
+ if (is_tail_call) {
+ __ jmp(target);
+ } else {
+ __ call(target);
+ }
+ }
+
+ if (!is_tail_call) {
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
+ ParameterCount actual(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(function_reg, expected, actual, flag, generator);
}
}
@@ -3466,35 +3355,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
-void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, instr->arity());
-
- bool is_self_call = false;
- if (instr->hydrogen()->function()->IsConstant()) {
- HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
- Handle<JSFunction> jsfun =
- Handle<JSFunction>::cast(fun_const->handle(isolate()));
- is_self_call = jsfun.is_identical_to(info()->closure());
- }
-
- if (is_self_call) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3502,8 +3362,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+ uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+ available_regs &= ~input_reg.bit();
+ if (instr->context()->IsRegister()) {
+ // Make sure that the context isn't overwritten in the AllocateHeapNumber
+ // macro below.
+ available_regs &= ~ToRegister(instr->context()).bit();
+ }
+
+ Register tmp =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+ available_regs &= ~tmp.bit();
+ Register tmp2 =
+ Register::from_code(base::bits::CountTrailingZeros32(available_regs));
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
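
The scratch-register selection above replaces nested ternaries with a bitmask of available registers, repeatedly taking the lowest set bit. The same trick in isolation (the register codes shown are the usual ia32 encodings, used here purely for illustration):

    #include <cassert>
    #include <cstdint>

    // Stand-in for base::bits::CountTrailingZeros32 (assumes x != 0).
    int CountTrailingZeros32(uint32_t x) {
      int n = 0;
      while ((x & 1u) == 0) {
        x >>= 1;
        ++n;
      }
      return n;
    }

    int main() {
      const int eax = 0, ecx = 1, edx = 2, ebx = 3;
      uint32_t available = (1u << eax) | (1u << ecx) | (1u << edx) | (1u << ebx);
      available &= ~(1u << ecx);  // exclude the input register, say ecx
      int tmp = CountTrailingZeros32(available);   // lowest remaining: eax
      available &= ~(1u << tmp);
      int tmp2 = CountTrailingZeros32(available);  // next lowest: edx
      assert(tmp == eax);
      assert(tmp2 == edx);
      return 0;
    }
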
@@ -3621,6 +3492,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ sub(esp, Immediate(kPointerSize));
__ fist_s(Operand(esp, 0));
__ pop(output_reg);
+ __ X87SetRC(0x0000);
__ X87CheckIA();
DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
__ fnclex();
@@ -3653,6 +3525,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Clear exception bits.
__ fnclex();
__ fistp_s(MemOperand(esp, 0));
+ // Restore round mode.
+ __ X87SetRC(0x0000);
// Check overflow.
__ X87CheckIA();
__ pop(result);
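
Both fistp paths now reset the x87 rounding-control field afterwards; assuming X87SetRC writes bits 10-11 of the FPU control word, the 0x0000 argument selects round-to-nearest-even. The encoding, for reference:

    #include <cassert>
    #include <cstdint>

    // x87 control word, rounding-control field (bits 10-11):
    //   00 = round to nearest even, 01 = round down,
    //   10 = round up,              11 = truncate toward zero.
    const uint16_t kRCMask = 0x0C00;

    uint16_t SetRC(uint16_t control_word, uint16_t rc_bits) {
      return static_cast<uint16_t>((control_word & ~kRCMask) |
                                   (rc_bits & kRCMask));
    }

    int main() {
      uint16_t cw = 0x0F7F;  // example control word with RC = 11 (truncate)
      cw = SetRC(cw, 0x0000);
      assert((cw & kRCMask) == 0x0000);  // back to round-to-nearest-even
      return 0;
    }
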
@@ -3687,6 +3561,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Clear exception bits.
__ fnclex();
__ fistp_s(MemOperand(esp, 0));
+ // Restore round mode.
+ __ X87SetRC(0x0000);
// Check overflow.
__ X87CheckIA();
__ pop(result);
@@ -3927,54 +3803,78 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
X87CommitWrite(result_reg);
}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- Handle<JSFunction> known_function = instr->hydrogen()->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+#if DEBUG
+ if (actual.is_reg()) {
+ DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
} else {
- CallKnownFunction(known_function,
- instr->hydrogen()->formal_parameter_count(),
- instr->arity(), instr);
+ DCHECK(!AreAliased(scratch1, scratch2, scratch3));
}
-}
+#endif
+ if (FLAG_code_comments) {
+ if (actual.is_reg()) {
+ Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ } else {
+ Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
+ }
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Register caller_args_count_reg = scratch1;
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+ // Drop current frame and load arguments count from arguments adaptor frame.
+ __ mov(ebp, scratch2);
+ __ mov(caller_args_count_reg,
+ Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(caller_args_count_reg);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- HCallFunction* hinstr = instr->hydrogen();
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count.
+ __ mov(caller_args_count_reg,
+ Immediate(info()->literal()->parameter_count()));
+
+ __ bind(&formal_parameter_count_loaded);
+ __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
+ ReturnAddressState::kNotOnStack, 0);
+ Comment(";;; }");
+}
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ HInvokeFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- ConvertReceiverMode mode = hinstr->convert_mode();
- if (hinstr->HasVectorAndSlot()) {
- Register slot_register = ToRegister(instr->temp_slot());
- Register vector_register = ToRegister(instr->temp_vector());
- DCHECK(slot_register.is(edx));
- DCHECK(vector_register.is(ebx));
+ DCHECK(instr->HasPointerMap());
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
- int index = vector->GetIndex(hinstr->slot());
+ bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
- __ mov(vector_register, vector);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
+ if (is_tail_call) {
+ DCHECK(!info()->saves_caller_doubles());
+ ParameterCount actual(instr->arity());
+ // It is safe to use ebx, ecx and edx as scratch registers here given that
+ // 1) we are not going to return to caller function anyway,
+ // 2) ebx (expected arguments count) and edx (new.target) will be
+ // initialized below.
+ PrepareForTailCall(actual, ebx, ecx, edx);
+ }
- Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ Handle<JSFunction> known_function = hinstr->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount actual(instr->arity());
+ InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
+ __ InvokeFunction(edi, no_reg, actual, flag, generator);
} else {
- __ Set(eax, arity);
- CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+ CallKnownFunction(known_function, hinstr->formal_parameter_count(),
+ instr->arity(), is_tail_call, instr);
}
}
@@ -5181,7 +5081,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- 1 << JSArrayBuffer::WasNeutered::kShift);
+ Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
@@ -5197,8 +5097,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
// If there is only one type in the interval check for equality.
if (first == last) {
@@ -5207,8 +5106,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
@@ -5219,7 +5117,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
} else {
@@ -5589,13 +5487,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- DCHECK(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5662,7 +5553,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
@@ -5683,7 +5574,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
// clang-format off
@@ -5947,13 +5838,6 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ bind(&done);
}
-
-void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
- Register context = ToRegister(instr->context());
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 0cfbf70388..3719236a40 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -31,8 +31,6 @@ class LCodeGen: public LCodeGenBase {
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
@@ -221,11 +219,14 @@ class LCodeGen: public LCodeGenBase {
void LoadContextFromDeferred(LOperand* context);
- // Generate a direct call to a known function. Expects the function
+ void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
+ Register scratch2, Register scratch3);
+
+ // Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
- LInstruction* instr);
+ bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -329,7 +330,7 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+ void EmitReturn(LReturn* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
@@ -354,8 +355,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
bool frame_is_built_;
class X87Stack : public ZoneObject {
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index f770509076..163d2c9cfb 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -278,27 +278,6 @@ void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
}
-void LCallFunction::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- if (hydrogen()->HasVectorAndSlot()) {
- stream->Add(" (type-feedback-vector ");
- temp_vector()->PrintTo(stream);
- stream->Add(" ");
- temp_slot()->PrintTo(stream);
- stream->Add(")");
- }
-}
-
-
-void LCallJSFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add("#%d / ", arity());
-}
-
-
void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
for (int i = 0; i < InputCount(); i++) {
InputAt(i)->PrintTo(stream);
@@ -445,13 +424,6 @@ LPlatformChunk* LChunkBuilder::Build() {
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 4);
- USE(alignment_state_index);
- }
-
// If compiling for OSR, reserve space for the unoptimized frame,
// which will be subsumed into this frame.
if (graph()->has_osr()) {
@@ -623,12 +595,7 @@ LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
- return instr;
+ return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
}
@@ -952,22 +919,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall() || instr->IsPrologue()) {
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(current_block_->last_environment());
- hydrogen_value_for_lazy_bailout = sim;
- }
- LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
- }
+ CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
}
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- return new (zone()) LPrologue();
+ LInstruction* result = new (zone()) LPrologue();
+ if (info_->num_heap_slots() > 0) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -980,8 +941,8 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+ ToBooleanICStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
@@ -990,7 +951,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
: new (zone()) LBranch(UseRegisterAtStart(value), temp);
if (!easy_case &&
- ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
@@ -1118,16 +1079,6 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
}
-LInstruction* LChunkBuilder::DoCallJSFunction(
- HCallJSFunction* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
-
- LCallJSFunction* result = new(zone()) LCallJSFunction(function);
-
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) {
CallInterfaceDescriptor descriptor = instr->descriptor();
@@ -1150,6 +1101,9 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
descriptor, ops, zone());
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1158,6 +1112,9 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
+ result->MarkAsSyntacticTailCall();
+ }
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1263,22 +1220,6 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
}
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(edx);
- vector = FixedTemp(ebx);
- }
-
- LCallFunction* call =
- new (zone()) LCallFunction(context, function, slot, vector);
- return MarkAsCall(DefineFixed(call, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
@@ -1840,13 +1781,6 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
}
-LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
- HBoundsCheckBaseIndexInformation* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -2512,11 +2446,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kNotEnoughSpillSlotsForOsr);
spill_index = 0;
}
- if (spill_index == 0) {
- // The dynamic frame alignment state overwrites the first local.
- // The first local is saved at the end of the unoptimized frame.
- spill_index = graph()->osr()->UnoptimizedFrameSlots();
- }
spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2556,13 +2485,6 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), ebx);
@@ -2600,11 +2522,9 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
outer->set_ast_id(instr->ReturnId());
HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind());
+ HEnvironment* inner = outer->CopyForInlining(
+ instr->closure(), instr->arguments_count(), instr->function(), undefined,
+ instr->inlining_kind(), instr->syntactic_tail_call_mode());
// Only replay binding of arguments object if it wasn't removed from graph.
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object());
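
CopyForInlining builds the callee's environment from the caller's; the extra syntactic_tail_call_mode() argument records whether the inlined call site was in tail position, which matters when the deoptimizer later reconstructs frames (a tail call replaces the caller's frame instead of stacking a new one). A toy sketch of threading that mode through an environment copy (stand-in types, not V8's HEnvironment):

    enum class TailCallMode { kAllow, kDisallow };

    struct Environment {
      const Environment* outer;     // caller's environment, if any
      int arguments_count;
      TailCallMode tail_call_mode;  // remembered for later frame rebuilding

      Environment* CopyForInlining(int argc, TailCallMode mode) const {
        // The inner environment keeps a link to the outer one plus the
        // tail-call mode of the call site that entered it.
        return new Environment{this, argc, mode};
      }
    };
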
@@ -2665,13 +2585,6 @@ LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
return AssignPointerMap(result);
}
-
-LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->context());
- return new(zone()) LStoreFrameContext(context);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index 0f2813f85a..d83322acd3 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -33,9 +33,7 @@ class LCodeGen;
V(BitI) \
V(BoundsCheck) \
V(Branch) \
- V(CallJSFunction) \
V(CallWithDescriptor) \
- V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
V(CheckArrayBufferNotNeutered) \
@@ -136,7 +134,6 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
- V(StoreFrameContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -148,7 +145,6 @@ class LCodeGen;
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
- V(ToFastProperties) \
V(TransitionElementsKind) \
V(TrapAllocationMemento) \
V(Typeof) \
@@ -229,6 +225,13 @@ class LInstruction : public ZoneObject {
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsSyntacticTailCall() {
+ bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
+ }
+ bool IsSyntacticTailCall() const {
+ return IsSyntacticTailCallBits::decode(bit_field_);
+ }
+
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
@@ -267,6 +270,8 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
+ class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
+ };
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
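
IsSyntacticTailCallBits is allocated immediately after IsCallBits inside the same bit_field_ word; DoCallWithDescriptor and DoInvokeFunction above set it via MarkAsSyntacticTailCall(). V8's BitField template packs typed fields into one integer, with kNext naming the first free bit after a field so the next field can chain onto it. A minimal re-implementation for illustration (the real template carries more machinery):

    #include <cstdint>

    // Packs a value of type T into `size` bits starting at bit `shift`.
    template <class T, int shift, int size>
    class BitField {
     public:
      static const int kNext = shift + size;  // first bit after this field
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
      static uint32_t update(uint32_t bits, T value) {
        return (bits & ~kMask) | encode(value);
      }
    };

    // Two boolean flags sharing one word, as in LInstruction::bit_field_:
    class IsCallBits : public BitField<bool, 0, 1> {};
    class IsSyntacticTailCallBits
        : public BitField<bool, IsCallBits::kNext, 1> {};
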
@@ -555,6 +560,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
LOperand* elements() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+ DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
};
@@ -1735,23 +1741,6 @@ class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
};
-class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallJSFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
- DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
@@ -1810,29 +1799,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
- public:
- LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = function;
- temps_[0] = slot;
- temps_[1] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp_slot() { return temps_[0]; }
- LOperand* temp_vector() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- void PrintDataTo(StringStream* stream) override;
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2433,19 +2399,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2560,18 +2513,6 @@ class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
};
-class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreFrameContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public: