Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h             5
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc              142
-rw-r--r--  deps/v8/src/x64/assembler-x64.h                78
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                19
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc              78
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                 14
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                    6
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                 138
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           447
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc   12
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc         18
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                 12
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc         59
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h           4
14 files changed, 815 insertions(+), 217 deletions(-)
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index b64bbfb664..c3d2cdf2eb 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -179,6 +179,11 @@ void Assembler::emit_optional_rex_32(Register rm_reg) {
}
+void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
+ if (rm_reg.high_bit()) emit(0x41);
+}
+
+
void Assembler::emit_optional_rex_32(const Operand& op) {
if (op.rex_ != 0) emit(0x40 | op.rex_);
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index ce68524524..dfd51a4bed 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -617,6 +617,24 @@ void Assembler::shift(Register dst,
}
+void Assembler::shift(Operand dst, Immediate shift_amount, int subcode,
+ int size) {
+ EnsureSpace ensure_space(this);
+ DCHECK(size == kInt64Size ? is_uint6(shift_amount.value_)
+ : is_uint5(shift_amount.value_));
+ if (shift_amount.value_ == 1) {
+ emit_rex(dst, size);
+ emit(0xD1);
+ emit_operand(subcode, dst);
+ } else {
+ emit_rex(dst, size);
+ emit(0xC1);
+ emit_operand(subcode, dst);
+ emit(shift_amount.value_);
+ }
+}
+
+
void Assembler::shift(Register dst, int subcode, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -625,6 +643,14 @@ void Assembler::shift(Register dst, int subcode, int size) {
}
+void Assembler::shift(Operand dst, int subcode, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, size);
+ emit(0xD3);
+ emit_operand(subcode, dst);
+}
+
+
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -909,6 +935,14 @@ void Assembler::emit_imul(Register src, int size) {
}
+void Assembler::emit_imul(const Operand& src, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(src, size);
+ emit(0xF7);
+ emit_operand(0x5, src);
+}
+
+
void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
@@ -948,11 +982,13 @@ void Assembler::emit_imul(Register dst, const Operand& src, Immediate imm,
emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
+ emit_operand(dst, src);
+ emit(imm.value_);
} else {
emit(0x69);
+ emit_operand(dst, src);
+ emitl(imm.value_);
}
- emit_operand(dst, src);
- emit(imm.value_);
}
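Editor's note: the emit_imul hunk above fixes a genuine encoding bug. The old code emitted the opcode, then the operand bytes, then a single immediate byte in both branches; but 0x69 (imul r, r/m, imm32) needs a full 32-bit immediate after the operand bytes. A minimal sketch of the corrected ordering, using a hypothetical encode helper (an illustration, not V8 code):

```cpp
#include <cstdint>
#include <vector>

// Encode "imul rax, rcx, imm": operand bytes first, then an immediate
// whose width matches the opcode (0x6B -> imm8, 0x69 -> imm32).
std::vector<uint8_t> EncodeImulRaxRcx(int32_t imm) {
  bool fits8 = imm >= -128 && imm <= 127;
  std::vector<uint8_t> code = {0x48,                          // REX.W
                               uint8_t(fits8 ? 0x6B : 0x69),  // opcode
                               0xC1};  // ModRM: mod=11, reg=rax, rm=rcx
  code.push_back(static_cast<uint8_t>(imm));  // imm8, or low byte of imm32
  if (!fits8) {
    for (int i = 1; i < 4; ++i) {  // remaining three bytes of imm32
      code.push_back(static_cast<uint8_t>(imm >> (8 * i)));
    }
  }
  return code;
}
```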
@@ -1471,7 +1507,23 @@ void Assembler::emit_repmovs(int size) {
}
-void Assembler::mul(Register src) {
+void Assembler::mull(Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src);
+ emit(0xF7);
+ emit_modrm(0x4, src);
+}
+
+
+void Assembler::mull(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src);
+ emit(0xF7);
+ emit_operand(0x4, src);
+}
+
+
+void Assembler::mulq(Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src);
emit(0xF7);
@@ -2586,6 +2638,7 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
void Assembler::psllq(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
+ emit_optional_rex_32(reg);
emit(0x0F);
emit(0x73);
emit_sse_operand(rsi, reg); // rsi == 6
@@ -2593,6 +2646,39 @@ void Assembler::psllq(XMMRegister reg, byte imm8) {
}
+void Assembler::psrlq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rdx, reg); // rdx == 2
+ emit(imm8);
+}
+
+
+void Assembler::pslld(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
+void Assembler::psrld(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg);
+ emit(0x0F);
+ emit(0x72);
+ emit_sse_operand(rdx, reg); // rdx == 2
+ emit(imm8);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
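Editor's note: the four packed-shift emitters added above share two group opcodes: 0x0F 0x73 for the 64-bit shifts and 0x0F 0x72 for the 32-bit ones, with the operation selected by the ModRM reg field (6 = shift left, 2 = logical shift right). Passing rsi/rdx to emit_sse_operand is simply a way to plant those subcode values. The new emit_optional_rex_32(XMMRegister) overload matters for xmm8-xmm15, which need a REX.B prefix the old psllq never emitted. A standalone sketch of the byte layout, using a hypothetical encoder (not V8 code):

```cpp
#include <cstdint>
#include <vector>

// Bytes for "psrlq xmmN, imm8": 66 [41] 0F 73 /2 ib.
// Subcode /6 on the same opcode would encode psllq instead.
std::vector<uint8_t> EncodePsrlq(int xmm, uint8_t imm8) {
  std::vector<uint8_t> code = {0x66};        // operand-size prefix
  if (xmm >= 8) code.push_back(0x41);        // REX.B for xmm8..xmm15
  code.push_back(0x0F);
  code.push_back(0x73);                      // packed-shift group opcode
  code.push_back(static_cast<uint8_t>(0xC0 | (2 << 3) | (xmm & 7)));
  code.push_back(imm8);                      // shift count
  return code;
}
```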
@@ -2683,6 +2769,16 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
}
+void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2723,6 +2819,16 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
}
+void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2793,6 +2899,16 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::subsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2803,6 +2919,16 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::divsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2918,6 +3044,16 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
}
+void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x76);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 529b100e24..3b55396c14 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -810,33 +810,48 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
+ // Multiply eax by src, put the result in edx:eax.
+ void mull(Register src);
+ void mull(const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
- void mul(Register src);
-
-#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
- void instruction##p(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kPointerSize); \
- } \
- \
- void instruction##l(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt32Size); \
- } \
- \
- void instruction##q(Register dst, Immediate imm8) { \
- shift(dst, imm8, subcode, kInt64Size); \
- } \
- \
- void instruction##p_cl(Register dst) { \
- shift(dst, subcode, kPointerSize); \
- } \
- \
- void instruction##l_cl(Register dst) { \
- shift(dst, subcode, kInt32Size); \
- } \
- \
- void instruction##q_cl(Register dst) { \
- shift(dst, subcode, kInt64Size); \
- }
+ void mulq(Register src);
+
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+ void instruction##p(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Register dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kPointerSize); \
+ } \
+ \
+ void instruction##l(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt32Size); \
+ } \
+ \
+ void instruction##q(Operand dst, Immediate imm8) { \
+ shift(dst, imm8, subcode, kInt64Size); \
+ } \
+ \
+ void instruction##p_cl(Register dst) { shift(dst, subcode, kPointerSize); } \
+ \
+ void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
+ \
+ void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
+ \
+ void instruction##p_cl(Operand dst) { shift(dst, subcode, kPointerSize); } \
+ \
+ void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
+ \
+ void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
#undef DECLARE_SHIFT_INSTRUCTION
@@ -1049,6 +1064,9 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
void psllq(XMMRegister reg, byte imm8);
+ void psrlq(XMMRegister reg, byte imm8);
+ void pslld(XMMRegister reg, byte imm8);
+ void psrld(XMMRegister reg, byte imm8);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
@@ -1064,6 +1082,7 @@ class Assembler : public AssemblerBase {
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, const Operand& src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
+ void cvtsd2ss(XMMRegister dst, const Operand& src);
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
@@ -1071,9 +1090,11 @@ class Assembler : public AssemblerBase {
void addsd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
+ void subsd(XMMRegister dst, const Operand& src);
void mulsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
+ void divsd(XMMRegister dst, const Operand& src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
@@ -1084,6 +1105,7 @@ class Assembler : public AssemblerBase {
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
void cmpltsd(XMMRegister dst, XMMRegister src);
+ void pcmpeqd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
@@ -1258,6 +1280,7 @@ class Assembler : public AssemblerBase {
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
+ inline void emit_optional_rex_32(XMMRegister rm_reg);
// Optionally do as emit_rex_32(const Operand&) if the operand register
// numbers have a high bit set.
@@ -1365,9 +1388,11 @@ class Assembler : public AssemblerBase {
int size);
// Emit machine code for a shift operation.
+ void shift(Operand dst, Immediate shift_amount, int subcode, int size);
void shift(Register dst, Immediate shift_amount, int subcode, int size);
// Shift dst by cl % 64 bits.
void shift(Register dst, int subcode, int size);
+ void shift(Operand dst, int subcode, int size);
void emit_farith(int b1, int b2, int i);
@@ -1451,6 +1476,7 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
// rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
void emit_imul(Register src, int size);
+ void emit_imul(const Operand& src, int size);
void emit_imul(Register dst, Register src, int size);
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 194d8a6b2e..64ba35115b 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1075,14 +1075,19 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use inline caching to speed up access to arguments.
if (FLAG_vector_ics) {
- __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(0));
+ // TODO(mvstanton): Vector-based ics need additional infrastructure to
+ // be embedded here. For now, just call the runtime.
+ __ Push(receiver);
+ __ Push(key);
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ } else {
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
}
- Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
// Push the nth argument.
__ Push(rax);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 7d1e4f5e0d..5ea5f72497 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -867,6 +867,34 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label miss;
+
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register index = LoadDescriptor::NameRegister();
+ Register scratch = rbx;
+ Register result = rax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX,
+ RECEIVER_IS_STRING);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ PropertyAccessCompiler::TailCallBuiltin(
+ masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
@@ -2117,6 +2145,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
TypeFeedbackVector::MegamorphicSentinel(isolate));
+ // We have to update statistics for runtime profiling.
+ const int with_types_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+ __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
+ const int generic_offset =
+ FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
+ __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
__ jmp(&slow_start);
}
@@ -2711,14 +2746,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
+ if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+ }
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
@@ -3076,14 +3113,35 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rbx: instance type
// rcx: sub string length (smi)
// rdx: from index (smi)
- StringCharAtGenerator generator(
- rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
+ &runtime, STRING_INDEX_IS_NUMBER,
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in rax.
+ Label check_heap_number, call_builtin;
+ __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_builtin, Label::kNear);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ popq(rcx); // Pop return address.
+ __ pushq(rax);
+ __ pushq(rcx); // Push return address.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
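Editor's note: the new ToNumberStub above keeps two fast paths in generated code and defers everything else to the builtin. In plain C++ the control flow looks roughly like this; Value, IsSmi, and the helpers are illustrative stand-ins, not the V8 API:

```cpp
#include <cstdint>

struct Value { uint64_t bits; };                   // illustrative tagged word
bool IsSmi(Value v) { return (v.bits & 1) == 0; }  // x64 smi tag: low bit 0
bool IsHeapNumber(Value v) {
  return false;  // placeholder for the HeapNumberMap comparison
}
Value CallToNumberBuiltin(Value v) {
  return v;      // placeholder for invoking Builtins::TO_NUMBER
}

Value ToNumberFastPath(Value v) {
  if (IsSmi(v)) return v;            // smis are already numbers
  if (IsHeapNumber(v)) return v;     // so are heap numbers
  return CallToNumberBuiltin(v);     // slow path: push arg, call builtin
}
```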
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 44e1618c31..ceee95488f 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -397,6 +397,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
+
+ // Allocating heap numbers in the loop below can fail and cause a jump to
+ // gc_required. We can't leave a partly initialized FixedArray behind,
+ // so pessimistically fill it with holes now.
+ Label initialization_loop, initialization_loop_entry;
+ __ jmp(&initialization_loop_entry, Label::kNear);
+ __ bind(&initialization_loop);
+ __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+ __ bind(&initialization_loop_entry);
+ __ decp(r9);
+ __ j(not_sign, &initialization_loop);
+
+ __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
__ jmp(&entry);
// Call into runtime if GC is required.
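Editor's note: the codegen change above guards against the GC observing a half-initialized FixedArray. The conversion loop below it can allocate (and fail into gc_required), so every slot is pessimistically pre-filled with the-hole first. The emitted loop jumps to its entry point, decrements r9, and runs until the counter goes negative; a C analogue of the same shape (a sketch, not V8 code):

```cpp
#include <cstdint>

// Fill slots[length-1] .. slots[0] with the hole value, in the same
// direction as the decp / j(not_sign) loop in the generated code.
void FillWithHoles(intptr_t* slots, intptr_t length, intptr_t hole) {
  for (intptr_t i = length - 1; i >= 0; --i) {
    slots[i] = hole;  // GC never sees uninitialized memory
  }
}
```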
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index c8b7c2246a..c8f9456f75 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -164,7 +164,11 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
+ RegList regs = receiver.bit() | name.bit();
+ if (FLAG_vector_ics) {
+ regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
+ }
+ Generate_DebugBreakCallHelper(masm, regs, 0, false);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 2b8fc2d4dc..837da27941 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -252,7 +252,7 @@ static v8::base::LazyInstance<InstructionTable>::type instruction_table =
LAZY_INSTANCE_INITIALIZER;
-static InstructionDesc cmov_instructions[16] = {
+static const InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
@@ -709,65 +709,62 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
int DisassemblerX64::ShiftInstruction(byte* data) {
byte op = *data & (~1);
+ int count = 1;
if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
UnimplementedInstruction();
- return 1;
+ return count;
}
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- regop &= 0x7; // The REX.R bit does not affect the operation.
- int imm8 = -1;
- int num_bytes = 2;
- if (mod != 3) {
- UnimplementedInstruction();
- return num_bytes;
- }
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "rol";
- break;
- case 1:
- mnem = "ror";
- break;
- case 2:
- mnem = "rcl";
- break;
- case 3:
- mnem = "rcr";
- break;
- case 4:
- mnem = "shl";
- break;
- case 5:
- mnem = "shr";
- break;
- case 7:
- mnem = "sar";
- break;
- default:
- UnimplementedInstruction();
- return num_bytes;
- }
- DCHECK_NE(NULL, mnem);
- if (op == 0xD0) {
- imm8 = 1;
- } else if (op == 0xC0) {
- imm8 = *(data + 2);
- num_bytes = 3;
+ // Print mnemonic.
+ {
+ byte modrm = *(data + count);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ regop &= 0x7; // The REX.R bit does not affect the operation.
+ const char* mnem = NULL;
+ switch (regop) {
+ case 0:
+ mnem = "rol";
+ break;
+ case 1:
+ mnem = "ror";
+ break;
+ case 2:
+ mnem = "rcl";
+ break;
+ case 3:
+ mnem = "rcr";
+ break;
+ case 4:
+ mnem = "shl";
+ break;
+ case 5:
+ mnem = "shr";
+ break;
+ case 7:
+ mnem = "sar";
+ break;
+ default:
+ UnimplementedInstruction();
+ return count + 1;
+ }
+ DCHECK_NE(NULL, mnem);
+ AppendToBuffer("%s%c ", mnem, operand_size_code());
}
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- byte_size_operand_ ? NameOfByteCPURegister(rm)
- : NameOfCPURegister(rm));
+ count += PrintRightOperand(data + count);
if (op == 0xD2) {
- AppendToBuffer("cl");
+ AppendToBuffer(", cl");
} else {
- AppendToBuffer("%d", imm8);
+ int imm8 = -1;
+ if (op == 0xD0) {
+ imm8 = 1;
+ } else {
+ DCHECK_EQ(0xC0, op);
+ imm8 = *(data + count);
+ count++;
+ }
+ AppendToBuffer(", %d", imm8);
}
- return num_bytes;
+ return count;
}
@@ -1069,10 +1066,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x72) {
+ current += 1;
+ AppendToBuffer("%s,%s,%d", (regop == 6) ? "pslld" : "psrld",
+ NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else if (opcode == 0x73) {
current += 1;
- DCHECK(regop == 6);
- AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ AppendToBuffer("%s,%s,%d", (regop == 6) ? "psllq" : "psrlq",
+ NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
} else {
const char* mnemonic = "?";
@@ -1086,6 +1088,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
} else {
UnimplementedInstruction();
}
@@ -1489,15 +1493,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x69: // fall through
case 0x6B: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- int32_t imm = *data == 0x6B ? *(data + 2)
- : *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("imul%c %s,%s,0x%x",
- operand_size_code(),
- NameOfCPURegister(regop),
- NameOfCPURegister(rm), imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
+ int count = 1;
+ count += PrintOperands("imul", REG_OPER_OP_ORDER, data + count);
+ AppendToBuffer(",0x");
+ if (*data == 0x69) {
+ count += PrintImmediate(data + count, operand_size());
+ } else {
+ count += PrintImmediate(data + count, OPERAND_BYTE_SIZE);
+ }
+ data += count;
break;
}
@@ -1811,19 +1815,19 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
//------------------------------------------------------------------------------
-static const char* cpu_regs[16] = {
+static const char* const cpu_regs[16] = {
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};
-static const char* byte_cpu_regs[16] = {
+static const char* const byte_cpu_regs[16] = {
"al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
"r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
};
-static const char* xmm_regs[16] = {
+static const char* const xmm_regs[16] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 02c2d9c798..25bfd34029 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1058,7 +1058,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
- int slot = stmt->ForInFeedbackSlot();
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1086,6 +1086,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(rax);
// Check for proxies.
@@ -1110,6 +1111,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1149,7 +1151,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// No need for a write barrier, we are storing a Smi in the feedback vector.
__ Move(rbx, FeedbackVector());
- __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ int vector_index = FeedbackVector()->GetIndex(slot);
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
@@ -1321,7 +1324,14 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Move(LoadDescriptor::NameRegister(), home_object_symbol);
- CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+ if (FLAG_vector_ics) {
+ __ Move(VectorLoadICDescriptor::SlotRegister(),
+ SmiFromSlot(expr->HomeObjectFeedbackSlot()));
+ CallLoadIC(NOT_CONTEXTUAL);
+ } else {
+ CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+ }
+
__ Cmp(rax, isolate()->factory()->undefined_value());
Label done;
@@ -1386,7 +1396,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Move(LoadDescriptor::NameRegister(), proxy->var()->name());
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1472,7 +1482,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
context()->Plug(rax);
@@ -1655,6 +1665,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in rax.
@@ -1683,6 +1694,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
@@ -1710,7 +1723,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype, 2);
} else {
__ Drop(2);
}
@@ -1847,22 +1860,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind {
- VARIABLE,
- NAMED_PROPERTY,
- KEYED_PROPERTY,
- NAMED_SUPER_PROPERTY
- };
- LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
- : NAMED_PROPERTY)
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
@@ -1887,6 +1886,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
__ Push(result_register());
}
break;
+ case KEYED_SUPER_PROPERTY:
+ VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(property->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(property->key());
+ __ Push(result_register());
+ if (expr->is_compound()) {
+ __ Push(MemOperand(rsp, 2 * kPointerSize));
+ __ Push(MemOperand(rsp, 2 * kPointerSize));
+ __ Push(result_register());
+ }
+ break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1918,6 +1929,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
@@ -1964,7 +1979,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyAssignment(expr);
+ EmitNamedSuperPropertyStore(property);
+ context()->Plug(rax);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyStore(property);
+ context()->Plug(rax);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
@@ -2099,7 +2119,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(load_receiver, Operand(rsp, kPointerSize));
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
+ SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
@@ -2118,7 +2138,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->DoneFeedbackSlot()));
+ SmiFromSlot(expr->DoneFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
@@ -2131,7 +2151,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->ValueFeedbackSlot()));
+ SmiFromSlot(expr->ValueFeedbackSlot()));
}
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
@@ -2257,22 +2277,25 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
- Handle<Map> map(isolate()->native_context()->iterator_result_map());
+ const int instance_size = 5 * kPointerSize;
+ DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+ instance_size);
- __ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
+ __ Allocate(instance_size, rax, rcx, rdx, &gc_required, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&gc_required);
- __ Push(Smi::FromInt(map->instance_size()));
+ __ Push(Smi::FromInt(instance_size));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
- __ Move(rbx, map);
+ __ movp(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
+ __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
- DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
@@ -2298,7 +2321,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Move(LoadDescriptor::NameRegister(), key->value());
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(prop->PropertyFeedbackSlot()));
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
@@ -2323,7 +2346,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(prop->PropertyFeedbackSlot()));
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
@@ -2331,6 +2354,14 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object, key.
+ SetSourcePosition(prop->position());
+
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+}
+
+
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
@@ -2392,6 +2423,60 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ // Constructor is in rax.
+ DCHECK(lit != NULL);
+ __ Push(rax);
+
+ // No access check is needed here since the constructor is created by the
+ // class literal.
+ Register scratch = rbx;
+ __ movp(scratch, FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Push(scratch);
+
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ DCHECK(key != NULL);
+
+ if (property->is_static()) {
+ __ Push(Operand(rsp, kPointerSize)); // constructor
+ } else {
+ __ Push(Operand(rsp, 0)); // prototype
+ }
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ __ CallRuntime(Runtime::kDefineClassGetter, 3);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ __ CallRuntime(Runtime::kDefineClassSetter, 3);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // prototype
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+
+ // constructor
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
@@ -2407,16 +2492,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@@ -2435,6 +2512,42 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
CallStoreIC();
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ __ Push(rax);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; rax: home_object
+ Register scratch = rcx;
+ Register scratch2 = rdx;
+ __ Move(scratch, result_register()); // home_object
+ __ movp(rax, MemOperand(rsp, kPointerSize)); // value
+ __ movp(scratch2, MemOperand(rsp, 0)); // this
+ __ movp(MemOperand(rsp, kPointerSize), scratch2); // this
+ __ movp(MemOperand(rsp, 0), scratch); // home_object
+ // stack: this, home_object; rax: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ Push(rax);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = rcx;
+ Register scratch2 = rdx;
+ __ movp(scratch2, MemOperand(rsp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; rax: key, rdx: value
+ __ movp(scratch, MemOperand(rsp, kPointerSize)); // this
+ __ movp(MemOperand(rsp, 2 * kPointerSize), scratch);
+ __ movp(scratch, MemOperand(rsp, 0)); // home_object
+ __ movp(MemOperand(rsp, kPointerSize), scratch);
+ __ movp(MemOperand(rsp, 0), rax);
+ __ Move(rax, scratch2);
+ // stack: this, home_object, key; rax: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
case KEYED_PROPERTY: {
__ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2547,21 +2660,32 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// rax : value
// stack : receiver ('this'), home_object
- Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(rax);
__ Push(key->value());
+ __ Push(rax);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
- context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+ // Assignment to keyed property of super.
+ // rax : value
+ // stack : receiver ('this'), home_object, key
+ DCHECK(prop != NULL);
+
+ __ Push(rax);
+ __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy),
+ 4);
}
@@ -2600,11 +2724,19 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(rax);
} else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), rax);
- __ Pop(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), rax);
+ __ Pop(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(expr->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
context()->Plug(rax);
}
}
@@ -2708,6 +2840,41 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetSourcePosition(prop->position());
+ // Load the function from the receiver.
+ SuperReference* super_ref = prop->obj()->AsSuperReference();
+ EmitLoadHomeObject(super_ref);
+ __ Push(rax);
+ VisitForAccumulatorValue(super_ref->this_var());
+ __ Push(rax);
+ __ Push(rax);
+ __ Push(Operand(rsp, kPointerSize * 2));
+ VisitForStackValue(prop->key());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+ // - home_object
+ // - key
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+
+ // Replace home_object with target function.
+ __ movp(Operand(rsp, kPointerSize), rax);
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr, CallICState::METHOD);
+}
+
+
void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2722,7 +2889,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
SetSourcePosition(expr->position());
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
- __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
+ __ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
@@ -2745,6 +2912,9 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
+ // Push the enclosing function.
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
__ Push(args.GetReceiverOperand());
@@ -2756,7 +2926,14 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+}
+
+
+void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
+ DCHECK(super_ref != NULL);
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kGetPrototype, 1);
}
@@ -2795,6 +2972,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// rdx (receiver). Touch up the stack with the right values.
__ movp(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
__ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
@@ -2826,6 +3005,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(rax); // Function.
__ Push(rdx); // Receiver.
+ PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2847,9 +3027,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
- // super.x() is handled in EmitCallWithLoadIC.
- if (property->IsSuperAccess() && is_named_call) {
- EmitSuperCallWithLoadIC(expr);
+ if (property->IsSuperAccess()) {
+ if (is_named_call) {
+ EmitSuperCallWithLoadIC(expr);
+ } else {
+ EmitKeyedSuperCallWithLoadIC(expr);
+ }
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
@@ -2861,6 +3044,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitKeyedCallWithLoadIC(expr, property->key());
}
}
+ } else if (call_type == Call::SUPER_CALL) {
+ SuperReference* super_ref = callee->AsSuperReference();
+ EmitLoadSuperConstructor(super_ref);
+ __ Push(result_register());
+ VisitForStackValue(super_ref->this_var());
+ EmitCall(expr, CallICState::METHOD);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
@@ -2888,7 +3077,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
// ignored.
- VisitForStackValue(expr->expression());
+ if (expr->expression()->IsSuperReference()) {
+ EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(expr->expression());
+ }
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2908,12 +3102,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot() ==
- expr->CallNewFeedbackSlot() + 1);
+ DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+ expr->CallNewFeedbackSlot().ToInt() + 1);
}
__ Move(rbx, FeedbackVector());
- __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
+ __ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
@@ -3220,6 +3414,31 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(rax, if_false);
+ Register map = rbx;
+ __ movp(map, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
+ __ j(less, if_false);
+ __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(less_equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
@@ -4185,7 +4404,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Move(LoadDescriptor::NameRegister(), expr->name());
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
+ SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
@@ -4343,22 +4562,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- if (prop->IsSuperAccess()) {
- // throw exception.
- VisitSuperReference(prop->obj()->AsSuperReference());
- return;
- }
- }
+ LhsKind assign_type = GetAssignType(prop);
// Evaluate expression and get value.
if (assign_type == VARIABLE) {
@@ -4370,18 +4575,50 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (expr->is_postfix() && !context()->IsEffect()) {
__ Push(Smi::FromInt(0));
}
- if (assign_type == NAMED_PROPERTY) {
- VisitForStackValue(prop->obj());
- __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- // Leave receiver on stack
- __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
- // Copy of key, needed for later store.
- __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
- EmitKeyedPropertyLoad(prop);
+ switch (assign_type) {
+ case NAMED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ EmitNamedPropertyLoad(prop);
+ break;
+ }
+
+ case NAMED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ __ Push(MemOperand(rsp, kPointerSize));
+ __ Push(result_register());
+ EmitNamedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ __ Push(result_register());
+ __ Push(MemOperand(rsp, 2 * kPointerSize));
+ __ Push(MemOperand(rsp, 2 * kPointerSize));
+ __ Push(result_register());
+ EmitKeyedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ // Leave receiver on stack
+ __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
+ // Copy of key, needed for later store.
+ __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
+ EmitKeyedPropertyLoad(prop);
+ break;
+ }
+
+ case VARIABLE:
+ UNREACHABLE();
}
}
@@ -4413,9 +4650,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
break;
+ case NAMED_SUPER_PROPERTY:
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
+ break;
case KEYED_PROPERTY:
__ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
+ case KEYED_SUPER_PROPERTY:
+ __ movp(Operand(rsp, 3 * kPointerSize), rax);
+ break;
}
}
}
@@ -4448,9 +4691,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
break;
+ case NAMED_SUPER_PROPERTY:
+ __ movp(Operand(rsp, 2 * kPointerSize), rax);
+ break;
case KEYED_PROPERTY:
__ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
+ case KEYED_SUPER_PROPERTY:
+ __ movp(Operand(rsp, 3 * kPointerSize), rax);
+ break;
}
}
}
@@ -4507,6 +4756,28 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ EmitNamedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(rax);
+ }
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ EmitKeyedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(rax);
+ }
+ break;
+ }
case KEYED_PROPERTY: {
__ Pop(StoreDescriptor::NameRegister());
__ Pop(StoreDescriptor::ReceiverRegister());
@@ -4538,7 +4809,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_ics) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(proxy->VariableFeedbackSlot()));
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
}
// Use a regular load, not a contextual load, to avoid a reference
// error.
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 84fdca4fb9..f19979d467 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -29,6 +29,9 @@ const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
+const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
+
+
const Register ElementTransitionAndStoreDescriptor::MapRegister() {
return rbx;
}
@@ -152,6 +155,15 @@ void TransitionElementsKindDescriptor::Initialize(
}
+void AllocateHeapNumberDescriptor::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // rsi -- context
+ Register registers[] = {rsi};
+ data->Initialize(arraysize(registers), registers, nullptr);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 1981d55f79..7e482ee3fa 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -2850,13 +2850,14 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
- Register vector = ToRegister(instr->temp_vector());
- DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
- __ Move(vector, instr->hydrogen()->feedback_vector());
+ Register vector_register = ToRegister(instr->temp_vector());
+ DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+ __ Move(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
- __ Move(VectorLoadICDescriptor::SlotRegister(),
- Smi::FromInt(instr->hydrogen()->slot()));
+ int index = vector->GetIndex(instr->hydrogen()->slot());
+ __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
}
@@ -2871,7 +2872,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3013,7 +3014,8 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3295,7 +3297,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 69f50b1bee..541d37ad6e 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <sstream>
+
#include "src/v8.h"
#if V8_TARGET_ARCH_X64
@@ -360,9 +362,9 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
- OStringStream os;
+ std::ostringstream os;
os << hydrogen()->access() << " <- ";
- stream->Add(os.c_str());
+ stream->Add(os.str().c_str());
value()->PrintTo(stream);
}
@@ -720,11 +722,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift by 0 and
// the result cannot be truncated to int32.
if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
- }
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
}
LInstruction* result =
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5033303402..21b0f9b19c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2743,6 +2743,57 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
}
+void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
+ if (src == 0) {
+ xorps(dst, dst);
+ } else {
+ unsigned cnt = base::bits::CountPopulation32(src);
+ unsigned nlz = base::bits::CountLeadingZeros32(src);
+ unsigned ntz = base::bits::CountTrailingZeros32(src);
+ if (nlz + cnt + ntz == 32) {
+ pcmpeqd(dst, dst);
+ if (ntz == 0) {
+ psrld(dst, 32 - cnt);
+ } else {
+ pslld(dst, 32 - cnt);
+ if (nlz != 0) psrld(dst, nlz);
+ }
+ } else {
+ movl(kScratchRegister, Immediate(src));
+ movq(dst, kScratchRegister);
+ }
+ }
+}
+
+
+void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (upper == 0) {
+ Move(dst, lower);
+ } else {
+ unsigned cnt = base::bits::CountPopulation64(src);
+ unsigned nlz = base::bits::CountLeadingZeros64(src);
+ unsigned ntz = base::bits::CountTrailingZeros64(src);
+ if (nlz + cnt + ntz == 64) {
+ pcmpeqd(dst, dst);
+ if (ntz == 0) {
+ psrlq(dst, 64 - cnt);
+ } else {
+ psllq(dst, 64 - cnt);
+ if (nlz != 0) psrlq(dst, nlz);
+ }
+ } else if (lower == 0) {
+ Move(dst, upper);
+ psllq(dst, 32);
+ } else {
+ movq(kScratchRegister, src);
+ movq(dst, kScratchRegister);
+ }
+ }
+}
+
+
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
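Editor's note: the Move(XMMRegister, uint32_t/uint64_t) overloads above exploit a classic trick: when nlz + popcount + ntz equals the bit width, the set bits form a single contiguous run, so the constant can be built from all-ones (pcmpeqd dst, dst) plus at most two shifts, avoiding a general-purpose-register round-trip entirely. A sketch of the predicate and the shift counts, using GCC/Clang builtins in place of base::bits (an assumption, not V8 code):

```cpp
#include <cstdint>

// Returns true when v's set bits form one contiguous run, and computes
// the shift counts the code above would emit after pcmpeqd (all-ones).
bool ContiguousMaskShifts32(uint32_t v, unsigned* left, unsigned* right) {
  if (v == 0) return false;                 // zero is handled via xorps
  unsigned cnt = __builtin_popcount(v);
  unsigned nlz = __builtin_clz(v);
  unsigned ntz = __builtin_ctz(v);
  if (nlz + cnt + ntz != 32) return false;  // not a single run of ones
  if (ntz == 0) {                           // run sits at the low end:
    *left = 0;                              //   psrld dst, 32 - cnt
    *right = 32 - cnt;
  } else {                                  // run sits higher up:
    *left = 32 - cnt;                       //   pslld dst, 32 - cnt
    *right = nlz;                           //   psrld dst, nlz (if nonzero)
  }
  return true;
}
```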
@@ -3984,6 +4035,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on x64.
+ UNREACHABLE();
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movp(rbp, rsp);
@@ -4035,6 +4093,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
+ Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index d051773b40..24eea38394 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -888,6 +888,9 @@ class MacroAssembler: public Assembler {
movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
+ void Move(XMMRegister dst, uint32_t src);
+ void Move(XMMRegister dst, uint64_t src);
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
@@ -1438,6 +1441,7 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Expects object in rax and returns map with validated enum cache